Diffstat (limited to 'portage_with_autodep/pym/_emerge')
-rw-r--r--  portage_with_autodep/pym/_emerge/AbstractDepPriority.py  29
-rw-r--r--  portage_with_autodep/pym/_emerge/AbstractEbuildProcess.py  266
-rw-r--r--  portage_with_autodep/pym/_emerge/AbstractPollTask.py  62
-rw-r--r--  portage_with_autodep/pym/_emerge/AsynchronousLock.py  288
-rw-r--r--  portage_with_autodep/pym/_emerge/AsynchronousTask.py  129
-rw-r--r--  portage_with_autodep/pym/_emerge/AtomArg.py  11
-rw-r--r--  portage_with_autodep/pym/_emerge/Binpkg.py  333
-rw-r--r--  portage_with_autodep/pym/_emerge/BinpkgEnvExtractor.py  66
-rw-r--r--  portage_with_autodep/pym/_emerge/BinpkgExtractorAsync.py  31
-rw-r--r--  portage_with_autodep/pym/_emerge/BinpkgFetcher.py  181
-rw-r--r--  portage_with_autodep/pym/_emerge/BinpkgPrefetcher.py  43
-rw-r--r--  portage_with_autodep/pym/_emerge/BinpkgVerifier.py  75
-rw-r--r--  portage_with_autodep/pym/_emerge/Blocker.py  15
-rw-r--r--  portage_with_autodep/pym/_emerge/BlockerCache.py  182
-rw-r--r--  portage_with_autodep/pym/_emerge/BlockerDB.py  124
-rw-r--r--  portage_with_autodep/pym/_emerge/BlockerDepPriority.py  13
-rw-r--r--  portage_with_autodep/pym/_emerge/CompositeTask.py  157
-rw-r--r--  portage_with_autodep/pym/_emerge/DepPriority.py  49
-rw-r--r--  portage_with_autodep/pym/_emerge/DepPriorityNormalRange.py  47
-rw-r--r--  portage_with_autodep/pym/_emerge/DepPrioritySatisfiedRange.py  85
-rw-r--r--  portage_with_autodep/pym/_emerge/Dependency.py  20
-rw-r--r--  portage_with_autodep/pym/_emerge/DependencyArg.py  33
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildBinpkg.py  46
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildBuild.py  426
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildBuildDir.py  109
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildExecuter.py  99
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildFetcher.py  302
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildFetchonly.py  32
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildIpcDaemon.py  108
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildMerge.py  56
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildMetadataPhase.py  133
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildPhase.py  350
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildProcess.py  21
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildSpawnProcess.py  16
-rw-r--r--  portage_with_autodep/pym/_emerge/EventsAnalyser.py  511
-rw-r--r--  portage_with_autodep/pym/_emerge/EventsLogger.py  180
-rw-r--r--  portage_with_autodep/pym/_emerge/FakeVartree.py  265
-rw-r--r--  portage_with_autodep/pym/_emerge/FifoIpcDaemon.py  81
-rw-r--r--  portage_with_autodep/pym/_emerge/JobStatusDisplay.py  292
-rw-r--r--  portage_with_autodep/pym/_emerge/MergeListItem.py  135
-rw-r--r--  portage_with_autodep/pym/_emerge/MetadataRegen.py  184
-rw-r--r--  portage_with_autodep/pym/_emerge/MiscFunctionsProcess.py  33
-rw-r--r--  portage_with_autodep/pym/_emerge/Package.py  700
-rw-r--r--  portage_with_autodep/pym/_emerge/PackageArg.py  19
-rw-r--r--  portage_with_autodep/pym/_emerge/PackageMerge.py  40
-rw-r--r--  portage_with_autodep/pym/_emerge/PackageUninstall.py  110
-rw-r--r--  portage_with_autodep/pym/_emerge/PackageVirtualDbapi.py  145
-rw-r--r--  portage_with_autodep/pym/_emerge/PipeReader.py  96
-rw-r--r--  portage_with_autodep/pym/_emerge/PollConstants.py  18
-rw-r--r--  portage_with_autodep/pym/_emerge/PollScheduler.py  398
-rw-r--r--  portage_with_autodep/pym/_emerge/PollSelectAdapter.py  73
-rw-r--r--  portage_with_autodep/pym/_emerge/ProgressHandler.py  22
-rw-r--r--  portage_with_autodep/pym/_emerge/QueueScheduler.py  116
-rw-r--r--  portage_with_autodep/pym/_emerge/RootConfig.py  34
-rw-r--r--  portage_with_autodep/pym/_emerge/Scheduler.py  1975
-rw-r--r--  portage_with_autodep/pym/_emerge/SequentialTaskQueue.py  89
-rw-r--r--  portage_with_autodep/pym/_emerge/SetArg.py  11
-rw-r--r--  portage_with_autodep/pym/_emerge/SlotObject.py  42
-rw-r--r--  portage_with_autodep/pym/_emerge/SpawnProcess.py  235
-rw-r--r--  portage_with_autodep/pym/_emerge/SubProcess.py  141
-rw-r--r--  portage_with_autodep/pym/_emerge/Task.py  42
-rw-r--r--  portage_with_autodep/pym/_emerge/TaskScheduler.py  25
-rw-r--r--  portage_with_autodep/pym/_emerge/TaskSequence.py  44
-rw-r--r--  portage_with_autodep/pym/_emerge/UninstallFailure.py  15
-rw-r--r--  portage_with_autodep/pym/_emerge/UnmergeDepPriority.py  41
-rw-r--r--  portage_with_autodep/pym/_emerge/UseFlagDisplay.py  122
-rw-r--r--  portage_with_autodep/pym/_emerge/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/_emerge/_find_deep_system_runtime_deps.py  38
-rw-r--r--  portage_with_autodep/pym/_emerge/_flush_elog_mod_echo.py  15
-rw-r--r--  portage_with_autodep/pym/_emerge/actions.py  3123
-rw-r--r--  portage_with_autodep/pym/_emerge/clear_caches.py  19
-rw-r--r--  portage_with_autodep/pym/_emerge/countdown.py  22
-rw-r--r--  portage_with_autodep/pym/_emerge/create_depgraph_params.py  72
-rw-r--r--  portage_with_autodep/pym/_emerge/create_world_atom.py  92
-rw-r--r--  portage_with_autodep/pym/_emerge/depgraph.py  7029
-rw-r--r--  portage_with_autodep/pym/_emerge/emergelog.py  63
-rw-r--r--  portage_with_autodep/pym/_emerge/getloadavg.py  27
-rw-r--r--  portage_with_autodep/pym/_emerge/help.py  815
-rw-r--r--  portage_with_autodep/pym/_emerge/is_valid_package_atom.py  21
-rw-r--r--  portage_with_autodep/pym/_emerge/main.py  1910
-rw-r--r--  portage_with_autodep/pym/_emerge/resolver/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/_emerge/resolver/backtracking.py  197
-rw-r--r--  portage_with_autodep/pym/_emerge/resolver/circular_dependency.py  267
-rw-r--r--  portage_with_autodep/pym/_emerge/resolver/output.py  888
-rw-r--r--  portage_with_autodep/pym/_emerge/resolver/output_helpers.py  576
-rw-r--r--  portage_with_autodep/pym/_emerge/resolver/slot_collision.py  978
-rw-r--r--  portage_with_autodep/pym/_emerge/search.py  385
-rw-r--r--  portage_with_autodep/pym/_emerge/show_invalid_depstring_notice.py  35
-rw-r--r--  portage_with_autodep/pym/_emerge/stdout_spinner.py  83
-rw-r--r--  portage_with_autodep/pym/_emerge/sync/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/_emerge/sync/getaddrinfo_validate.py  29
-rw-r--r--  portage_with_autodep/pym/_emerge/sync/old_tree_timestamp.py  98
-rw-r--r--  portage_with_autodep/pym/_emerge/unmerge.py  578
-rw-r--r--  portage_with_autodep/pym/_emerge/userquery.py  55
94 files changed, 27592 insertions, 0 deletions
diff --git a/portage_with_autodep/pym/_emerge/AbstractDepPriority.py b/portage_with_autodep/pym/_emerge/AbstractDepPriority.py
new file mode 100644
index 0000000..94a9379
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/AbstractDepPriority.py
@@ -0,0 +1,29 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import copy
+from _emerge.SlotObject import SlotObject
+
+class AbstractDepPriority(SlotObject):
+ __slots__ = ("buildtime", "runtime", "runtime_post")
+
+ def __lt__(self, other):
+ return self.__int__() < other
+
+ def __le__(self, other):
+ return self.__int__() <= other
+
+ def __eq__(self, other):
+ return self.__int__() == other
+
+ def __ne__(self, other):
+ return self.__int__() != other
+
+ def __gt__(self, other):
+ return self.__int__() > other
+
+ def __ge__(self, other):
+ return self.__int__() >= other
+
+ def copy(self):
+ return copy.copy(self)
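
AbstractDepPriority only supplies the rich comparisons, each of which delegates to __int__(); concrete subclasses such as DepPriority provide that conversion. A minimal sketch of the contract, using a hypothetical integer mapping (the real subclasses use their own encodings):

    class ToyDepPriority(object):
        """Hypothetical stand-in mirroring AbstractDepPriority's contract."""

        def __init__(self, buildtime=False, runtime=False, runtime_post=False):
            self.buildtime = buildtime
            self.runtime = runtime
            self.runtime_post = runtime_post

        def __int__(self):
            # Hypothetical encoding: buildtime outranks runtime,
            # which outranks runtime_post.
            if self.buildtime:
                return 2
            if self.runtime:
                return 1
            return 0

        # The same delegation pattern as AbstractDepPriority:
        def __lt__(self, other):
            return self.__int__() < other

        def __ge__(self, other):
            return self.__int__() >= other

    print(ToyDepPriority(buildtime=True) >= 1)    # True
    print(ToyDepPriority(runtime_post=True) < 1)  # True
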
diff --git a/portage_with_autodep/pym/_emerge/AbstractEbuildProcess.py b/portage_with_autodep/pym/_emerge/AbstractEbuildProcess.py
new file mode 100644
index 0000000..4147ecb
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/AbstractEbuildProcess.py
@@ -0,0 +1,266 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+import stat
+import textwrap
+from _emerge.SpawnProcess import SpawnProcess
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.EbuildIpcDaemon import EbuildIpcDaemon
+import portage
+from portage.elog import messages as elog_messages
+from portage.localization import _
+from portage.package.ebuild._ipc.ExitCommand import ExitCommand
+from portage.package.ebuild._ipc.QueryCommand import QueryCommand
+from portage import os
+from portage.util._pty import _create_pty_or_pipe
+from portage.util import apply_secpass_permissions
+
+class AbstractEbuildProcess(SpawnProcess):
+
+ __slots__ = ('phase', 'settings',) + \
+ ('_build_dir', '_ipc_daemon', '_exit_command',)
+ _phases_without_builddir = ('clean', 'cleanrm', 'depend', 'help',)
+
+ # Number of milliseconds to allow natural exit of the ebuild
+ # process after it has called the exit command via IPC. It
+ # doesn't hurt to be generous here since the scheduler
+ # continues to process events during this period, and it can
+ # return long before the timeout expires.
+ _exit_timeout = 10000 # 10 seconds
+
+ # The EbuildIpcDaemon support is well tested, but this variable
+ # is left so we can temporarily disable it if any issues arise.
+ _enable_ipc_daemon = True
+
+ def __init__(self, **kwargs):
+ SpawnProcess.__init__(self, **kwargs)
+ if self.phase is None:
+ phase = self.settings.get("EBUILD_PHASE")
+ if not phase:
+ phase = 'other'
+ self.phase = phase
+
+ def _start(self):
+
+ need_builddir = self.phase not in self._phases_without_builddir
+
+ # This can happen if the pre-clean phase triggers
+ # die_hooks for some reason, and PORTAGE_BUILDDIR
+ # doesn't exist yet.
+ if need_builddir and \
+ not os.path.isdir(self.settings['PORTAGE_BUILDDIR']):
+ msg = _("The ebuild phase '%s' has been aborted "
+ "since PORTAGE_BUILDIR does not exist: '%s'") % \
+ (self.phase, self.settings['PORTAGE_BUILDDIR'])
+ self._eerror(textwrap.wrap(msg, 72))
+ self._set_returncode((self.pid, 1 << 8))
+ self.wait()
+ return
+
+ if self.background:
+ # Automatically prevent color codes from showing up in logs,
+ # since we're not displaying to a terminal anyway.
+ self.settings['NOCOLOR'] = 'true'
+
+ if self._enable_ipc_daemon:
+ self.settings.pop('PORTAGE_EBUILD_EXIT_FILE', None)
+ if self.phase not in self._phases_without_builddir:
+ if 'PORTAGE_BUILDIR_LOCKED' not in self.settings:
+ self._build_dir = EbuildBuildDir(
+ scheduler=self.scheduler, settings=self.settings)
+ self._build_dir.lock()
+ self.settings['PORTAGE_IPC_DAEMON'] = "1"
+ self._start_ipc_daemon()
+ else:
+ self.settings.pop('PORTAGE_IPC_DAEMON', None)
+ else:
+ # Since the IPC daemon is disabled, use a simple tempfile based
+ # approach to detect unexpected exit like in bug #190128.
+ self.settings.pop('PORTAGE_IPC_DAEMON', None)
+ if self.phase not in self._phases_without_builddir:
+ exit_file = os.path.join(
+ self.settings['PORTAGE_BUILDDIR'],
+ '.exit_status')
+ self.settings['PORTAGE_EBUILD_EXIT_FILE'] = exit_file
+ try:
+ os.unlink(exit_file)
+ except OSError:
+ if os.path.exists(exit_file):
+ # make sure it doesn't exist
+ raise
+ else:
+ self.settings.pop('PORTAGE_EBUILD_EXIT_FILE', None)
+
+ SpawnProcess._start(self)
+
+ def _init_ipc_fifos(self):
+
+ input_fifo = os.path.join(
+ self.settings['PORTAGE_BUILDDIR'], '.ipc_in')
+ output_fifo = os.path.join(
+ self.settings['PORTAGE_BUILDDIR'], '.ipc_out')
+
+ for p in (input_fifo, output_fifo):
+
+ st = None
+ try:
+ st = os.lstat(p)
+ except OSError:
+ os.mkfifo(p)
+ else:
+ if not stat.S_ISFIFO(st.st_mode):
+ st = None
+ try:
+ os.unlink(p)
+ except OSError:
+ pass
+ os.mkfifo(p)
+
+ apply_secpass_permissions(p,
+ uid=os.getuid(),
+ gid=portage.data.portage_gid,
+ mode=0o770, stat_cached=st)
+
+ return (input_fifo, output_fifo)
+
+ def _start_ipc_daemon(self):
+ self._exit_command = ExitCommand()
+ self._exit_command.reply_hook = self._exit_command_callback
+ query_command = QueryCommand(self.settings, self.phase)
+ commands = {
+ 'best_version' : query_command,
+ 'exit' : self._exit_command,
+ 'has_version' : query_command,
+ }
+ input_fifo, output_fifo = self._init_ipc_fifos()
+ self._ipc_daemon = EbuildIpcDaemon(commands=commands,
+ input_fifo=input_fifo,
+ output_fifo=output_fifo,
+ scheduler=self.scheduler)
+ self._ipc_daemon.start()
+
+ def _exit_command_callback(self):
+ if self._registered:
+ # Let the process exit naturally, if possible.
+ self.scheduler.schedule(self._reg_id, timeout=self._exit_timeout)
+ if self._registered:
+ # If it doesn't exit naturally in a reasonable amount
+ # of time, kill it (solves bug #278895). We try to avoid
+ # this when possible since it makes sandbox complain about
+ # being killed by a signal.
+ self.cancel()
+
+ def _orphan_process_warn(self):
+ phase = self.phase
+
+ msg = _("The ebuild phase '%s' with pid %s appears "
+ "to have left an orphan process running in the "
+ "background.") % (phase, self.pid)
+
+ self._eerror(textwrap.wrap(msg, 72))
+
+ def _pipe(self, fd_pipes):
+ stdout_pipe = None
+ if not self.background:
+ stdout_pipe = fd_pipes.get(1)
+ got_pty, master_fd, slave_fd = \
+ _create_pty_or_pipe(copy_term_size=stdout_pipe)
+ return (master_fd, slave_fd)
+
+ def _can_log(self, slave_fd):
+ # With sesandbox, logging works through a pty but not through a
+ # normal pipe. So, disable logging if ptys are broken.
+ # See Bug #162404.
+ # TODO: Add support for logging via named pipe (fifo) with
+ # sesandbox, since EbuildIpcDaemon uses a fifo and it's known
+ # to be compatible with sesandbox.
+ return not ('sesandbox' in self.settings.features \
+ and self.settings.selinux_enabled()) or os.isatty(slave_fd)
+
+ def _killed_by_signal(self, signum):
+ msg = _("The ebuild phase '%s' has been "
+ "killed by signal %s.") % (self.phase, signum)
+ self._eerror(textwrap.wrap(msg, 72))
+
+ def _unexpected_exit(self):
+
+ phase = self.phase
+
+ msg = _("The ebuild phase '%s' has exited "
+ "unexpectedly. This type of behavior "
+ "is known to be triggered "
+ "by things such as failed variable "
+ "assignments (bug #190128) or bad substitution "
+ "errors (bug #200313). Normally, before exiting, bash should "
+ "have displayed an error message above. If bash did not "
+ "produce an error message above, it's possible "
+ "that the ebuild has called `exit` when it "
+ "should have called `die` instead. This behavior may also "
+ "be triggered by a corrupt bash binary or a hardware "
+ "problem such as memory or cpu malfunction. If the problem is not "
+ "reproducible or it appears to occur randomly, then it is likely "
+ "to be triggered by a hardware problem. "
+ "If you suspect a hardware problem then you should "
+ "try some basic hardware diagnostics such as memtest. "
+ "Please do not report this as a bug unless it is consistently "
+ "reproducible and you are sure that your bash binary and hardware "
+ "are functioning properly.") % phase
+
+ self._eerror(textwrap.wrap(msg, 72))
+
+ def _eerror(self, lines):
+ self._elog('eerror', lines)
+
+ def _elog(self, elog_funcname, lines):
+ out = io.StringIO()
+ phase = self.phase
+ elog_func = getattr(elog_messages, elog_funcname)
+ global_havecolor = portage.output.havecolor
+ try:
+ portage.output.havecolor = \
+ self.settings.get('NOCOLOR', 'false').lower() in ('no', 'false')
+ for line in lines:
+ elog_func(line, phase=phase, key=self.settings.mycpv, out=out)
+ finally:
+ portage.output.havecolor = global_havecolor
+ msg = out.getvalue()
+ if msg:
+ log_path = None
+ if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+ self.scheduler.output(msg, log_path=log_path)
+
+ def _log_poll_exception(self, event):
+ self._elog("eerror",
+ ["%s received strange poll event: %s\n" % \
+ (self.__class__.__name__, event,)])
+
+ def _set_returncode(self, wait_retval):
+ SpawnProcess._set_returncode(self, wait_retval)
+
+ if self._ipc_daemon is not None:
+ self._ipc_daemon.cancel()
+ if self._exit_command.exitcode is not None:
+ self.returncode = self._exit_command.exitcode
+ else:
+ if self.returncode < 0:
+ if not self.cancelled:
+ self._killed_by_signal(-self.returncode)
+ else:
+ self.returncode = 1
+ if not self.cancelled:
+ self._unexpected_exit()
+ if self._build_dir is not None:
+ self._build_dir.unlock()
+ self._build_dir = None
+ elif not self.cancelled:
+ exit_file = self.settings.get('PORTAGE_EBUILD_EXIT_FILE')
+ if exit_file and not os.path.exists(exit_file):
+ if self.returncode < 0:
+ if not self.cancelled:
+ self._killed_by_signal(-self.returncode)
+ else:
+ self.returncode = 1
+ if not self.cancelled:
+ self._unexpected_exit()
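
When the IPC daemon is disabled, _start() falls back to the PORTAGE_EBUILD_EXIT_FILE handshake: ebuild.sh creates the file just before a normal exit, so its absence afterwards means the phase died unexpectedly (bug #190128). A rough sketch of that detection, with the writer side simulated and illustrative paths:

    import os, subprocess, tempfile

    builddir = tempfile.mkdtemp()
    exit_file = os.path.join(builddir, '.exit_status')

    # A well-behaved phase would create exit_file on its way out;
    # simulate one that dies before reaching that point.
    subprocess.call(['sh', '-c', 'exit 1'])

    if not os.path.exists(exit_file):
        # Mirrors _unexpected_exit(): the normal exit path was
        # never reached, so treat this as abnormal termination.
        print("ebuild phase exited unexpectedly")
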
diff --git a/portage_with_autodep/pym/_emerge/AbstractPollTask.py b/portage_with_autodep/pym/_emerge/AbstractPollTask.py
new file mode 100644
index 0000000..f7f3a95
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/AbstractPollTask.py
@@ -0,0 +1,62 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import array
+import logging
+
+from portage.util import writemsg_level
+from _emerge.AsynchronousTask import AsynchronousTask
+from _emerge.PollConstants import PollConstants
+class AbstractPollTask(AsynchronousTask):
+
+ __slots__ = ("scheduler",) + \
+ ("_registered",)
+
+ _bufsize = 4096
+ _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
+ _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
+ _exceptional_events
+
+ def isAlive(self):
+ return bool(self._registered)
+
+ def _read_buf(self, f, event):
+ """
+ | POLLIN | RETURN
+ | BIT | VALUE
+ | ---------------------------------------------------
+ | 1 | Read self._bufsize into an instance of
+ | | array.array('B') and return it, ignoring
+ | | EOFError and IOError. An empty array
+ | | indicates EOF.
+ | ---------------------------------------------------
+ | 0 | None
+ """
+ buf = None
+ if event & PollConstants.POLLIN:
+ buf = array.array('B')
+ try:
+ buf.fromfile(f, self._bufsize)
+ except (EOFError, IOError):
+ pass
+ return buf
+
+ def _unregister(self):
+ raise NotImplementedError(self)
+
+ def _log_poll_exception(self, event):
+ writemsg_level(
+ "!!! %s received strange poll event: %s\n" % \
+ (self.__class__.__name__, event,),
+ level=logging.ERROR, noiselevel=-1)
+
+ def _unregister_if_appropriate(self, event):
+ if self._registered:
+ if event & self._exceptional_events:
+ self._log_poll_exception(event)
+ self._unregister()
+ self.cancel()
+ elif event & PollConstants.POLLHUP:
+ self._unregister()
+ self.wait()
+
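The _read_buf() contract documented above can be exercised on an ordinary pipe: a POLLIN event means "read up to _bufsize bytes into array('B')", with EOFError and IOError swallowed, and an empty array signals EOF. A self-contained sketch on Linux, using the stdlib select module in place of the portage scheduler:

    import array, os, select

    r, w = os.pipe()
    os.write(w, b'hello')
    os.close(w)

    f = os.fdopen(r, 'rb', 0)
    poller = select.poll()
    poller.register(f.fileno(), select.POLLIN)
    for fd, event in poller.poll():
        if event & select.POLLIN:
            buf = array.array('B')
            try:
                buf.fromfile(f, 4096)  # same idiom as _read_buf()
            except (EOFError, IOError):
                pass
            print(buf.tobytes())  # b'hello'; an empty buf would mean EOF
    f.close()
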
diff --git a/portage_with_autodep/pym/_emerge/AsynchronousLock.py b/portage_with_autodep/pym/_emerge/AsynchronousLock.py
new file mode 100644
index 0000000..637ba73
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/AsynchronousLock.py
@@ -0,0 +1,288 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import dummy_threading
+import fcntl
+import logging
+import sys
+
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
+import portage
+from portage import os
+from portage.exception import TryAgain
+from portage.localization import _
+from portage.locks import lockfile, unlockfile
+from portage.util import writemsg_level
+from _emerge.AbstractPollTask import AbstractPollTask
+from _emerge.AsynchronousTask import AsynchronousTask
+from _emerge.PollConstants import PollConstants
+from _emerge.SpawnProcess import SpawnProcess
+
+class AsynchronousLock(AsynchronousTask):
+ """
+ This uses the portage.locks module to acquire a lock asynchronously,
+ using either a thread (if available) or a subprocess.
+
+ The default behavior is to use a process instead of a thread, since
+ there is currently no way to interrupt a thread that is waiting for
+ a lock (notably, SIGINT doesn't work because python delivers all
+ signals to the main thread).
+ """
+
+ __slots__ = ('path', 'scheduler',) + \
+ ('_imp', '_force_async', '_force_dummy', '_force_process', \
+ '_force_thread', '_waiting')
+
+ _use_process_by_default = True
+
+ def _start(self):
+
+ if not self._force_async:
+ try:
+ self._imp = lockfile(self.path,
+ wantnewlockfile=True, flags=os.O_NONBLOCK)
+ except TryAgain:
+ pass
+ else:
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ if self._force_process or \
+ (not self._force_thread and \
+ (self._use_process_by_default or threading is dummy_threading)):
+ self._imp = _LockProcess(path=self.path, scheduler=self.scheduler)
+ else:
+ self._imp = _LockThread(path=self.path,
+ scheduler=self.scheduler,
+ _force_dummy=self._force_dummy)
+
+ self._imp.addExitListener(self._imp_exit)
+ self._imp.start()
+
+ def _imp_exit(self, imp):
+ # call exit listeners
+ if not self._waiting:
+ self.wait()
+
+ def _cancel(self):
+ if isinstance(self._imp, AsynchronousTask):
+ self._imp.cancel()
+
+ def _poll(self):
+ if isinstance(self._imp, AsynchronousTask):
+ self._imp.poll()
+ return self.returncode
+
+ def _wait(self):
+ if self.returncode is not None:
+ return self.returncode
+ self._waiting = True
+ self.returncode = self._imp.wait()
+ self._waiting = False
+ return self.returncode
+
+ def unlock(self):
+ if self._imp is None:
+ raise AssertionError('not locked')
+ if isinstance(self._imp, (_LockProcess, _LockThread)):
+ self._imp.unlock()
+ else:
+ unlockfile(self._imp)
+ self._imp = None
+
+class _LockThread(AbstractPollTask):
+ """
+ This uses the portage.locks module to acquire a lock asynchronously,
+ using a background thread. After the lock is acquired, the thread
+ writes to a pipe in order to notify a poll loop running in the main
+ thread.
+
+ If the threading module is unavailable then the dummy_threading
+ module will be used, and the lock will be acquired synchronously
+ (before the start() method returns).
+ """
+
+ __slots__ = ('path',) + \
+ ('_files', '_force_dummy', '_lock_obj',
+ '_thread', '_reg_id',)
+
+ def _start(self):
+ pr, pw = os.pipe()
+ self._files = {}
+ self._files['pipe_read'] = os.fdopen(pr, 'rb', 0)
+ self._files['pipe_write'] = os.fdopen(pw, 'wb', 0)
+ for k, f in self._files.items():
+ fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
+ fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
+ self._reg_id = self.scheduler.register(self._files['pipe_read'].fileno(),
+ PollConstants.POLLIN, self._output_handler)
+ self._registered = True
+ threading_mod = threading
+ if self._force_dummy:
+ threading_mod = dummy_threading
+ self._thread = threading_mod.Thread(target=self._run_lock)
+ self._thread.start()
+
+ def _run_lock(self):
+ self._lock_obj = lockfile(self.path, wantnewlockfile=True)
+ self._files['pipe_write'].write(b'\0')
+
+ def _output_handler(self, f, event):
+ buf = self._read_buf(self._files['pipe_read'], event)
+ if buf:
+ self._unregister()
+ self.returncode = os.EX_OK
+ self.wait()
+
+ def _cancel(self):
+ # There's currently no way to force thread termination.
+ pass
+
+ def _wait(self):
+ if self.returncode is not None:
+ return self.returncode
+ if self._registered:
+ self.scheduler.schedule(self._reg_id)
+ return self.returncode
+
+ def unlock(self):
+ if self._lock_obj is None:
+ raise AssertionError('not locked')
+ if self.returncode is None:
+ raise AssertionError('lock not acquired yet')
+ unlockfile(self._lock_obj)
+ self._lock_obj = None
+
+ def _unregister(self):
+ self._registered = False
+
+ if self._thread is not None:
+ self._thread.join()
+ self._thread = None
+
+ if self._reg_id is not None:
+ self.scheduler.unregister(self._reg_id)
+ self._reg_id = None
+
+ if self._files is not None:
+ for f in self._files.values():
+ f.close()
+ self._files = None
+
+class _LockProcess(AbstractPollTask):
+ """
+ This uses the portage.locks module to acquire a lock asynchronously,
+ using a subprocess. After the lock is acquired, the process
+ writes to a pipe in order to notify a poll loop running in the main
+ process. The unlock() method notifies the subprocess to release the
+ lock and exit.
+ """
+
+ __slots__ = ('path',) + \
+ ('_acquired', '_kill_test', '_proc', '_files', '_reg_id', '_unlocked')
+
+ def _start(self):
+ in_pr, in_pw = os.pipe()
+ out_pr, out_pw = os.pipe()
+ self._files = {}
+ self._files['pipe_in'] = os.fdopen(in_pr, 'rb', 0)
+ self._files['pipe_out'] = os.fdopen(out_pw, 'wb', 0)
+ fcntl.fcntl(in_pr, fcntl.F_SETFL,
+ fcntl.fcntl(in_pr, fcntl.F_GETFL) | os.O_NONBLOCK)
+ self._reg_id = self.scheduler.register(in_pr,
+ PollConstants.POLLIN, self._output_handler)
+ self._registered = True
+ self._proc = SpawnProcess(
+ args=[portage._python_interpreter,
+ os.path.join(portage._bin_path, 'lock-helper.py'), self.path],
+ env=dict(os.environ, PORTAGE_PYM_PATH=portage._pym_path),
+ fd_pipes={0:out_pr, 1:in_pw, 2:sys.stderr.fileno()},
+ scheduler=self.scheduler)
+ self._proc.addExitListener(self._proc_exit)
+ self._proc.start()
+ os.close(out_pr)
+ os.close(in_pw)
+
+ def _proc_exit(self, proc):
+ if proc.returncode != os.EX_OK:
+ # Typically, this will happen due to the
+ # process being killed by a signal.
+ if not self._acquired:
+ # If the lock hasn't been acquired yet, the
+ # caller can check the returncode and handle
+ # this failure appropriately.
+ if not (self.cancelled or self._kill_test):
+ writemsg_level("_LockProcess: %s\n" % \
+ _("failed to acquire lock on '%s'") % (self.path,),
+ level=logging.ERROR, noiselevel=-1)
+ self._unregister()
+ self.returncode = proc.returncode
+ self.wait()
+ return
+
+ if not self.cancelled and \
+ not self._unlocked:
+ # We don't want lost locks going unnoticed, so it's
+ # only safe to ignore if either the cancel() or
+ # unlock() methods have been previously called.
+ raise AssertionError("lock process failed with returncode %s" \
+ % (proc.returncode,))
+
+ def _cancel(self):
+ if self._proc is not None:
+ self._proc.cancel()
+
+ def _poll(self):
+ if self._proc is not None:
+ self._proc.poll()
+ return self.returncode
+
+ def _wait(self):
+ if self.returncode is not None:
+ return self.returncode
+ if self._registered:
+ self.scheduler.schedule(self._reg_id)
+ return self.returncode
+
+ def _output_handler(self, f, event):
+ buf = self._read_buf(self._files['pipe_in'], event)
+ if buf:
+ self._acquired = True
+ self._unregister()
+ self.returncode = os.EX_OK
+ self.wait()
+
+ def _unregister(self):
+ self._registered = False
+
+ if self._reg_id is not None:
+ self.scheduler.unregister(self._reg_id)
+ self._reg_id = None
+
+ if self._files is not None:
+ try:
+ pipe_in = self._files.pop('pipe_in')
+ except KeyError:
+ pass
+ else:
+ pipe_in.close()
+
+ def unlock(self):
+ if self._proc is None:
+ raise AssertionError('not locked')
+ if self.returncode is None:
+ raise AssertionError('lock not acquired yet')
+ if self.returncode != os.EX_OK:
+ raise AssertionError("lock process failed with returncode %s" \
+ % (self.returncode,))
+ self._unlocked = True
+ self._files['pipe_out'].write(b'\0')
+ self._files['pipe_out'].close()
+ self._files = None
+ self._proc.wait()
+ self._proc = None
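
A usage sketch for AsynchronousLock. The fast path in _start() tries a non-blocking lockfile() first and only spawns the helper (thread or subprocess) when the lock is contested. The scheduler object is assumed to be supplied by the surrounding code, ordinarily a PollScheduler's scheduler interface:

    import os
    from _emerge.AsynchronousLock import AsynchronousLock

    # `scheduler` is assumed to be provided by the caller.
    lock = AsynchronousLock(path='/tmp/example.lock', scheduler=scheduler)
    lock.start()
    if lock.wait() == os.EX_OK:
        try:
            pass  # the lock file at /tmp/example.lock is held here
        finally:
            lock.unlock()
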
diff --git a/portage_with_autodep/pym/_emerge/AsynchronousTask.py b/portage_with_autodep/pym/_emerge/AsynchronousTask.py
new file mode 100644
index 0000000..36522ca
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/AsynchronousTask.py
@@ -0,0 +1,129 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from _emerge.SlotObject import SlotObject
+class AsynchronousTask(SlotObject):
+ """
+ Subclasses override _wait() and _poll() so that calls
+ to public methods can be wrapped for implementing
+ hooks such as exit listener notification.
+
+ Subclasses should call self.wait() to notify exit listeners after
+ the task is complete and self.returncode has been set.
+ """
+
+ __slots__ = ("background", "cancelled", "returncode") + \
+ ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
+
+ def start(self):
+ """
+ Start an asynchronous task and then return as soon as possible.
+ """
+ self._start_hook()
+ self._start()
+
+ def _start(self):
+ self.returncode = os.EX_OK
+ self.wait()
+
+ def isAlive(self):
+ return self.returncode is None
+
+ def poll(self):
+ if self.returncode is not None:
+ return self.returncode
+ self._poll()
+ self._wait_hook()
+ return self.returncode
+
+ def _poll(self):
+ return self.returncode
+
+ def wait(self):
+ if self.returncode is None:
+ self._wait()
+ self._wait_hook()
+ return self.returncode
+
+ def _wait(self):
+ return self.returncode
+
+ def cancel(self):
+ if not self.cancelled:
+ self.cancelled = True
+ self._cancel()
+ self.wait()
+
+ def _cancel(self):
+ """
+ Subclasses should implement this, as a template method
+ to be called by AsynchronousTask.cancel().
+ """
+ pass
+
+ def addStartListener(self, f):
+ """
+ The function will be called with one argument, a reference to self.
+ """
+ if self._start_listeners is None:
+ self._start_listeners = []
+ self._start_listeners.append(f)
+
+ def removeStartListener(self, f):
+ if self._start_listeners is None:
+ return
+ self._start_listeners.remove(f)
+
+ def _start_hook(self):
+ if self._start_listeners is not None:
+ start_listeners = self._start_listeners
+ self._start_listeners = None
+
+ for f in start_listeners:
+ f(self)
+
+ def addExitListener(self, f):
+ """
+ The function will be called with one argument, a reference to self.
+ """
+ if self._exit_listeners is None:
+ self._exit_listeners = []
+ self._exit_listeners.append(f)
+
+ def removeExitListener(self, f):
+ if self._exit_listeners is None:
+ if self._exit_listener_stack is not None:
+ self._exit_listener_stack.remove(f)
+ return
+ self._exit_listeners.remove(f)
+
+ def _wait_hook(self):
+ """
+ Call this method after the task completes, just before returning
+ the returncode from wait() or poll(). This hook is
+ used to trigger exit listeners when the returncode first
+ becomes available.
+ """
+ if self.returncode is not None and \
+ self._exit_listeners is not None:
+
+ # This prevents recursion, in case one of the
+ # exit handlers triggers this method again by
+ # calling wait(). Use a stack that gives
+ # removeExitListener() an opportunity to consume
+ # listeners from the stack, before they can get
+ # called below. This is necessary because a call
+ # to one exit listener may result in a call to
+ # removeExitListener() for another listener on
+ # the stack. That listener needs to be removed
+ # from the stack since it would be inconsistent
+ # to call it after it has been passed into
+ # removeExitListener().
+ self._exit_listener_stack = self._exit_listeners
+ self._exit_listeners = None
+
+ self._exit_listener_stack.reverse()
+ while self._exit_listener_stack:
+ self._exit_listener_stack.pop()(self)
+
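The listener machinery above guarantees that exit listeners fire exactly once, when returncode first becomes available. The base class's default _start() completes immediately, which makes the flow easy to see in isolation (assuming _emerge is importable, e.g. from portage's pym directory):

    from _emerge.AsynchronousTask import AsynchronousTask

    def on_exit(task):
        print("finished with returncode", task.returncode)

    task = AsynchronousTask()
    task.addExitListener(on_exit)
    task.start()  # default _start() sets returncode and calls wait(),
                  # so on_exit fires here
    task.wait()   # returns 0; listeners are not re-fired
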
diff --git a/portage_with_autodep/pym/_emerge/AtomArg.py b/portage_with_autodep/pym/_emerge/AtomArg.py
new file mode 100644
index 0000000..a929b43
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/AtomArg.py
@@ -0,0 +1,11 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage._sets.base import InternalPackageSet
+from _emerge.DependencyArg import DependencyArg
+
+class AtomArg(DependencyArg):
+ def __init__(self, atom=None, **kwargs):
+ DependencyArg.__init__(self, **kwargs)
+ self.atom = atom
+ self.pset = InternalPackageSet(initial_atoms=(self.atom,), allow_repo=True)
diff --git a/portage_with_autodep/pym/_emerge/Binpkg.py b/portage_with_autodep/pym/_emerge/Binpkg.py
new file mode 100644
index 0000000..bc6511e
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/Binpkg.py
@@ -0,0 +1,333 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.BinpkgFetcher import BinpkgFetcher
+from _emerge.BinpkgEnvExtractor import BinpkgEnvExtractor
+from _emerge.BinpkgExtractorAsync import BinpkgExtractorAsync
+from _emerge.CompositeTask import CompositeTask
+from _emerge.BinpkgVerifier import BinpkgVerifier
+from _emerge.EbuildMerge import EbuildMerge
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from portage.eapi import eapi_exports_replace_vars
+from portage.util import writemsg
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+import io
+import logging
+from portage.output import colorize
+
+class Binpkg(CompositeTask):
+
+ __slots__ = ("find_blockers",
+ "ldpath_mtimes", "logger", "opts",
+ "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
+ ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
+ "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
+
+ def _writemsg_level(self, msg, level=0, noiselevel=0):
+ self.scheduler.output(msg, level=level, noiselevel=noiselevel,
+ log_path=self.settings.get("PORTAGE_LOG_FILE"))
+
+ def _start(self):
+
+ pkg = self.pkg
+ settings = self.settings
+ settings.setcpv(pkg)
+ self._tree = "bintree"
+ self._bintree = self.pkg.root_config.trees[self._tree]
+ self._verify = not self.opts.pretend
+
+ # Use realpath like doebuild_environment() does, since we assert
+ # that this path is literally identical to PORTAGE_BUILDDIR.
+ dir_path = os.path.join(os.path.realpath(settings["PORTAGE_TMPDIR"]),
+ "portage", pkg.category, pkg.pf)
+ self._image_dir = os.path.join(dir_path, "image")
+ self._infloc = os.path.join(dir_path, "build-info")
+ self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
+ settings["EBUILD"] = self._ebuild_path
+ portage.doebuild_environment(self._ebuild_path, 'setup',
+ settings=self.settings, db=self._bintree.dbapi)
+ if dir_path != self.settings['PORTAGE_BUILDDIR']:
+ raise AssertionError("'%s' != '%s'" % \
+ (dir_path, self.settings['PORTAGE_BUILDDIR']))
+ self._build_dir = EbuildBuildDir(
+ scheduler=self.scheduler, settings=settings)
+ settings.configdict["pkg"]["EMERGE_FROM"] = "binary"
+ settings.configdict["pkg"]["MERGE_TYPE"] = "binary"
+
+ if eapi_exports_replace_vars(settings["EAPI"]):
+ vardb = self.pkg.root_config.trees["vartree"].dbapi
+ settings["REPLACING_VERSIONS"] = " ".join(
+ set(portage.versions.cpv_getversion(x) \
+ for x in vardb.match(self.pkg.slot_atom) + \
+ vardb.match('='+self.pkg.cpv)))
+
+ # The prefetcher has already completed or it
+ # could be running now. If it's running now,
+ # wait for it to complete since it holds
+ # a lock on the file being fetched. The
+ # portage.locks functions are only designed
+ # to work between separate processes. Since
+ # the lock is held by the current process,
+ # use the scheduler and fetcher methods to
+ # synchronize with the fetcher.
+ prefetcher = self.prefetcher
+ if prefetcher is None:
+ pass
+ elif prefetcher.isAlive() and \
+ prefetcher.poll() is None:
+
+ waiting_msg = ("Fetching '%s' " + \
+ "in the background. " + \
+ "To view fetch progress, run `tail -f " + \
+ "/var/log/emerge-fetch.log` in another " + \
+ "terminal.") % prefetcher.pkg_path
+ msg_prefix = colorize("GOOD", " * ")
+ from textwrap import wrap
+ waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
+ for line in wrap(waiting_msg, 65))
+ if not self.background:
+ writemsg(waiting_msg, noiselevel=-1)
+
+ self._current_task = prefetcher
+ prefetcher.addExitListener(self._prefetch_exit)
+ return
+
+ self._prefetch_exit(prefetcher)
+
+ def _prefetch_exit(self, prefetcher):
+
+ pkg = self.pkg
+ pkg_count = self.pkg_count
+ if not (self.opts.pretend or self.opts.fetchonly):
+ self._build_dir.lock()
+ # Initialize PORTAGE_LOG_FILE (clean_log won't work without it).
+ portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
+ # If necessary, discard old log so that we don't
+ # append to it.
+ self._build_dir.clean_log()
+ fetcher = BinpkgFetcher(background=self.background,
+ logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
+ pretend=self.opts.pretend, scheduler=self.scheduler)
+ pkg_path = fetcher.pkg_path
+ self._pkg_path = pkg_path
+ # This gives bashrc users an opportunity to do various things
+ # such as remove binary packages after they're installed.
+ self.settings["PORTAGE_BINPKG_FILE"] = pkg_path
+
+ if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
+
+ msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
+ short_msg = "emerge: (%s of %s) %s Fetch" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv)
+ self.logger.log(msg, short_msg=short_msg)
+
+ # Allow the Scheduler's fetch queue to control the
+ # number of concurrent fetchers.
+ fetcher.addExitListener(self._fetcher_exit)
+ self._task_queued(fetcher)
+ self.scheduler.fetch.schedule(fetcher)
+ return
+
+ self._fetcher_exit(fetcher)
+
+ def _fetcher_exit(self, fetcher):
+
+ # The fetcher only has a returncode when
+ # --getbinpkg is enabled.
+ if fetcher.returncode is not None:
+ self._fetched_pkg = True
+ if self._default_exit(fetcher) != os.EX_OK:
+ self._unlock_builddir()
+ self.wait()
+ return
+
+ if self.opts.pretend:
+ self._current_task = None
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ verifier = None
+ if self._verify:
+ logfile = self.settings.get("PORTAGE_LOG_FILE")
+ verifier = BinpkgVerifier(background=self.background,
+ logfile=logfile, pkg=self.pkg, scheduler=self.scheduler)
+ self._start_task(verifier, self._verifier_exit)
+ return
+
+ self._verifier_exit(verifier)
+
+ def _verifier_exit(self, verifier):
+ if verifier is not None and \
+ self._default_exit(verifier) != os.EX_OK:
+ self._unlock_builddir()
+ self.wait()
+ return
+
+ logger = self.logger
+ pkg = self.pkg
+ pkg_count = self.pkg_count
+ pkg_path = self._pkg_path
+
+ if self._fetched_pkg:
+ self._bintree.inject(pkg.cpv, filename=pkg_path)
+
+ logfile = self.settings.get("PORTAGE_LOG_FILE")
+ if logfile is not None and os.path.isfile(logfile):
+ # Remove fetch log after successful fetch.
+ try:
+ os.unlink(logfile)
+ except OSError:
+ pass
+
+ if self.opts.fetchonly:
+ self._current_task = None
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ msg = " === (%s of %s) Merging Binary (%s::%s)" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
+ short_msg = "emerge: (%s of %s) %s Merge Binary" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv)
+ logger.log(msg, short_msg=short_msg)
+
+ phase = "clean"
+ settings = self.settings
+ ebuild_phase = EbuildPhase(background=self.background,
+ phase=phase, scheduler=self.scheduler,
+ settings=settings)
+
+ self._start_task(ebuild_phase, self._clean_exit)
+
+ def _clean_exit(self, clean_phase):
+ if self._default_exit(clean_phase) != os.EX_OK:
+ self._unlock_builddir()
+ self.wait()
+ return
+
+ dir_path = self.settings['PORTAGE_BUILDDIR']
+
+ infloc = self._infloc
+ pkg = self.pkg
+ pkg_path = self._pkg_path
+
+ dir_mode = 0o755
+ for mydir in (dir_path, self._image_dir, infloc):
+ portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
+ gid=portage.data.portage_gid, mode=dir_mode)
+
+ # This initializes PORTAGE_LOG_FILE.
+ portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
+ self._writemsg_level(">>> Extracting info\n")
+
+ pkg_xpak = portage.xpak.tbz2(self._pkg_path)
+ check_missing_metadata = ("CATEGORY", "PF")
+ missing_metadata = set()
+ for k in check_missing_metadata:
+ v = pkg_xpak.getfile(_unicode_encode(k,
+ encoding=_encodings['repo.content']))
+ if not v:
+ missing_metadata.add(k)
+
+ pkg_xpak.unpackinfo(infloc)
+ for k in missing_metadata:
+ if k == "CATEGORY":
+ v = pkg.category
+ elif k == "PF":
+ v = pkg.pf
+ else:
+ continue
+
+ f = io.open(_unicode_encode(os.path.join(infloc, k),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['content'],
+ errors='backslashreplace')
+ try:
+ f.write(_unicode_decode(v + "\n"))
+ finally:
+ f.close()
+
+ # Store the md5sum in the vdb.
+ f = io.open(_unicode_encode(os.path.join(infloc, 'BINPKGMD5'),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['content'], errors='strict')
+ try:
+ f.write(_unicode_decode(
+ str(portage.checksum.perform_md5(pkg_path)) + "\n"))
+ finally:
+ f.close()
+
+ env_extractor = BinpkgEnvExtractor(background=self.background,
+ scheduler=self.scheduler, settings=self.settings)
+
+ self._start_task(env_extractor, self._env_extractor_exit)
+
+ def _env_extractor_exit(self, env_extractor):
+ if self._default_exit(env_extractor) != os.EX_OK:
+ self._unlock_builddir()
+ self.wait()
+ return
+
+ setup_phase = EbuildPhase(background=self.background,
+ phase="setup", scheduler=self.scheduler,
+ settings=self.settings)
+
+ setup_phase.addExitListener(self._setup_exit)
+ self._task_queued(setup_phase)
+ self.scheduler.scheduleSetup(setup_phase)
+
+ def _setup_exit(self, setup_phase):
+ if self._default_exit(setup_phase) != os.EX_OK:
+ self._unlock_builddir()
+ self.wait()
+ return
+
+ extractor = BinpkgExtractorAsync(background=self.background,
+ env=self.settings.environ(),
+ image_dir=self._image_dir,
+ pkg=self.pkg, pkg_path=self._pkg_path,
+ logfile=self.settings.get("PORTAGE_LOG_FILE"),
+ scheduler=self.scheduler)
+ self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
+ self._start_task(extractor, self._extractor_exit)
+
+ def _extractor_exit(self, extractor):
+ if self._final_exit(extractor) != os.EX_OK:
+ self._unlock_builddir()
+ self._writemsg_level("!!! Error Extracting '%s'\n" % \
+ self._pkg_path, noiselevel=-1, level=logging.ERROR)
+ self.wait()
+
+ def _unlock_builddir(self):
+ if self.opts.pretend or self.opts.fetchonly:
+ return
+ portage.elog.elog_process(self.pkg.cpv, self.settings)
+ self._build_dir.unlock()
+
+ def create_install_task(self):
+ task = EbuildMerge(find_blockers=self.find_blockers,
+ ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
+ pkg=self.pkg, pkg_count=self.pkg_count,
+ pkg_path=self._pkg_path, scheduler=self.scheduler,
+ settings=self.settings, tree=self._tree,
+ world_atom=self.world_atom)
+ task.addExitListener(self._install_exit)
+ return task
+
+ def _install_exit(self, task):
+ self.settings.pop("PORTAGE_BINPKG_FILE", None)
+ self._unlock_builddir()
+ if task.returncode == os.EX_OK and \
+ 'binpkg-logs' not in self.settings.features and \
+ self.settings.get("PORTAGE_LOG_FILE"):
+ try:
+ os.unlink(self.settings["PORTAGE_LOG_FILE"])
+ except OSError:
+ pass
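
Binpkg is one long chain of CompositeTask stages (prefetch wait, fetch, verify, clean, env extraction, setup, image extraction), each wired up with _start_task(next_stage, exit_handler) and each handler bailing out via _default_exit() on failure. A schematic sketch of that idiom with hypothetical stage names:

    import os
    from _emerge.CompositeTask import CompositeTask

    class TwoStageTask(CompositeTask):
        """Hypothetical two-stage chain illustrating the Binpkg pattern."""
        __slots__ = ("first_task", "second_task")

        def _start(self):
            self._start_task(self.first_task, self._first_exit)

        def _first_exit(self, first):
            if self._default_exit(first) != os.EX_OK:
                self.wait()  # first stage failed; stop the chain
                return
            self._start_task(self.second_task, self._second_exit)

        def _second_exit(self, second):
            # _final_exit() records the last stage's returncode on self.
            self._final_exit(second)
            self.wait()
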
diff --git a/portage_with_autodep/pym/_emerge/BinpkgEnvExtractor.py b/portage_with_autodep/pym/_emerge/BinpkgEnvExtractor.py
new file mode 100644
index 0000000..f68971b
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BinpkgEnvExtractor.py
@@ -0,0 +1,66 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+
+from _emerge.CompositeTask import CompositeTask
+from _emerge.SpawnProcess import SpawnProcess
+from portage import os, _shell_quote, _unicode_encode
+from portage.const import BASH_BINARY
+
+class BinpkgEnvExtractor(CompositeTask):
+ """
+ Extract environment.bz2 for a binary or installed package.
+ """
+ __slots__ = ('settings',)
+
+ def saved_env_exists(self):
+ return os.path.exists(self._get_saved_env_path())
+
+ def dest_env_exists(self):
+ return os.path.exists(self._get_dest_env_path())
+
+ def _get_saved_env_path(self):
+ return os.path.join(os.path.dirname(self.settings['EBUILD']),
+ "environment.bz2")
+
+ def _get_dest_env_path(self):
+ return os.path.join(self.settings["T"], "environment")
+
+ def _start(self):
+ saved_env_path = self._get_saved_env_path()
+ dest_env_path = self._get_dest_env_path()
+ shell_cmd = "${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d} -c -- %s > %s" % \
+ (_shell_quote(saved_env_path),
+ _shell_quote(dest_env_path))
+ extractor_proc = SpawnProcess(
+ args=[BASH_BINARY, "-c", shell_cmd],
+ background=self.background,
+ env=self.settings.environ(),
+ scheduler=self.scheduler,
+ logfile=self.settings.get('PORTAGE_LOGFILE'))
+
+ self._start_task(extractor_proc, self._extractor_exit)
+
+ def _remove_dest_env(self):
+ try:
+ os.unlink(self._get_dest_env_path())
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+
+ def _extractor_exit(self, extractor_proc):
+
+ if self._default_exit(extractor_proc) != os.EX_OK:
+ self._remove_dest_env()
+ self.wait()
+ return
+
+ # This is a signal to ebuild.sh, so that it knows to filter
+ # out things like SANDBOX_{DENY,PREDICT,READ,WRITE} that
+ # would be preserved between normal phases.
+ open(_unicode_encode(self._get_dest_env_path() + '.raw'), 'w')
+
+ self._current_task = None
+ self.returncode = os.EX_OK
+ self.wait()
diff --git a/portage_with_autodep/pym/_emerge/BinpkgExtractorAsync.py b/portage_with_autodep/pym/_emerge/BinpkgExtractorAsync.py
new file mode 100644
index 0000000..d1630f2
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BinpkgExtractorAsync.py
@@ -0,0 +1,31 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.SpawnProcess import SpawnProcess
+import portage
+import os
+import signal
+
+class BinpkgExtractorAsync(SpawnProcess):
+
+ __slots__ = ("image_dir", "pkg", "pkg_path")
+
+ _shell_binary = portage.const.BASH_BINARY
+
+ def _start(self):
+ # Add -q to bzip2 opts, in order to avoid "trailing garbage after
+ # EOF ignored" warning messages due to xpak trailer.
+ # SIGPIPE handling (128 + SIGPIPE) should be compatible with
+ # assert_sigpipe_ok() that's used by the ebuild unpack() helper.
+ self.args = [self._shell_binary, "-c",
+ ("${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d} -cq -- %s | tar -xp -C %s -f - ; " + \
+ "p=(${PIPESTATUS[@]}) ; " + \
+ "if [[ ${p[0]} != 0 && ${p[0]} != %d ]] ; then " % (128 + signal.SIGPIPE) + \
+ "echo bzip2 failed with status ${p[0]} ; exit ${p[0]} ; fi ; " + \
+ "if [ ${p[1]} != 0 ] ; then " + \
+ "echo tar failed with status ${p[1]} ; exit ${p[1]} ; fi ; " + \
+ "exit 0 ;") % \
+ (portage._shell_quote(self.pkg_path),
+ portage._shell_quote(self.image_dir))]
+
+ SpawnProcess._start(self)
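
The 128 + SIGPIPE special case above exists because tar can stop reading before bzip2 has written everything (the xpak trailer), in which case bzip2 dies from SIGPIPE and the shell reports its status as 128 plus the signal number. A quick demonstration of that status encoding on Linux:

    import signal, subprocess

    # `yes` keeps writing after `head` exits, so it dies from SIGPIPE;
    # the shell encodes that as 128 + SIGPIPE (141 on Linux).
    status = subprocess.call(
        ['bash', '-c', 'yes | head -n 1 >/dev/null ; exit ${PIPESTATUS[0]}'])
    print(status == 128 + signal.SIGPIPE)  # True
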
diff --git a/portage_with_autodep/pym/_emerge/BinpkgFetcher.py b/portage_with_autodep/pym/_emerge/BinpkgFetcher.py
new file mode 100644
index 0000000..baea4d6
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BinpkgFetcher.py
@@ -0,0 +1,181 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AsynchronousLock import AsynchronousLock
+from _emerge.SpawnProcess import SpawnProcess
+try:
+ from urllib.parse import urlparse as urllib_parse_urlparse
+except ImportError:
+ from urlparse import urlparse as urllib_parse_urlparse
+import stat
+import sys
+import portage
+from portage import os
+from portage.util._pty import _create_pty_or_pipe
+
+if sys.hexversion >= 0x3000000:
+ long = int
+
+class BinpkgFetcher(SpawnProcess):
+
+ __slots__ = ("pkg", "pretend",
+ "locked", "pkg_path", "_lock_obj")
+
+ def __init__(self, **kwargs):
+ SpawnProcess.__init__(self, **kwargs)
+ pkg = self.pkg
+ self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
+
+ def _start(self):
+
+ if self.cancelled:
+ return
+
+ pkg = self.pkg
+ pretend = self.pretend
+ bintree = pkg.root_config.trees["bintree"]
+ settings = bintree.settings
+ use_locks = "distlocks" in settings.features
+ pkg_path = self.pkg_path
+
+ if not pretend:
+ portage.util.ensure_dirs(os.path.dirname(pkg_path))
+ if use_locks:
+ self.lock()
+ exists = os.path.exists(pkg_path)
+ resume = exists and os.path.basename(pkg_path) in bintree.invalids
+ if not (pretend or resume):
+ # Remove existing file or broken symlink.
+ try:
+ os.unlink(pkg_path)
+ except OSError:
+ pass
+
+ # urljoin doesn't work correctly with
+ # unrecognized protocols like sftp
+ if bintree._remote_has_index:
+ rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
+ if not rel_uri:
+ rel_uri = pkg.cpv + ".tbz2"
+ remote_base_uri = bintree._remotepkgs[pkg.cpv]["BASE_URI"]
+ uri = remote_base_uri.rstrip("/") + "/" + rel_uri.lstrip("/")
+ else:
+ uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
+ "/" + pkg.pf + ".tbz2"
+
+ if pretend:
+ portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
+ self._set_returncode((self.pid, os.EX_OK << 8))
+ self.wait()
+ return
+
+ protocol = urllib_parse_urlparse(uri)[0]
+ fcmd_prefix = "FETCHCOMMAND"
+ if resume:
+ fcmd_prefix = "RESUMECOMMAND"
+ fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
+ if not fcmd:
+ fcmd = settings.get(fcmd_prefix)
+
+ fcmd_vars = {
+ "DISTDIR" : os.path.dirname(pkg_path),
+ "URI" : uri,
+ "FILE" : os.path.basename(pkg_path)
+ }
+
+ fetch_env = dict(settings.items())
+ fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
+ for x in portage.util.shlex_split(fcmd)]
+
+ if self.fd_pipes is None:
+ self.fd_pipes = {}
+ fd_pipes = self.fd_pipes
+
+ # Redirect all output to stdout since some fetchers like
+ # wget pollute stderr (if portage detects a problem then it
+ # can send its own message to stderr).
+ fd_pipes.setdefault(0, sys.stdin.fileno())
+ fd_pipes.setdefault(1, sys.stdout.fileno())
+ fd_pipes.setdefault(2, sys.stdout.fileno())
+
+ self.args = fetch_args
+ self.env = fetch_env
+ if settings.selinux_enabled():
+ self._selinux_type = settings["PORTAGE_FETCH_T"]
+ SpawnProcess._start(self)
+
+ def _pipe(self, fd_pipes):
+ """When appropriate, use a pty so that fetcher progress bars,
+ like wget's, will work properly."""
+ if self.background or not sys.stdout.isatty():
+ # When the output only goes to a log file,
+ # there's no point in creating a pty.
+ return os.pipe()
+ stdout_pipe = None
+ if not self.background:
+ stdout_pipe = fd_pipes.get(1)
+ got_pty, master_fd, slave_fd = \
+ _create_pty_or_pipe(copy_term_size=stdout_pipe)
+ return (master_fd, slave_fd)
+
+ def _set_returncode(self, wait_retval):
+ SpawnProcess._set_returncode(self, wait_retval)
+ if not self.pretend and self.returncode == os.EX_OK:
+ # If possible, update the mtime to match the remote package if
+ # the fetcher didn't already do it automatically.
+ bintree = self.pkg.root_config.trees["bintree"]
+ if bintree._remote_has_index:
+ remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
+ if remote_mtime is not None:
+ try:
+ remote_mtime = long(remote_mtime)
+ except ValueError:
+ pass
+ else:
+ try:
+ local_mtime = os.stat(self.pkg_path)[stat.ST_MTIME]
+ except OSError:
+ pass
+ else:
+ if remote_mtime != local_mtime:
+ try:
+ os.utime(self.pkg_path,
+ (remote_mtime, remote_mtime))
+ except OSError:
+ pass
+
+ if self.locked:
+ self.unlock()
+
+ def lock(self):
+ """
+ This raises an AlreadyLocked exception if lock() is called
+ while a lock is already held. In order to avoid this, call
+ unlock() or check whether the "locked" attribute is True
+ or False before calling lock().
+ """
+ if self._lock_obj is not None:
+ raise self.AlreadyLocked((self._lock_obj,))
+
+ async_lock = AsynchronousLock(path=self.pkg_path,
+ scheduler=self.scheduler)
+ async_lock.start()
+
+ if async_lock.wait() != os.EX_OK:
+ # TODO: Use CompositeTask for better handling, like in EbuildPhase.
+ raise AssertionError("AsynchronousLock failed with returncode %s" \
+ % (async_lock.returncode,))
+
+ self._lock_obj = async_lock
+ self.locked = True
+
+ class AlreadyLocked(portage.exception.PortageException):
+ pass
+
+ def unlock(self):
+ if self._lock_obj is None:
+ return
+ self._lock_obj.unlock()
+ self._lock_obj = None
+ self.locked = False
+
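The fetch command assembly in _start() first looks for a protocol-specific override (e.g. FETCHCOMMAND_HTTP) before falling back to the generic FETCHCOMMAND, then expands ${DISTDIR}, ${URI} and ${FILE} with portage.util.varexpand(). A sketch with illustrative values, assuming portage is importable:

    import portage.util

    settings = {"FETCHCOMMAND":
        'wget -O "${DISTDIR}/${FILE}" "${URI}"'}
    uri = "http://example.org/packages/foo-1.0.tbz2"
    protocol = "http"

    fcmd = settings.get("FETCHCOMMAND_" + protocol.upper()) \
        or settings.get("FETCHCOMMAND")
    fcmd_vars = {
        "DISTDIR": "/var/tmp/binpkgs",
        "URI": uri,
        "FILE": "foo-1.0.tbz2",
    }
    args = [portage.util.varexpand(x, mydict=fcmd_vars)
            for x in portage.util.shlex_split(fcmd)]
    print(args)
    # ['wget', '-O', '/var/tmp/binpkgs/foo-1.0.tbz2',
    #  'http://example.org/packages/foo-1.0.tbz2']
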
diff --git a/portage_with_autodep/pym/_emerge/BinpkgPrefetcher.py b/portage_with_autodep/pym/_emerge/BinpkgPrefetcher.py
new file mode 100644
index 0000000..ffa4900
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BinpkgPrefetcher.py
@@ -0,0 +1,43 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.BinpkgFetcher import BinpkgFetcher
+from _emerge.CompositeTask import CompositeTask
+from _emerge.BinpkgVerifier import BinpkgVerifier
+from portage import os
+
+class BinpkgPrefetcher(CompositeTask):
+
+ __slots__ = ("pkg",) + \
+ ("pkg_path", "_bintree",)
+
+ def _start(self):
+ self._bintree = self.pkg.root_config.trees["bintree"]
+ fetcher = BinpkgFetcher(background=self.background,
+ logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
+ scheduler=self.scheduler)
+ self.pkg_path = fetcher.pkg_path
+ self._start_task(fetcher, self._fetcher_exit)
+
+ def _fetcher_exit(self, fetcher):
+
+ if self._default_exit(fetcher) != os.EX_OK:
+ self.wait()
+ return
+
+ verifier = BinpkgVerifier(background=self.background,
+ logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
+ scheduler=self.scheduler)
+ self._start_task(verifier, self._verifier_exit)
+
+ def _verifier_exit(self, verifier):
+ if self._default_exit(verifier) != os.EX_OK:
+ self.wait()
+ return
+
+ self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
+
+ self._current_task = None
+ self.returncode = os.EX_OK
+ self.wait()
+
diff --git a/portage_with_autodep/pym/_emerge/BinpkgVerifier.py b/portage_with_autodep/pym/_emerge/BinpkgVerifier.py
new file mode 100644
index 0000000..0052967
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BinpkgVerifier.py
@@ -0,0 +1,75 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AsynchronousTask import AsynchronousTask
+from portage.util import writemsg
+import io
+import sys
+import portage
+from portage import os
+from portage.package.ebuild.fetch import _checksum_failure_temp_file
+
+class BinpkgVerifier(AsynchronousTask):
+ __slots__ = ("logfile", "pkg", "scheduler")
+
+ def _start(self):
+ """
+ Note: Unlike a normal AsynchronousTask.start() method,
+ this one does all of its work synchronously. The returncode
+ attribute will be set before it returns.
+ """
+
+ pkg = self.pkg
+ root_config = pkg.root_config
+ bintree = root_config.trees["bintree"]
+ rval = os.EX_OK
+ stdout_orig = sys.stdout
+ stderr_orig = sys.stderr
+ global_havecolor = portage.output.havecolor
+ out = io.StringIO()
+ file_exists = True
+ try:
+ sys.stdout = out
+ sys.stderr = out
+ if portage.output.havecolor:
+ portage.output.havecolor = not self.background
+ try:
+ bintree.digestCheck(pkg)
+ except portage.exception.FileNotFound:
+ writemsg("!!! Fetching Binary failed " + \
+ "for '%s'\n" % pkg.cpv, noiselevel=-1)
+ rval = 1
+ file_exists = False
+ except portage.exception.DigestException as e:
+ writemsg("\n!!! Digest verification failed:\n",
+ noiselevel=-1)
+ writemsg("!!! %s\n" % e.value[0],
+ noiselevel=-1)
+ writemsg("!!! Reason: %s\n" % e.value[1],
+ noiselevel=-1)
+ writemsg("!!! Got: %s\n" % e.value[2],
+ noiselevel=-1)
+ writemsg("!!! Expected: %s\n" % e.value[3],
+ noiselevel=-1)
+ rval = 1
+ if rval == os.EX_OK:
+ pass
+ elif file_exists:
+ pkg_path = bintree.getname(pkg.cpv)
+ head, tail = os.path.split(pkg_path)
+ temp_filename = _checksum_failure_temp_file(head, tail)
+ writemsg("File renamed to '%s'\n" % (temp_filename,),
+ noiselevel=-1)
+ finally:
+ sys.stdout = stdout_orig
+ sys.stderr = stderr_orig
+ portage.output.havecolor = global_havecolor
+
+ msg = out.getvalue()
+ if msg:
+ self.scheduler.output(msg, log_path=self.logfile,
+ background=self.background)
+
+ self.returncode = rval
+ self.wait()
+
diff --git a/portage_with_autodep/pym/_emerge/Blocker.py b/portage_with_autodep/pym/_emerge/Blocker.py
new file mode 100644
index 0000000..9304606
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/Blocker.py
@@ -0,0 +1,15 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.Task import Task
+
+class Blocker(Task):
+
+ __hash__ = Task.__hash__
+ __slots__ = ("root", "atom", "cp", "eapi", "priority", "satisfied")
+
+ def __init__(self, **kwargs):
+ Task.__init__(self, **kwargs)
+ self.cp = self.atom.cp
+ self._hash_key = ("blocks", self.root, self.atom, self.eapi)
+ self._hash_value = hash(self._hash_key)
diff --git a/portage_with_autodep/pym/_emerge/BlockerCache.py b/portage_with_autodep/pym/_emerge/BlockerCache.py
new file mode 100644
index 0000000..5c4f43e
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BlockerCache.py
@@ -0,0 +1,182 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+from portage.util import writemsg
+from portage.data import secpass
+import portage
+from portage import os
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+ long = int
+
+class BlockerCache(portage.cache.mappings.MutableMapping):
+ """This caches blockers of installed packages so that dep_check does not
+ have to be done for every single installed package on every invocation of
+ emerge. The cache is invalidated whenever it is detected that something
+ has changed that might alter the results of dep_check() calls:
+ 1) the set of installed packages (including COUNTER) has changed
+ """
+
+ # Number of uncached packages to trigger cache update, since
+ # it's wasteful to update it for every vdb change.
+ _cache_threshold = 5
+
+ class BlockerData(object):
+
+ __slots__ = ("__weakref__", "atoms", "counter")
+
+ def __init__(self, counter, atoms):
+ self.counter = counter
+ self.atoms = atoms
+
+ def __init__(self, myroot, vardb):
+ """ myroot is ignored in favour of EROOT """
+ self._vardb = vardb
+ self._cache_filename = os.path.join(vardb.settings['EROOT'],
+ portage.CACHE_PATH, "vdb_blockers.pickle")
+ self._cache_version = "1"
+ self._cache_data = None
+ self._modified = set()
+ self._load()
+
+ def _load(self):
+ try:
+ f = open(self._cache_filename, mode='rb')
+ mypickle = pickle.Unpickler(f)
+ try:
+ mypickle.find_global = None
+ except AttributeError:
+ # TODO: If py3k, override Unpickler.find_class().
+ pass
+ self._cache_data = mypickle.load()
+ f.close()
+ del f
+ except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError) as e:
+ if isinstance(e, pickle.UnpicklingError):
+ writemsg("!!! Error loading '%s': %s\n" % \
+ (self._cache_filename, str(e)), noiselevel=-1)
+ del e
+
+ cache_valid = self._cache_data and \
+ isinstance(self._cache_data, dict) and \
+ self._cache_data.get("version") == self._cache_version and \
+ isinstance(self._cache_data.get("blockers"), dict)
+ if cache_valid:
+ # Validate all the atoms and counters so that
+ # corruption is detected as soon as possible.
+ invalid_items = set()
+ for k, v in self._cache_data["blockers"].items():
+ if not isinstance(k, basestring):
+ invalid_items.add(k)
+ continue
+ try:
+ if portage.catpkgsplit(k) is None:
+ invalid_items.add(k)
+ continue
+ except portage.exception.InvalidData:
+ invalid_items.add(k)
+ continue
+ if not isinstance(v, tuple) or \
+ len(v) != 2:
+ invalid_items.add(k)
+ continue
+ counter, atoms = v
+ if not isinstance(counter, (int, long)):
+ invalid_items.add(k)
+ continue
+ if not isinstance(atoms, (list, tuple)):
+ invalid_items.add(k)
+ continue
+ invalid_atom = False
+ for atom in atoms:
+ if not isinstance(atom, basestring):
+ invalid_atom = True
+ break
+ if atom[:1] != "!" or \
+ not portage.isvalidatom(
+ atom, allow_blockers=True):
+ invalid_atom = True
+ break
+ if invalid_atom:
+ invalid_items.add(k)
+ continue
+
+ for k in invalid_items:
+ del self._cache_data["blockers"][k]
+ if not self._cache_data["blockers"]:
+ cache_valid = False
+
+ if not cache_valid:
+ self._cache_data = {"version":self._cache_version}
+ self._cache_data["blockers"] = {}
+ self._modified.clear()
+
+ def flush(self):
+ """If the current user has permission and the internal blocker cache
+ been updated, save it to disk and mark it unmodified. This is called
+ by emerge after it has proccessed blockers for all installed packages.
+ Currently, the cache is only written if the user has superuser
+ privileges (since that's required to obtain a lock), but all users
+ have read access and benefit from faster blocker lookups (as long as
+ the entire cache is still valid). The cache is stored as a pickled
+ dict object with the following format:
+
+ {
+ version : "1",
+ "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
+ }
+ """
+ if len(self._modified) >= self._cache_threshold and \
+ secpass >= 2:
+ try:
+ f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
+ pickle.dump(self._cache_data, f, protocol=2)
+ f.close()
+ portage.util.apply_secpass_permissions(
+ self._cache_filename, gid=portage.portage_gid, mode=0o644)
+ except (IOError, OSError) as e:
+ pass
+ self._modified.clear()
+
+ def __setitem__(self, cpv, blocker_data):
+ """
+ Update the cache and mark it as modified for a future call to
+ self.flush().
+
+ @param cpv: Package for which to cache blockers.
+ @type cpv: String
+ @param blocker_data: An object with counter and atoms attributes.
+ @type blocker_data: BlockerData
+ """
+ self._cache_data["blockers"][cpv] = \
+ (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
+ self._modified.add(cpv)
+
+ def __iter__(self):
+ if self._cache_data is None:
+ # triggered by python-trace
+ return iter([])
+ return iter(self._cache_data["blockers"])
+
+ def __len__(self):
+ """This needs to be implemented in order to avoid
+ infinite recursion in some cases."""
+ return len(self._cache_data["blockers"])
+
+ def __delitem__(self, cpv):
+ del self._cache_data["blockers"][cpv]
+
+ def __getitem__(self, cpv):
+ """
+ @rtype: BlockerData
+ @returns: An object with counter and atoms attributes.
+ """
+ return self.BlockerData(*self._cache_data["blockers"][cpv])
+
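
As a rough illustration of the design above — a dict persisted with pickle, validated on load, and rewritten only once enough entries have changed — here is a stripped-down, self-contained sketch (the file name, threshold, and schema are illustrative only):

    import os
    import pickle

    class SimplePickleCache:
        _cache_version = "1"
        _threshold = 5  # don't rewrite the file for every small change

        def __init__(self, path):
            self._path = path
            self._modified = set()
            try:
                with open(path, "rb") as f:
                    data = pickle.load(f)
            except (OSError, EOFError, ValueError, pickle.UnpicklingError):
                data = None
            if not (isinstance(data, dict) and
                    data.get("version") == self._cache_version and
                    isinstance(data.get("items"), dict)):
                # Invalid or corrupt cache: start fresh.
                data = {"version": self._cache_version, "items": {}}
            self._data = data

        def __setitem__(self, key, value):
            self._data["items"][key] = value
            self._modified.add(key)

        def __getitem__(self, key):
            return self._data["items"][key]

        def flush(self):
            if len(self._modified) >= self._threshold:
                tmp = self._path + ".tmp"
                with open(tmp, "wb") as f:
                    pickle.dump(self._data, f, protocol=2)
                os.rename(tmp, self._path)  # atomic replacement
            self._modified.clear()
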
diff --git a/portage_with_autodep/pym/_emerge/BlockerDB.py b/portage_with_autodep/pym/_emerge/BlockerDB.py
new file mode 100644
index 0000000..4819749
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BlockerDB.py
@@ -0,0 +1,124 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+
+import portage
+from portage import os
+from portage import digraph
+from portage._sets.base import InternalPackageSet
+
+from _emerge.BlockerCache import BlockerCache
+from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
+
+if sys.hexversion >= 0x3000000:
+ long = int
+
+class BlockerDB(object):
+
+ def __init__(self, fake_vartree):
+ root_config = fake_vartree._root_config
+ self._root_config = root_config
+ self._vartree = root_config.trees["vartree"]
+ self._portdb = root_config.trees["porttree"].dbapi
+
+ self._dep_check_trees = None
+ self._fake_vartree = fake_vartree
+ self._dep_check_trees = {
+ self._vartree.root : {
+ "porttree" : fake_vartree,
+ "vartree" : fake_vartree,
+ }}
+
+ def findInstalledBlockers(self, new_pkg):
+ """
+ Search for installed run-time blockers in the root where
+ new_pkg is planned to be installed. This ignores build-time
+ blockers, since new_pkg is assumed to be built already.
+ """
+ blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
+ dep_keys = ["RDEPEND", "PDEPEND"]
+ settings = self._vartree.settings
+ stale_cache = set(blocker_cache)
+ fake_vartree = self._fake_vartree
+ dep_check_trees = self._dep_check_trees
+ vardb = fake_vartree.dbapi
+ installed_pkgs = list(vardb)
+
+ for inst_pkg in installed_pkgs:
+ stale_cache.discard(inst_pkg.cpv)
+ cached_blockers = blocker_cache.get(inst_pkg.cpv)
+ if cached_blockers is not None and \
+ cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
+ cached_blockers = None
+ if cached_blockers is not None:
+ blocker_atoms = cached_blockers.atoms
+ else:
+ # Use aux_get() to trigger FakeVartree global
+ # updates on *DEPEND when appropriate.
+ depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
+ success, atoms = portage.dep_check(depstr,
+ vardb, settings, myuse=inst_pkg.use.enabled,
+ trees=dep_check_trees, myroot=inst_pkg.root)
+ if not success:
+ pkg_location = os.path.join(inst_pkg.root,
+ portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
+ portage.writemsg("!!! %s/*DEPEND: %s\n" % \
+ (pkg_location, atoms), noiselevel=-1)
+ continue
+
+ blocker_atoms = [atom for atom in atoms \
+ if atom.startswith("!")]
+ blocker_atoms.sort()
+ counter = long(inst_pkg.metadata["COUNTER"])
+ blocker_cache[inst_pkg.cpv] = \
+ blocker_cache.BlockerData(counter, blocker_atoms)
+ for cpv in stale_cache:
+ del blocker_cache[cpv]
+ blocker_cache.flush()
+
+ blocker_parents = digraph()
+ blocker_atoms = []
+ for pkg in installed_pkgs:
+ for blocker_atom in blocker_cache[pkg.cpv].atoms:
+ blocker_atom = blocker_atom.lstrip("!")
+ blocker_atoms.append(blocker_atom)
+ blocker_parents.add(blocker_atom, pkg)
+
+ blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
+ blocking_pkgs = set()
+ for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
+ blocking_pkgs.update(blocker_parents.parent_nodes(atom))
+
+ # Check for blockers in the other direction.
+ depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
+ success, atoms = portage.dep_check(depstr,
+ vardb, settings, myuse=new_pkg.use.enabled,
+ trees=dep_check_trees, myroot=new_pkg.root)
+ if not success:
+ # We should never get this far with invalid deps.
+ show_invalid_depstring_notice(new_pkg, depstr, atoms)
+ assert False
+
+ blocker_atoms = [atom.lstrip("!") for atom in atoms \
+ if atom[:1] == "!"]
+ if blocker_atoms:
+ blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
+ for inst_pkg in installed_pkgs:
+ try:
+ next(blocker_atoms.iterAtomsForPackage(inst_pkg))
+ except (portage.exception.InvalidDependString, StopIteration):
+ continue
+ blocking_pkgs.add(inst_pkg)
+
+ return blocking_pkgs
+
+ def discardBlocker(self, pkg):
+ """Discard a package from the list of potential blockers.
+ This will match any package(s) with identical cpv or cp:slot."""
+ for cpv_match in self._fake_vartree.dbapi.match_pkgs("=%s" % (pkg.cpv,)):
+ if cpv_match.cp == pkg.cp:
+ self._fake_vartree.cpv_discard(cpv_match)
+ for slot_match in self._fake_vartree.dbapi.match_pkgs(pkg.slot_atom):
+ if slot_match.cp == pkg.cp:
+ self._fake_vartree.cpv_discard(slot_match)
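
The blocker search above runs in both directions: blocker atoms of installed packages are matched against the new package, and the new package's own blocker atoms are matched against installed packages. A toy version, with name equality standing in for real atom matching:

    # Hypothetical miniature of the two-way blocker check: atoms are
    # reduced to plain package names and "matching" is name equality.
    installed = {
        "sys-apps/foo": ["!sys-apps/bar"],   # foo blocks bar
        "sys-apps/baz": [],
    }

    def find_blockers(new_pkg, new_pkg_blockers):
        blocking = set()
        # Direction 1: an installed package blocks the new one.
        for pkg, blockers in installed.items():
            if any(atom.lstrip("!") == new_pkg for atom in blockers):
                blocking.add(pkg)
        # Direction 2: the new package blocks an installed one.
        for atom in new_pkg_blockers:
            target = atom.lstrip("!")
            if target in installed:
                blocking.add(target)
        return blocking

    print(sorted(find_blockers("sys-apps/bar", ["!sys-apps/baz"])))
    # ['sys-apps/baz', 'sys-apps/foo']
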
diff --git a/portage_with_autodep/pym/_emerge/BlockerDepPriority.py b/portage_with_autodep/pym/_emerge/BlockerDepPriority.py
new file mode 100644
index 0000000..1004a37
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BlockerDepPriority.py
@@ -0,0 +1,13 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DepPriority import DepPriority
+class BlockerDepPriority(DepPriority):
+ __slots__ = ()
+ def __int__(self):
+ return 0
+
+ def __str__(self):
+ return 'blocker'
+
+BlockerDepPriority.instance = BlockerDepPriority()
diff --git a/portage_with_autodep/pym/_emerge/CompositeTask.py b/portage_with_autodep/pym/_emerge/CompositeTask.py
new file mode 100644
index 0000000..644a69b
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/CompositeTask.py
@@ -0,0 +1,157 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AsynchronousTask import AsynchronousTask
+from portage import os
+
+class CompositeTask(AsynchronousTask):
+
+ __slots__ = ("scheduler",) + ("_current_task",)
+
+ _TASK_QUEUED = -1
+
+ def isAlive(self):
+ return self._current_task is not None
+
+ def _cancel(self):
+ if self._current_task is not None:
+ if self._current_task is self._TASK_QUEUED:
+ self.returncode = 1
+ self._current_task = None
+ else:
+ self._current_task.cancel()
+
+ def _poll(self):
+ """
+ This does a loop calling self._current_task.poll()
+ repeatedly as long as the value of self._current_task
+ keeps changing. It calls poll() a maximum of one time
+ for a given self._current_task instance. This is useful
+ since calling poll() on a task can trigger advance to
+ the next task could eventually lead to the returncode
+ being set in cases when polling only a single task would
+ not have the same effect.
+ """
+
+ prev = None
+ while True:
+ task = self._current_task
+ if task is None or \
+ task is self._TASK_QUEUED or \
+ task is prev:
+ # don't poll the same task more than once
+ break
+ task.poll()
+ prev = task
+
+ return self.returncode
+
+ def _wait(self):
+
+ prev = None
+ while True:
+ task = self._current_task
+ if task is None:
+ # don't wait for the same task more than once
+ break
+ if task is self._TASK_QUEUED:
+ if self.cancelled:
+ self.returncode = 1
+ self._current_task = None
+ break
+ else:
+ self.scheduler.schedule(condition=self._task_queued_wait)
+ if self.returncode is not None:
+ break
+ elif self.cancelled:
+ self.returncode = 1
+ self._current_task = None
+ break
+ else:
+ # try this again with new _current_task value
+ continue
+ if task is prev:
+ if self.returncode is not None:
+ # This is expected if we're being
+ # called from the task's exit listener
+ # after it's been cancelled.
+ break
+ # Before the task.wait() method returned, an exit
+ # listener should have set self._current_task to either
+ # a different task or None. Something is wrong.
+ raise AssertionError("self._current_task has not " + \
+ "changed since calling wait", self, task)
+ task.wait()
+ prev = task
+
+ return self.returncode
+
+ def _assert_current(self, task):
+ """
+ Raises an AssertionError if the given task is not the
+ same one as self._current_task. This can be useful
+ for detecting bugs.
+ """
+ if task is not self._current_task:
+ raise AssertionError("Unrecognized task: %s" % (task,))
+
+ def _default_exit(self, task):
+ """
+ Calls _assert_current() on the given task and then sets the
+ composite returncode attribute if task.returncode != os.EX_OK.
+ If the task failed then self._current_task will be set to None.
+ Subclasses can use this as a generic task exit callback.
+
+ @rtype: int
+ @returns: The task.returncode attribute.
+ """
+ self._assert_current(task)
+ if task.returncode != os.EX_OK:
+ self.returncode = task.returncode
+ self._current_task = None
+ return task.returncode
+
+ def _final_exit(self, task):
+ """
+ Assumes that task is the final task of this composite task.
+ Calls _default_exit() and sets self.returncode to the task's
+ returncode and sets self._current_task to None.
+ """
+ self._default_exit(task)
+ self._current_task = None
+ self.returncode = task.returncode
+ return self.returncode
+
+ def _default_final_exit(self, task):
+ """
+ This calls _final_exit() and then wait().
+
+ Subclasses can use this as a generic final task exit callback.
+
+ """
+ self._final_exit(task)
+ return self.wait()
+
+ def _start_task(self, task, exit_handler):
+ """
+ Register exit handler for the given task, set it
+ as self._current_task, and call task.start().
+
+ Subclasses can use this as a generic way to start
+ a task.
+
+ """
+ task.addExitListener(exit_handler)
+ self._current_task = task
+ task.start()
+
+ def _task_queued(self, task):
+ task.addStartListener(self._task_queued_start_handler)
+ self._current_task = self._TASK_QUEUED
+
+ def _task_queued_start_handler(self, task):
+ self._current_task = task
+
+ def _task_queued_wait(self):
+ return self._current_task is not self._TASK_QUEUED or \
+ self.cancelled or self.returncode is not None
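
CompositeTask's contract is easiest to see with a toy synchronous analogue: each subtask reports completion through an exit listener, and the handler either records a failure or starts the next subtask. This sketch runs tasks inline rather than through a scheduler:

    import os

    class ToyTask:
        def __init__(self, name, returncode=os.EX_OK):
            self.name = name
            self.returncode = returncode
            self._exit_listeners = []

        def addExitListener(self, cb):
            self._exit_listeners.append(cb)

        def start(self):
            print("running", self.name)
            for cb in self._exit_listeners:
                cb(self)

    class ToyComposite:
        def start(self):
            self._start_task(ToyTask("build"), self._build_exit)

        def _start_task(self, task, exit_handler):
            # Same shape as CompositeTask._start_task().
            task.addExitListener(exit_handler)
            self._current_task = task
            task.start()

        def _build_exit(self, task):
            if task.returncode != os.EX_OK:
                self.returncode = task.returncode
                return
            self._start_task(ToyTask("install"), self._final_exit)

        def _final_exit(self, task):
            self._current_task = None
            self.returncode = task.returncode

    ToyComposite().start()  # prints "running build", then "running install"
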
diff --git a/portage_with_autodep/pym/_emerge/DepPriority.py b/portage_with_autodep/pym/_emerge/DepPriority.py
new file mode 100644
index 0000000..3c2256a
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/DepPriority.py
@@ -0,0 +1,49 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AbstractDepPriority import AbstractDepPriority
+class DepPriority(AbstractDepPriority):
+
+ __slots__ = ("satisfied", "optional", "ignored")
+
+ def __int__(self):
+ """
+ Note: These priorities are only used for measuring hardness
+ in the circular dependency display via digraph.debug_print(),
+ and nothing more. For actual merge order calculations, the
+ measures defined by the DepPriorityNormalRange and
+ DepPrioritySatisfiedRange classes are used.
+
+ Attributes Hardness
+
+ buildtime 0
+ runtime -1
+ runtime_post -2
+ optional -3
+ (none of the above) -4
+
+ """
+
+ if self.optional:
+ return -3
+ if self.buildtime:
+ return 0
+ if self.runtime:
+ return -1
+ if self.runtime_post:
+ return -2
+ return -4
+
+ def __str__(self):
+ if self.ignored:
+ return "ignored"
+ if self.optional:
+ return "optional"
+ if self.buildtime:
+ return "buildtime"
+ if self.runtime:
+ return "runtime"
+ if self.runtime_post:
+ return "runtime_post"
+ return "soft"
+
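
Since __int__() maps the priority flags onto a hardness scale, dependency edges can be ranked with ordinary sorting. A simplified stand-in (MiniPriority is not portage's class; it just mirrors the table above):

    class MiniPriority:
        def __init__(self, **flags):
            self.__dict__.update(
                dict(buildtime=False, runtime=False,
                     runtime_post=False, optional=False), **flags)

        def __int__(self):
            if self.optional:
                return -3
            if self.buildtime:
                return 0
            if self.runtime:
                return -1
            if self.runtime_post:
                return -2
            return -4

    edges = [MiniPriority(runtime=True), MiniPriority(buildtime=True),
             MiniPriority(optional=True)]
    edges.sort(key=int, reverse=True)  # hardest first
    print([int(p) for p in edges])  # [0, -1, -3]
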
diff --git a/portage_with_autodep/pym/_emerge/DepPriorityNormalRange.py b/portage_with_autodep/pym/_emerge/DepPriorityNormalRange.py
new file mode 100644
index 0000000..8639554
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/DepPriorityNormalRange.py
@@ -0,0 +1,47 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DepPriority import DepPriority
+class DepPriorityNormalRange(object):
+ """
+ DepPriority properties Index Category
+
+ buildtime HARD
+ runtime 3 MEDIUM
+ runtime_post 2 MEDIUM_SOFT
+ optional 1 SOFT
+ (none of the above) 0 NONE
+ """
+ MEDIUM = 3
+ MEDIUM_SOFT = 2
+ SOFT = 1
+ NONE = 0
+
+ @classmethod
+ def _ignore_optional(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ return bool(priority.optional)
+
+ @classmethod
+ def _ignore_runtime_post(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ return bool(priority.optional or priority.runtime_post)
+
+ @classmethod
+ def _ignore_runtime(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ return bool(priority.optional or not priority.buildtime)
+
+ ignore_medium = _ignore_runtime
+ ignore_medium_soft = _ignore_runtime_post
+ ignore_soft = _ignore_optional
+
+DepPriorityNormalRange.ignore_priority = (
+ None,
+ DepPriorityNormalRange._ignore_optional,
+ DepPriorityNormalRange._ignore_runtime_post,
+ DepPriorityNormalRange._ignore_runtime
+)
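
The ignore_priority tuple is meant to be walked from front to back: first try with nothing ignored (None), then retry while ignoring progressively harder edge classes. A toy cycle-breaking loop showing that escalation (hardness levels and the graph are illustrative):

    SOFT, MEDIUM_SOFT, MEDIUM = 1, 2, 3

    # edges: node -> list of (dep, hardness); A and B form a cycle.
    graph = {"A": [("B", MEDIUM)], "B": [("A", SOFT)]}

    def choose_node(graph, max_ignored):
        # A node is schedulable if all its remaining edges may be ignored.
        for node, edges in graph.items():
            if all(hardness <= max_ignored for _, hardness in edges):
                return node
        return None

    for level in (0, SOFT, MEDIUM_SOFT, MEDIUM):  # escalate gradually
        node = choose_node(graph, level)
        if node is not None:
            print("merge %s first, ignoring edges <= level %d" % (node, level))
            break
    # -> merge B first, ignoring edges <= level 1
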
diff --git a/portage_with_autodep/pym/_emerge/DepPrioritySatisfiedRange.py b/portage_with_autodep/pym/_emerge/DepPrioritySatisfiedRange.py
new file mode 100644
index 0000000..edb29df
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/DepPrioritySatisfiedRange.py
@@ -0,0 +1,85 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DepPriority import DepPriority
+class DepPrioritySatisfiedRange(object):
+ """
+ DepPriority Index Category
+
+ not satisfied and buildtime HARD
+ not satisfied and runtime 6 MEDIUM
+ not satisfied and runtime_post 5 MEDIUM_SOFT
+ satisfied and buildtime 4 SOFT
+ satisfied and runtime 3 SOFT
+ satisfied and runtime_post 2 SOFT
+ optional 1 SOFT
+ (none of the above) 0 NONE
+ """
+ MEDIUM = 6
+ MEDIUM_SOFT = 5
+ SOFT = 4
+ NONE = 0
+
+ @classmethod
+ def _ignore_optional(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ return bool(priority.optional)
+
+ @classmethod
+ def _ignore_satisfied_runtime_post(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ if priority.optional:
+ return True
+ if not priority.satisfied:
+ return False
+ return bool(priority.runtime_post)
+
+ @classmethod
+ def _ignore_satisfied_runtime(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ if priority.optional:
+ return True
+ if not priority.satisfied:
+ return False
+ return not priority.buildtime
+
+ @classmethod
+ def _ignore_satisfied_buildtime(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ return bool(priority.optional or \
+ priority.satisfied)
+
+ @classmethod
+ def _ignore_runtime_post(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ return bool(priority.optional or \
+ priority.satisfied or \
+ priority.runtime_post)
+
+ @classmethod
+ def _ignore_runtime(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ return bool(priority.satisfied or \
+ priority.optional or \
+ not priority.buildtime)
+
+ ignore_medium = _ignore_runtime
+ ignore_medium_soft = _ignore_runtime_post
+ ignore_soft = _ignore_satisfied_buildtime
+
+
+DepPrioritySatisfiedRange.ignore_priority = (
+ None,
+ DepPrioritySatisfiedRange._ignore_optional,
+ DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
+ DepPrioritySatisfiedRange._ignore_satisfied_runtime,
+ DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
+ DepPrioritySatisfiedRange._ignore_runtime_post,
+ DepPrioritySatisfiedRange._ignore_runtime
+)
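
The key difference from DepPriorityNormalRange is that a satisfied dependency drops to a soft index even when it is a runtime edge. A tiny check mirroring the two runtime rows of the table above (a simplified mapping, not the real classmethods):

    def index(satisfied, runtime):
        if runtime and not satisfied:
            return 6  # MEDIUM
        if runtime and satisfied:
            return 3  # SOFT
        return 0

    print(index(satisfied=True, runtime=True))   # 3 -> soft, easy to ignore
    print(index(satisfied=False, runtime=True))  # 6 -> medium, kept longer
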
diff --git a/portage_with_autodep/pym/_emerge/Dependency.py b/portage_with_autodep/pym/_emerge/Dependency.py
new file mode 100644
index 0000000..0f746b6
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/Dependency.py
@@ -0,0 +1,20 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DepPriority import DepPriority
+from _emerge.SlotObject import SlotObject
+class Dependency(SlotObject):
+ __slots__ = ("atom", "blocker", "child", "depth",
+ "parent", "onlydeps", "priority", "root",
+ "collapsed_parent", "collapsed_priority")
+ def __init__(self, **kwargs):
+ SlotObject.__init__(self, **kwargs)
+ if self.priority is None:
+ self.priority = DepPriority()
+ if self.depth is None:
+ self.depth = 0
+ if self.collapsed_parent is None:
+ self.collapsed_parent = self.parent
+ if self.collapsed_priority is None:
+ self.collapsed_priority = self.priority
+
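
Dependency leans on the SlotObject convention: slots are filled from keyword arguments, and any left unset receive defaults in __init__. A simplified stand-in showing the pattern (MiniSlotObject is hypothetical, not portage's SlotObject):

    class MiniSlotObject:
        __slots__ = ()

        def __init__(self, **kwargs):
            # Fill every declared slot from kwargs, defaulting to None.
            for cls in type(self).__mro__:
                for name in getattr(cls, "__slots__", ()):
                    setattr(self, name, kwargs.get(name))

    class MiniDependency(MiniSlotObject):
        __slots__ = ("atom", "parent", "depth", "priority")

        def __init__(self, **kwargs):
            MiniSlotObject.__init__(self, **kwargs)
            if self.depth is None:
                self.depth = 0

    d = MiniDependency(atom="sys-apps/foo")
    print(d.atom, d.depth, d.priority)  # sys-apps/foo 0 None
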
diff --git a/portage_with_autodep/pym/_emerge/DependencyArg.py b/portage_with_autodep/pym/_emerge/DependencyArg.py
new file mode 100644
index 0000000..861d837
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/DependencyArg.py
@@ -0,0 +1,33 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+
+from portage import _encodings, _unicode_encode, _unicode_decode
+
+class DependencyArg(object):
+ def __init__(self, arg=None, root_config=None):
+ self.arg = arg
+ self.root_config = root_config
+
+ def __eq__(self, other):
+ if self.__class__ is not other.__class__:
+ return False
+ return self.arg == other.arg and \
+ self.root_config.root == other.root_config.root
+
+ def __hash__(self):
+ return hash((self.arg, self.root_config.root))
+
+ def __str__(self):
+ # Force unicode format string for python-2.x safety,
+ # ensuring that self.arg.__unicode__() is used
+ # when necessary.
+ return _unicode_decode("%s") % (self.arg,)
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(), encoding=_encodings['content'])
diff --git a/portage_with_autodep/pym/_emerge/EbuildBinpkg.py b/portage_with_autodep/pym/_emerge/EbuildBinpkg.py
new file mode 100644
index 0000000..b7d43ba
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildBinpkg.py
@@ -0,0 +1,46 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.CompositeTask import CompositeTask
+from _emerge.EbuildPhase import EbuildPhase
+from portage import os
+
+class EbuildBinpkg(CompositeTask):
+ """
+ This assumes that src_install() has successfully completed.
+ """
+ __slots__ = ('pkg', 'settings') + \
+ ('_binpkg_tmpfile',)
+
+ def _start(self):
+ pkg = self.pkg
+ root_config = pkg.root_config
+ bintree = root_config.trees["bintree"]
+ bintree.prevent_collision(pkg.cpv)
+ binpkg_tmpfile = os.path.join(bintree.pkgdir,
+ pkg.cpv + ".tbz2." + str(os.getpid()))
+ bintree._ensure_dir(os.path.dirname(binpkg_tmpfile))
+
+ self._binpkg_tmpfile = binpkg_tmpfile
+ self.settings["PORTAGE_BINPKG_TMPFILE"] = self._binpkg_tmpfile
+
+ package_phase = EbuildPhase(background=self.background,
+ phase='package', scheduler=self.scheduler,
+ settings=self.settings)
+
+ self._start_task(package_phase, self._package_phase_exit)
+
+ def _package_phase_exit(self, package_phase):
+
+ self.settings.pop("PORTAGE_BINPKG_TMPFILE", None)
+ if self._default_exit(package_phase) != os.EX_OK:
+ self.wait()
+ return
+
+ pkg = self.pkg
+ bintree = pkg.root_config.trees["bintree"]
+ bintree.inject(pkg.cpv, filename=self._binpkg_tmpfile)
+
+ self._current_task = None
+ self.returncode = os.EX_OK
+ self.wait()
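
Writing to a pid-suffixed temporary file and then injecting it is the usual atomic-publish pattern: readers of the final path never observe a half-written package. A generic sketch (paths and payload are illustrative):

    import os

    def publish(path, data):
        tmp = "%s.%d" % (path, os.getpid())  # unique per writer
        with open(tmp, "wb") as f:
            f.write(data)
        os.rename(tmp, path)  # atomic on POSIX: no partial file is visible

    publish("/tmp/demo.tbz2", b"payload")
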
diff --git a/portage_with_autodep/pym/_emerge/EbuildBuild.py b/portage_with_autodep/pym/_emerge/EbuildBuild.py
new file mode 100644
index 0000000..1c423a3
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildBuild.py
@@ -0,0 +1,426 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.EbuildExecuter import EbuildExecuter
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.EbuildBinpkg import EbuildBinpkg
+from _emerge.EbuildFetcher import EbuildFetcher
+from _emerge.CompositeTask import CompositeTask
+from _emerge.EbuildMerge import EbuildMerge
+from _emerge.EbuildFetchonly import EbuildFetchonly
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.EventsAnalyser import EventsAnalyser, FilterProcGenerator
+from _emerge.EventsLogger import EventsLogger
+from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
+from portage.util import writemsg
+import portage
+from portage import os
+from portage.output import colorize
+from portage.package.ebuild.digestcheck import digestcheck
+from portage.package.ebuild.doebuild import _check_temp_dir
+from portage.package.ebuild._spawn_nofetch import spawn_nofetch
+
+class EbuildBuild(CompositeTask):
+
+ __slots__ = ("args_set", "config_pool", "find_blockers",
+ "ldpath_mtimes", "logger", "logserver", "opts", "pkg", "pkg_count",
+ "prefetcher", "settings", "world_atom") + \
+ ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
+
+ def _start(self):
+
+ pkg = self.pkg
+ settings = self.settings
+
+ rval = _check_temp_dir(settings)
+ if rval != os.EX_OK:
+ self.returncode = rval
+ self._current_task = None
+ self.wait()
+ return
+
+ root_config = pkg.root_config
+ tree = "porttree"
+ self._tree = tree
+ portdb = root_config.trees[tree].dbapi
+ settings.setcpv(pkg)
+ settings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
+ if self.opts.buildpkgonly:
+ settings.configdict["pkg"]["MERGE_TYPE"] = "buildonly"
+ else:
+ settings.configdict["pkg"]["MERGE_TYPE"] = "source"
+ ebuild_path = portdb.findname(pkg.cpv, myrepo=pkg.repo)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % pkg.cpv)
+ self._ebuild_path = ebuild_path
+ portage.doebuild_environment(ebuild_path, 'setup',
+ settings=self.settings, db=portdb)
+
+ # Check the manifest here since with --keep-going mode it's
+ # currently possible to get this far with a broken manifest.
+ if not self._check_manifest():
+ self.returncode = 1
+ self._current_task = None
+ self.wait()
+ return
+
+ prefetcher = self.prefetcher
+ if prefetcher is None:
+ pass
+ elif prefetcher.isAlive() and \
+ prefetcher.poll() is None:
+
+ waiting_msg = "Fetching files " + \
+ "in the background. " + \
+ "To view fetch progress, run `tail -f " + \
+ "/var/log/emerge-fetch.log` in another " + \
+ "terminal."
+ msg_prefix = colorize("GOOD", " * ")
+ from textwrap import wrap
+ waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
+ for line in wrap(waiting_msg, 65))
+ if not self.background:
+ writemsg(waiting_msg, noiselevel=-1)
+
+ self._current_task = prefetcher
+ prefetcher.addExitListener(self._prefetch_exit)
+ return
+
+ self._prefetch_exit(prefetcher)
+
+ def _check_manifest(self):
+ success = True
+
+ settings = self.settings
+ if 'strict' in settings.features:
+ settings['O'] = os.path.dirname(self._ebuild_path)
+ quiet_setting = settings.get('PORTAGE_QUIET')
+ settings['PORTAGE_QUIET'] = '1'
+ try:
+ success = digestcheck([], settings, strict=True)
+ finally:
+ if quiet_setting:
+ settings['PORTAGE_QUIET'] = quiet_setting
+ else:
+ del settings['PORTAGE_QUIET']
+
+ return success
+
+ def _prefetch_exit(self, prefetcher):
+
+ opts = self.opts
+ pkg = self.pkg
+ settings = self.settings
+
+ if opts.fetchonly:
+ if opts.pretend:
+ fetcher = EbuildFetchonly(
+ fetch_all=opts.fetch_all_uri,
+ pkg=pkg, pretend=opts.pretend,
+ settings=settings)
+ retval = fetcher.execute()
+ self.returncode = retval
+ self.wait()
+ return
+ else:
+ fetcher = EbuildFetcher(
+ config_pool=self.config_pool,
+ ebuild_path=self._ebuild_path,
+ fetchall=self.opts.fetch_all_uri,
+ fetchonly=self.opts.fetchonly,
+ background=False,
+ logfile=None,
+ pkg=self.pkg,
+ scheduler=self.scheduler)
+ self._start_task(fetcher, self._fetchonly_exit)
+ return
+
+ self._build_dir = EbuildBuildDir(
+ scheduler=self.scheduler, settings=settings)
+ self._build_dir.lock()
+
+ # Cleaning needs to happen before fetch, since the build dir
+ # is used for log handling.
+ msg = " === (%s of %s) Cleaning (%s::%s)" % \
+ (self.pkg_count.curval, self.pkg_count.maxval,
+ self.pkg.cpv, self._ebuild_path)
+ short_msg = "emerge: (%s of %s) %s Clean" % \
+ (self.pkg_count.curval, self.pkg_count.maxval, self.pkg.cpv)
+ self.logger.log(msg, short_msg=short_msg)
+
+ pre_clean_phase = EbuildPhase(background=self.background,
+ phase='clean', scheduler=self.scheduler, settings=self.settings)
+ self._start_task(pre_clean_phase, self._pre_clean_exit)
+
+ def _fetchonly_exit(self, fetcher):
+ self._final_exit(fetcher)
+ if self.returncode != os.EX_OK:
+ portdb = self.pkg.root_config.trees[self._tree].dbapi
+ spawn_nofetch(portdb, self._ebuild_path, settings=self.settings)
+ self.wait()
+
+ def _pre_clean_exit(self, pre_clean_phase):
+ if self._default_exit(pre_clean_phase) != os.EX_OK:
+ self._unlock_builddir()
+ self.wait()
+ return
+
+ # for log handling
+ portage.prepare_build_dirs(self.pkg.root, self.settings, 1)
+
+ fetcher = EbuildFetcher(config_pool=self.config_pool,
+ ebuild_path=self._ebuild_path,
+ fetchall=self.opts.fetch_all_uri,
+ fetchonly=self.opts.fetchonly,
+ background=self.background,
+ logfile=self.settings.get('PORTAGE_LOG_FILE'),
+ pkg=self.pkg, scheduler=self.scheduler)
+
+ try:
+ already_fetched = fetcher.already_fetched(self.settings)
+ except portage.exception.InvalidDependString as e:
+ msg_lines = []
+ msg = "Fetch failed for '%s' due to invalid SRC_URI: %s" % \
+ (self.pkg.cpv, e)
+ msg_lines.append(msg)
+ fetcher._eerror(msg_lines)
+ portage.elog.elog_process(self.pkg.cpv, self.settings)
+ self.returncode = 1
+ self._current_task = None
+ self._unlock_builddir()
+ self.wait()
+ return
+
+ if already_fetched:
+ # This case is optimized to skip the fetch queue.
+ fetcher = None
+ self._fetch_exit(fetcher)
+ return
+
+ # Allow the Scheduler's fetch queue to control the
+ # number of concurrent fetchers.
+ fetcher.addExitListener(self._fetch_exit)
+ self._task_queued(fetcher)
+ self.scheduler.fetch.schedule(fetcher)
+
+ def _fetch_exit(self, fetcher):
+
+ if fetcher is not None and \
+ self._default_exit(fetcher) != os.EX_OK:
+ self._fetch_failed()
+ return
+
+ # discard successful fetch log
+ self._build_dir.clean_log()
+ pkg = self.pkg
+ logger = self.logger
+ opts = self.opts
+ pkg_count = self.pkg_count
+ scheduler = self.scheduler
+ settings = self.settings
+ features = settings.features
+ ebuild_path = self._ebuild_path
+ system_set = pkg.root_config.sets["system"]
+
+ #buildsyspkg: Check if we need to _force_ binary package creation
+ self._issyspkg = "buildsyspkg" in features and \
+ system_set.findAtomForPackage(pkg) and \
+ not opts.buildpkg
+
+ if opts.buildpkg or self._issyspkg:
+
+ self._buildpkg = True
+
+ msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
+ short_msg = "emerge: (%s of %s) %s Compile" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv)
+ logger.log(msg, short_msg=short_msg)
+
+ else:
+ msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
+ short_msg = "emerge: (%s of %s) %s Compile" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv)
+ logger.log(msg, short_msg=short_msg)
+
+ build = EbuildExecuter(background=self.background, pkg=pkg,
+ scheduler=scheduler, settings=settings)
+
+ build.addStartListener(self._build_start)
+ build.addExitListener(self._build_stop)
+
+ self._start_task(build, self._build_exit)
+
+ def _build_start(self, phase):
+ if "depcheck" in self.settings["FEATURES"] or \
+ "depcheckstrict" in self.settings["FEATURES"]:
+ # Let's start a log-listening server
+ temp_path = self.settings.get("T", self.settings["PORTAGE_TMPDIR"])
+
+ if "depcheckstrict" not in self.settings["FEATURES"]:
+ # use the default filter_proc
+ self.logserver = EventsLogger(socket_dir=temp_path)
+ else:
+ portage.util.writemsg("Getting list of allowed files... " + \
+ "This may take some time\n")
+ filter_gen = FilterProcGenerator(self.pkg.cpv, self.settings)
+ filter_proc = filter_gen.get_filter_proc()
+ self.logserver = EventsLogger(socket_dir=temp_path,
+ filter_proc=filter_proc)
+
+ self.logserver.start()
+
+ # Copy the socket path into the LOG_SOCKET environment variable
+ env = self.settings.configdict["pkg"]
+ env['LOG_SOCKET'] = self.logserver.socket_name
+
+ #import pdb; pdb.set_trace()
+
+ def _build_stop(self, phase):
+ if "depcheck" in self.settings["FEATURES"] or \
+ "depcheckstrict" in self.settings["FEATURES"]:
+ # Delete LOG_SOCKET from the environment
+ env = self.settings.configdict["pkg"]
+ if 'LOG_SOCKET' in env:
+ del env['LOG_SOCKET']
+
+ events = self.logserver.stop()
+ self.logserver = None
+ analyser = EventsAnalyser(self.pkg.cpv, events, self.settings)
+ analyser.display() # show the analysis
+
+ #import pdb; pdb.set_trace()
+
+
+
+ def _fetch_failed(self):
+ # We only call the pkg_nofetch phase if either RESTRICT=fetch
+ # is set or the package has explicitly overridden the default
+ # pkg_nofetch implementation. This allows specialized messages
+ # to be displayed for problematic packages even though they do
+ # not set RESTRICT=fetch (bug #336499).
+
+ if 'fetch' not in self.pkg.metadata.restrict and \
+ 'nofetch' not in self.pkg.metadata.defined_phases:
+ self._unlock_builddir()
+ self.wait()
+ return
+
+ self.returncode = None
+ nofetch_phase = EbuildPhase(background=self.background,
+ phase='nofetch', scheduler=self.scheduler, settings=self.settings)
+ self._start_task(nofetch_phase, self._nofetch_exit)
+
+ def _nofetch_exit(self, nofetch_phase):
+ self._final_exit(nofetch_phase)
+ self._unlock_builddir()
+ self.returncode = 1
+ self.wait()
+
+ def _unlock_builddir(self):
+ portage.elog.elog_process(self.pkg.cpv, self.settings)
+ self._build_dir.unlock()
+
+ def _build_exit(self, build):
+ if self._default_exit(build) != os.EX_OK:
+ self._unlock_builddir()
+ self.wait()
+ return
+
+ buildpkg = self._buildpkg
+
+ if not buildpkg:
+ self._final_exit(build)
+ self.wait()
+ return
+
+ if self._issyspkg:
+ msg = ">>> This is a system package, " + \
+ "let's pack a rescue tarball.\n"
+ self.scheduler.output(msg,
+ log_path=self.settings.get("PORTAGE_LOG_FILE"))
+
+ packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
+ scheduler=self.scheduler, settings=self.settings)
+
+ self._start_task(packager, self._buildpkg_exit)
+
+ def _buildpkg_exit(self, packager):
+ """
+ Release the build dir lock when there is a failure or
+ when in buildpkgonly mode. Otherwise, the lock will
+ be released when merge() is called.
+ """
+
+ if self._default_exit(packager) != os.EX_OK:
+ self._unlock_builddir()
+ self.wait()
+ return
+
+ if self.opts.buildpkgonly:
+ phase = 'success_hooks'
+ success_hooks = MiscFunctionsProcess(
+ background=self.background,
+ commands=[phase], phase=phase,
+ scheduler=self.scheduler, settings=self.settings)
+ self._start_task(success_hooks,
+ self._buildpkgonly_success_hook_exit)
+ return
+
+ # Continue holding the builddir lock until
+ # after the package has been installed.
+ self._current_task = None
+ self.returncode = packager.returncode
+ self.wait()
+
+ def _buildpkgonly_success_hook_exit(self, success_hooks):
+ self._default_exit(success_hooks)
+ self.returncode = None
+ # Need to call "clean" phase for buildpkgonly mode
+ portage.elog.elog_process(self.pkg.cpv, self.settings)
+ phase = 'clean'
+ clean_phase = EbuildPhase(background=self.background,
+ phase=phase, scheduler=self.scheduler, settings=self.settings)
+ self._start_task(clean_phase, self._clean_exit)
+
+ def _clean_exit(self, clean_phase):
+ if self._final_exit(clean_phase) != os.EX_OK or \
+ self.opts.buildpkgonly:
+ self._unlock_builddir()
+ self.wait()
+
+ def create_install_task(self):
+ """
+ Install the package and then clean up and release locks.
+ Only call this after the build has completed successfully
+ and neither fetchonly nor buildpkgonly mode are enabled.
+ """
+
+ ldpath_mtimes = self.ldpath_mtimes
+ logger = self.logger
+ pkg = self.pkg
+ pkg_count = self.pkg_count
+ settings = self.settings
+ world_atom = self.world_atom
+ ebuild_path = self._ebuild_path
+ tree = self._tree
+
+ task = EbuildMerge(find_blockers=self.find_blockers,
+ ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
+ pkg_count=pkg_count, pkg_path=ebuild_path,
+ scheduler=self.scheduler,
+ settings=settings, tree=tree, world_atom=world_atom)
+
+ msg = " === (%s of %s) Merging (%s::%s)" % \
+ (pkg_count.curval, pkg_count.maxval,
+ pkg.cpv, ebuild_path)
+ short_msg = "emerge: (%s of %s) %s Merge" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv)
+ logger.log(msg, short_msg=short_msg)
+
+ task.addExitListener(self._install_exit)
+ return task
+
+ def _install_exit(self, task):
+ self._unlock_builddir()
diff --git a/portage_with_autodep/pym/_emerge/EbuildBuildDir.py b/portage_with_autodep/pym/_emerge/EbuildBuildDir.py
new file mode 100644
index 0000000..ddc5fe0
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildBuildDir.py
@@ -0,0 +1,109 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AsynchronousLock import AsynchronousLock
+from _emerge.SlotObject import SlotObject
+import portage
+from portage import os
+from portage.exception import PortageException
+import errno
+
+class EbuildBuildDir(SlotObject):
+
+ __slots__ = ("scheduler", "settings",
+ "locked", "_catdir", "_lock_obj")
+
+ def __init__(self, **kwargs):
+ SlotObject.__init__(self, **kwargs)
+ self.locked = False
+
+ def lock(self):
+ """
+ This raises an AlreadyLocked exception if lock() is called
+ while a lock is already held. In order to avoid this, call
+ unlock() or check whether the "locked" attribute is True
+ or False before calling lock().
+ """
+ if self._lock_obj is not None:
+ raise self.AlreadyLocked((self._lock_obj,))
+
+ dir_path = self.settings.get('PORTAGE_BUILDDIR')
+ if not dir_path:
+ raise AssertionError('PORTAGE_BUILDDIR is unset')
+ catdir = os.path.dirname(dir_path)
+ self._catdir = catdir
+
+ try:
+ portage.util.ensure_dirs(os.path.dirname(catdir),
+ gid=portage.portage_gid,
+ mode=0o70, mask=0)
+ except PortageException:
+ if not os.path.isdir(os.path.dirname(catdir)):
+ raise
+ catdir_lock = AsynchronousLock(path=catdir, scheduler=self.scheduler)
+ catdir_lock.start()
+ catdir_lock.wait()
+ self._assert_lock(catdir_lock)
+
+ try:
+ try:
+ portage.util.ensure_dirs(catdir,
+ gid=portage.portage_gid,
+ mode=0o70, mask=0)
+ except PortageException:
+ if not os.path.isdir(catdir):
+ raise
+ builddir_lock = AsynchronousLock(path=dir_path,
+ scheduler=self.scheduler)
+ builddir_lock.start()
+ builddir_lock.wait()
+ self._assert_lock(builddir_lock)
+ self._lock_obj = builddir_lock
+ self.settings['PORTAGE_BUILDIR_LOCKED'] = '1'
+ finally:
+ self.locked = self._lock_obj is not None
+ catdir_lock.unlock()
+
+ def _assert_lock(self, async_lock):
+ if async_lock.returncode != os.EX_OK:
+ # TODO: create a better way to propagate this error to the caller
+ raise AssertionError("AsynchronousLock failed with returncode %s" \
+ % (async_lock.returncode,))
+
+ def clean_log(self):
+ """Discard existing log. The log will not be be discarded
+ in cases when it would not make sense, like when FEATURES=keepwork
+ is enabled."""
+ settings = self.settings
+ if 'keepwork' in settings.features:
+ return
+ log_file = settings.get('PORTAGE_LOG_FILE')
+ if log_file is not None and os.path.isfile(log_file):
+ try:
+ os.unlink(log_file)
+ except OSError:
+ pass
+
+ def unlock(self):
+ if self._lock_obj is None:
+ return
+
+ self._lock_obj.unlock()
+ self._lock_obj = None
+ self.locked = False
+ self.settings.pop('PORTAGE_BUILDIR_LOCKED', None)
+ catdir_lock = AsynchronousLock(path=self._catdir, scheduler=self.scheduler)
+ catdir_lock.start()
+ if catdir_lock.wait() == os.EX_OK:
+ try:
+ os.rmdir(self._catdir)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT,
+ errno.ENOTEMPTY, errno.EEXIST, errno.EPERM):
+ raise
+ finally:
+ catdir_lock.unlock()
+
+ class AlreadyLocked(portage.exception.PortageException):
+ pass
+
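
The locking order above — take the category-directory lock only long enough to create and lock the build directory, then release the outer lock — can be sketched with plain fcntl file locks (paths are illustrative; POSIX only):

    import fcntl
    import os

    def locked_fd(path):
        fd = os.open(path, os.O_RDWR | os.O_CREAT, 0o660)
        fcntl.lockf(fd, fcntl.LOCK_EX)
        return fd

    catdir_lock = locked_fd("/tmp/demo-catdir.lock")
    try:
        # Only hold the outer lock while creating and locking the
        # inner build directory.
        os.makedirs("/tmp/demo-catdir/pkg-1.0", exist_ok=True)
        builddir_lock = locked_fd("/tmp/demo-catdir/pkg-1.0/.lock")
    finally:
        fcntl.lockf(catdir_lock, fcntl.LOCK_UN)  # outer lock dropped early
        os.close(catdir_lock)

    # ... the build runs under builddir_lock ...
    fcntl.lockf(builddir_lock, fcntl.LOCK_UN)
    os.close(builddir_lock)
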
diff --git a/portage_with_autodep/pym/_emerge/EbuildExecuter.py b/portage_with_autodep/pym/_emerge/EbuildExecuter.py
new file mode 100644
index 0000000..f8febd4
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildExecuter.py
@@ -0,0 +1,99 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.TaskSequence import TaskSequence
+from _emerge.CompositeTask import CompositeTask
+import portage
+from portage import os
+from portage.eapi import eapi_has_src_prepare_and_src_configure, \
+ eapi_exports_replace_vars
+from portage.package.ebuild.doebuild import _prepare_fake_distdir
+
+class EbuildExecuter(CompositeTask):
+
+ __slots__ = ("pkg", "scheduler", "settings")
+
+ _phases = ("prepare", "configure", "compile", "test", "install")
+
+ _live_eclasses = frozenset([
+ "bzr",
+ "cvs",
+ "darcs",
+ "git",
+ "git-2",
+ "mercurial",
+ "subversion",
+ "tla",
+ ])
+
+ def _start(self):
+ pkg = self.pkg
+ scheduler = self.scheduler
+ settings = self.settings
+ cleanup = 0
+ portage.prepare_build_dirs(pkg.root, settings, cleanup)
+
+ portdb = pkg.root_config.trees['porttree'].dbapi
+ ebuild_path = settings['EBUILD']
+ alist = settings.configdict["pkg"].get("A", "").split()
+ _prepare_fake_distdir(settings, alist)
+
+ if eapi_exports_replace_vars(settings['EAPI']):
+ vardb = pkg.root_config.trees['vartree'].dbapi
+ settings["REPLACING_VERSIONS"] = " ".join(
+ set(portage.versions.cpv_getversion(match) \
+ for match in vardb.match(pkg.slot_atom) + \
+ vardb.match('='+pkg.cpv)))
+
+ setup_phase = EbuildPhase(background=self.background,
+ phase="setup", scheduler=scheduler,
+ settings=settings)
+
+ setup_phase.addExitListener(self._setup_exit)
+ self._task_queued(setup_phase)
+ self.scheduler.scheduleSetup(setup_phase)
+
+ def _setup_exit(self, setup_phase):
+
+ if self._default_exit(setup_phase) != os.EX_OK:
+ self.wait()
+ return
+
+ unpack_phase = EbuildPhase(background=self.background,
+ phase="unpack", scheduler=self.scheduler,
+ settings=self.settings)
+
+ if self._live_eclasses.intersection(self.pkg.inherited):
+ # Serialize $DISTDIR access for live ebuilds since
+ # otherwise they can interfere with each other.
+
+ unpack_phase.addExitListener(self._unpack_exit)
+ self._task_queued(unpack_phase)
+ self.scheduler.scheduleUnpack(unpack_phase)
+
+ else:
+ self._start_task(unpack_phase, self._unpack_exit)
+
+ def _unpack_exit(self, unpack_phase):
+
+ if self._default_exit(unpack_phase) != os.EX_OK:
+ self.wait()
+ return
+
+ ebuild_phases = TaskSequence(scheduler=self.scheduler)
+
+ pkg = self.pkg
+ phases = self._phases
+ eapi = pkg.metadata["EAPI"]
+ if not eapi_has_src_prepare_and_src_configure(eapi):
+ # skip src_prepare and src_configure
+ phases = phases[2:]
+
+ for phase in phases:
+ ebuild_phases.add(EbuildPhase(background=self.background,
+ phase=phase, scheduler=self.scheduler,
+ settings=self.settings))
+
+ self._start_task(ebuild_phases, self._default_final_exit)
+
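
The phase list is trimmed once, up front, by EAPI capability rather than being checked phase by phase. A compact sketch (the capability test here is simplified; src_prepare and src_configure were introduced in EAPI 2):

    PHASES = ("prepare", "configure", "compile", "test", "install")

    def phases_for(eapi):
        # Simplified stand-in for eapi_has_src_prepare_and_src_configure().
        has_prepare_configure = eapi not in ("0", "1")
        return PHASES if has_prepare_configure else PHASES[2:]

    print(phases_for("0"))  # ('compile', 'test', 'install')
    print(phases_for("4"))  # all five phases
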
diff --git a/portage_with_autodep/pym/_emerge/EbuildFetcher.py b/portage_with_autodep/pym/_emerge/EbuildFetcher.py
new file mode 100644
index 0000000..feb68d0
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildFetcher.py
@@ -0,0 +1,302 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import traceback
+
+from _emerge.SpawnProcess import SpawnProcess
+import copy
+import io
+import signal
+import sys
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage import _unicode_decode
+from portage.elog.messages import eerror
+from portage.package.ebuild.fetch import _check_distfile, fetch
+from portage.util._pty import _create_pty_or_pipe
+
+class EbuildFetcher(SpawnProcess):
+
+ __slots__ = ("config_pool", "ebuild_path", "fetchonly", "fetchall",
+ "pkg", "prefetch") + \
+ ("_digests", "_settings", "_uri_map")
+
+ def already_fetched(self, settings):
+ """
+ Returns True if all files already exist locally and have correct
+ digests, otherwise returns False. When returning True, appropriate
+ digest checking messages are produced for display and/or logging.
+ When returning False, no messages are produced, since we assume
+ that a fetcher process will later be executed in order to produce
+ such messages. This will raise InvalidDependString if SRC_URI is
+ invalid.
+ """
+
+ uri_map = self._get_uri_map()
+ if not uri_map:
+ return True
+
+ digests = self._get_digests()
+ distdir = settings["DISTDIR"]
+ allow_missing = "allow-missing-manifests" in settings.features
+
+ for filename in uri_map:
+ # Use stat rather than lstat since fetch() creates
+ # symlinks when PORTAGE_RO_DISTDIRS is used.
+ try:
+ st = os.stat(os.path.join(distdir, filename))
+ except OSError:
+ return False
+ if st.st_size == 0:
+ return False
+ expected_size = digests.get(filename, {}).get('size')
+ if expected_size is None:
+ continue
+ if st.st_size != expected_size:
+ return False
+
+ stdout_orig = sys.stdout
+ stderr_orig = sys.stderr
+ global_havecolor = portage.output.havecolor
+ out = io.StringIO()
+ eout = portage.output.EOutput()
+ eout.quiet = settings.get("PORTAGE_QUIET") == "1"
+ success = True
+ try:
+ sys.stdout = out
+ sys.stderr = out
+ if portage.output.havecolor:
+ portage.output.havecolor = not self.background
+
+ for filename in uri_map:
+ mydigests = digests.get(filename)
+ if mydigests is None:
+ if not allow_missing:
+ success = False
+ break
+ continue
+ ok, st = _check_distfile(os.path.join(distdir, filename),
+ mydigests, eout, show_errors=False)
+ if not ok:
+ success = False
+ break
+ except portage.exception.FileNotFound:
+ # A file disappeared unexpectedly.
+ return False
+ finally:
+ sys.stdout = stdout_orig
+ sys.stderr = stderr_orig
+ portage.output.havecolor = global_havecolor
+
+ if success:
+ # When returning unsuccessfully, no messages are produced, since
+ # we assume that a fetcher process will later be executed in order
+ # to produce such messages.
+ msg = out.getvalue()
+ if msg:
+ self.scheduler.output(msg, log_path=self.logfile)
+
+ return success
+
+ def _start(self):
+
+ root_config = self.pkg.root_config
+ portdb = root_config.trees["porttree"].dbapi
+ ebuild_path = self._get_ebuild_path()
+
+ try:
+ uri_map = self._get_uri_map()
+ except portage.exception.InvalidDependString as e:
+ msg_lines = []
+ msg = "Fetch failed for '%s' due to invalid SRC_URI: %s" % \
+ (self.pkg.cpv, e)
+ msg_lines.append(msg)
+ self._eerror(msg_lines)
+ self._set_returncode((self.pid, 1 << 8))
+ self.wait()
+ return
+
+ if not uri_map:
+ # Nothing to fetch.
+ self._set_returncode((self.pid, os.EX_OK << 8))
+ self.wait()
+ return
+
+ settings = self.config_pool.allocate()
+ settings.setcpv(self.pkg)
+ portage.doebuild_environment(ebuild_path, 'fetch',
+ settings=settings, db=portdb)
+
+ if self.prefetch and \
+ self._prefetch_size_ok(uri_map, settings, ebuild_path):
+ self.config_pool.deallocate(settings)
+ self._set_returncode((self.pid, os.EX_OK << 8))
+ self.wait()
+ return
+
+ nocolor = settings.get("NOCOLOR")
+
+ if self.prefetch:
+ settings["PORTAGE_PARALLEL_FETCHONLY"] = "1"
+
+ if self.background:
+ nocolor = "true"
+
+ if nocolor is not None:
+ settings["NOCOLOR"] = nocolor
+
+ self._settings = settings
+ SpawnProcess._start(self)
+
+ # Free settings now since it's no longer needed in
+ # this process (the subprocess has a private copy).
+ self.config_pool.deallocate(settings)
+ settings = None
+ self._settings = None
+
+ def _spawn(self, args, fd_pipes=None, **kwargs):
+ """
+ Fork a subprocess, apply local settings, and call fetch().
+ """
+
+ pid = os.fork()
+ if pid != 0:
+ portage.process.spawned_pids.append(pid)
+ return [pid]
+
+ portage.process._setup_pipes(fd_pipes)
+
+ # Use default signal handlers in order to avoid problems
+ # killing subprocesses as reported in bug #353239.
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+ # Force consistent color output, in case we are capturing fetch
+ # output through a normal pipe due to unavailability of ptys.
+ portage.output.havecolor = self._settings.get('NOCOLOR') \
+ not in ('yes', 'true')
+
+ rval = 1
+ allow_missing = 'allow-missing-manifests' in self._settings.features
+ try:
+ if fetch(self._uri_map, self._settings, fetchonly=self.fetchonly,
+ digests=copy.deepcopy(self._get_digests()),
+ allow_missing_digests=allow_missing):
+ rval = os.EX_OK
+ except SystemExit:
+ raise
+ except:
+ traceback.print_exc()
+ finally:
+ # Call os._exit() from finally block, in order to suppress any
+ # finally blocks from earlier in the call stack. See bug #345289.
+ os._exit(rval)
+
+ def _get_ebuild_path(self):
+ if self.ebuild_path is not None:
+ return self.ebuild_path
+ portdb = self.pkg.root_config.trees["porttree"].dbapi
+ self.ebuild_path = portdb.findname(self.pkg.cpv, myrepo=self.pkg.repo)
+ if self.ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % self.pkg.cpv)
+ return self.ebuild_path
+
+ def _get_digests(self):
+ if self._digests is not None:
+ return self._digests
+ self._digests = portage.Manifest(os.path.dirname(
+ self._get_ebuild_path()), None).getTypeDigests("DIST")
+ return self._digests
+
+ def _get_uri_map(self):
+ """
+ This can raise InvalidDependString from portdbapi.getFetchMap().
+ """
+ if self._uri_map is not None:
+ return self._uri_map
+ pkgdir = os.path.dirname(self._get_ebuild_path())
+ mytree = os.path.dirname(os.path.dirname(pkgdir))
+ use = None
+ if not self.fetchall:
+ use = self.pkg.use.enabled
+ portdb = self.pkg.root_config.trees["porttree"].dbapi
+ self._uri_map = portdb.getFetchMap(self.pkg.cpv,
+ useflags=use, mytree=mytree)
+ return self._uri_map
+
+ def _prefetch_size_ok(self, uri_map, settings, ebuild_path):
+ distdir = settings["DISTDIR"]
+
+ sizes = {}
+ for filename in uri_map:
+ # Use stat rather than lstat since portage.fetch() creates
+ # symlinks when PORTAGE_RO_DISTDIRS is used.
+ try:
+ st = os.stat(os.path.join(distdir, filename))
+ except OSError:
+ return False
+ if st.st_size == 0:
+ return False
+ sizes[filename] = st.st_size
+
+ digests = self._get_digests()
+ for filename, actual_size in sizes.items():
+ size = digests.get(filename, {}).get('size')
+ if size is None:
+ continue
+ if size != actual_size:
+ return False
+
+ # All files are present and sizes are ok. In this case the normal
+ # fetch code will be skipped, so we need to generate equivalent
+ # output here.
+ if self.logfile is not None:
+ f = io.open(_unicode_encode(self.logfile,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='a', encoding=_encodings['content'],
+ errors='backslashreplace')
+ for filename in uri_map:
+ f.write(_unicode_decode((' * %s size ;-) ...' % \
+ filename).ljust(73) + '[ ok ]\n'))
+ f.close()
+
+ return True
+
+ def _pipe(self, fd_pipes):
+ """When appropriate, use a pty so that fetcher progress bars,
+ like wget has, will work properly."""
+ if self.background or not sys.stdout.isatty():
+ # When the output only goes to a log file,
+ # there's no point in creating a pty.
+ return os.pipe()
+ stdout_pipe = None
+ if not self.background:
+ stdout_pipe = fd_pipes.get(1)
+ got_pty, master_fd, slave_fd = \
+ _create_pty_or_pipe(copy_term_size=stdout_pipe)
+ return (master_fd, slave_fd)
+
+ def _eerror(self, lines):
+ out = io.StringIO()
+ for line in lines:
+ eerror(line, phase="unpack", key=self.pkg.cpv, out=out)
+ msg = out.getvalue()
+ if msg:
+ self.scheduler.output(msg, log_path=self.logfile)
+
+ def _set_returncode(self, wait_retval):
+ SpawnProcess._set_returncode(self, wait_retval)
+ # Collect elog messages that might have been
+ # created by the pkg_nofetch phase.
+ # Skip elog messages for prefetch, in order to avoid duplicates.
+ if not self.prefetch and self.returncode != os.EX_OK:
+ msg_lines = []
+ msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
+ if self.logfile is not None:
+ msg += ", Log file:"
+ msg_lines.append(msg)
+ if self.logfile is not None:
+ msg_lines.append(" '%s'" % (self.logfile,))
+ self._eerror(msg_lines)
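
The forked child in _spawn() must leave via os._exit() inside a finally block, so that no finally handlers or exit hooks inherited from the parent's stack ever run in the child (bug #345289 is the motivating case). A self-contained sketch of that discipline (POSIX only):

    import os
    import traceback

    pid = os.fork()
    if pid == 0:  # child
        rval = 1
        try:
            # ... real work goes here (fetch(), in the code above) ...
            rval = os.EX_OK
        except SystemExit:
            raise
        except Exception:
            traceback.print_exc()
        finally:
            os._exit(rval)  # skips outer finally blocks and atexit hooks
    else:  # parent
        _, status = os.waitpid(pid, 0)
        print("child exit status:", os.WEXITSTATUS(status))
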
diff --git a/portage_with_autodep/pym/_emerge/EbuildFetchonly.py b/portage_with_autodep/pym/_emerge/EbuildFetchonly.py
new file mode 100644
index 0000000..b898971
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildFetchonly.py
@@ -0,0 +1,32 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.SlotObject import SlotObject
+import portage
+from portage import os
+from portage.elog.messages import eerror
+
+class EbuildFetchonly(SlotObject):
+
+ __slots__ = ("fetch_all", "pkg", "pretend", "settings")
+
+ def execute(self):
+ settings = self.settings
+ pkg = self.pkg
+ portdb = pkg.root_config.trees["porttree"].dbapi
+ ebuild_path = portdb.findname(pkg.cpv, myrepo=pkg.repo)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % pkg.cpv)
+ settings.setcpv(pkg)
+ debug = settings.get("PORTAGE_DEBUG") == "1"
+
+ rval = portage.doebuild(ebuild_path, "fetch",
+ settings["ROOT"], settings, debug=debug,
+ listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
+ mydbapi=portdb, tree="porttree")
+
+ if rval != os.EX_OK:
+ msg = "Fetch failed for '%s'" % (pkg.cpv,)
+ eerror(msg, phase="unpack", key=pkg.cpv)
+
+ return rval
diff --git a/portage_with_autodep/pym/_emerge/EbuildIpcDaemon.py b/portage_with_autodep/pym/_emerge/EbuildIpcDaemon.py
new file mode 100644
index 0000000..5dabe34
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildIpcDaemon.py
@@ -0,0 +1,108 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import logging
+import pickle
+from portage import os
+from portage.localization import _
+from portage.util import writemsg_level
+from _emerge.FifoIpcDaemon import FifoIpcDaemon
+from _emerge.PollConstants import PollConstants
+
+class EbuildIpcDaemon(FifoIpcDaemon):
+ """
+ This class serves as an IPC daemon, which ebuild processes can use
+ to communicate with portage's main python process.
+
+ Here are a few possible uses:
+
+ 1) Robust subshell/subprocess die support. This allows the ebuild
+ environment to reliably die without having to rely on signal IPC.
+
+ 2) Delegation of portageq calls to the main python process, eliminating
+ performance and userpriv permission issues.
+
+ 3) Reliable ebuild termination in cases when the ebuild has accidentally
+ left orphan processes running in the background (as in bug #278895).
+
+ 4) Detect cases in which bash has exited unexpectedly (as in bug #190128).
+ """
+
+ __slots__ = ('commands',)
+
+ def _input_handler(self, fd, event):
+ # Read the whole pickle in a single atomic read() call.
+ data = None
+ if event & PollConstants.POLLIN:
+ # For maximum portability, use os.read() here since
+ # array.fromfile() and file.read() are both known to
+ # erroneously return an empty string from this
+ # non-blocking fifo stream on FreeBSD (bug #337465).
+ try:
+ data = os.read(fd, self._bufsize)
+ except OSError as e:
+ if e.errno != errno.EAGAIN:
+ raise
+ # Assume that another event will be generated
+ # if there's any relevant data.
+
+ if data:
+
+ try:
+ obj = pickle.loads(data)
+ except SystemExit:
+ raise
+ except Exception:
+ # The pickle module can raise practically
+ # any exception when given corrupt data.
+ pass
+ else:
+
+ self._reopen_input()
+
+ cmd_key = obj[0]
+ cmd_handler = self.commands[cmd_key]
+ reply = cmd_handler(obj)
+ try:
+ self._send_reply(reply)
+ except OSError as e:
+ if e.errno == errno.ENXIO:
+ # This happens if the client side has been killed.
+ pass
+ else:
+ raise
+
+ # Allow the command to execute hooks after its reply
+ # has been sent. This hook is used by the 'exit'
+ # command to kill the ebuild process. For some
+ # reason, the ebuild-ipc helper hangs up the
+ # ebuild process if it is waiting for a reply
+ # when we try to kill the ebuild process.
+ reply_hook = getattr(cmd_handler,
+ 'reply_hook', None)
+ if reply_hook is not None:
+ reply_hook()
+
+ def _send_reply(self, reply):
+ # File streams are in unbuffered mode since we do atomic
+ # read and write of whole pickles. Use non-blocking mode so
+ # we don't hang if the client is killed before we can send
+ # the reply. We rely on the client opening the other side
+ # of this fifo before it sends its request, since otherwise
+ # we'd have a race condition with this open call raising
+ # ENXIO if the client hasn't opened the fifo yet.
+ try:
+ output_fd = os.open(self.output_fifo,
+ os.O_WRONLY | os.O_NONBLOCK)
+ try:
+ os.write(output_fd, pickle.dumps(reply))
+ finally:
+ os.close(output_fd)
+ except OSError as e:
+ # This probably means that the client has been killed,
+ # which causes open to fail with ENXIO.
+ writemsg_level(
+ "!!! EbuildIpcDaemon %s: %s\n" % \
+ (_('failed to send reply'), e),
+ level=logging.ERROR, noiselevel=-1)
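
The daemon's protocol relies on one pickle per atomic read()/write() on a FIFO opened in non-blocking mode. A self-contained demonstration with both ends in one process (payloads must stay below PIPE_BUF for the write to be atomic; the path is illustrative):

    import errno
    import os
    import pickle

    fifo = "/tmp/demo-ipc-fifo"
    if not os.path.exists(fifo):
        os.mkfifo(fifo)

    # Open the read end first so the non-blocking write-end open succeeds.
    reader = os.open(fifo, os.O_RDONLY | os.O_NONBLOCK)
    writer = os.open(fifo, os.O_WRONLY | os.O_NONBLOCK)

    os.write(writer, pickle.dumps(("exit", os.EX_OK)))
    try:
        data = os.read(reader, 4096)  # whole pickle in one read
    except OSError as e:
        if e.errno != errno.EAGAIN:
            raise
        data = b""
    if data:
        print(pickle.loads(data))  # ('exit', 0)

    os.close(reader)
    os.close(writer)
    os.unlink(fifo)
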
diff --git a/portage_with_autodep/pym/_emerge/EbuildMerge.py b/portage_with_autodep/pym/_emerge/EbuildMerge.py
new file mode 100644
index 0000000..9c35988
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildMerge.py
@@ -0,0 +1,56 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.CompositeTask import CompositeTask
+from portage import os
+from portage.dbapi._MergeProcess import MergeProcess
+
+class EbuildMerge(CompositeTask):
+
+ __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
+ "pkg", "pkg_count", "pkg_path", "pretend",
+ "settings", "tree", "world_atom")
+
+ def _start(self):
+ root_config = self.pkg.root_config
+ settings = self.settings
+ mycat = settings["CATEGORY"]
+ mypkg = settings["PF"]
+ pkgloc = settings["D"]
+ infloc = os.path.join(settings["PORTAGE_BUILDDIR"], "build-info")
+ myebuild = settings["EBUILD"]
+ mydbapi = root_config.trees[self.tree].dbapi
+ vartree = root_config.trees["vartree"]
+ background = (settings.get('PORTAGE_BACKGROUND') == '1')
+ logfile = settings.get('PORTAGE_LOG_FILE')
+
+ merge_task = MergeProcess(
+ mycat=mycat, mypkg=mypkg, settings=settings,
+ treetype=self.tree, vartree=vartree, scheduler=self.scheduler,
+ background=background, blockers=self.find_blockers, pkgloc=pkgloc,
+ infloc=infloc, myebuild=myebuild, mydbapi=mydbapi,
+ prev_mtimes=self.ldpath_mtimes, logfile=logfile)
+
+ self._start_task(merge_task, self._merge_exit)
+
+ def _merge_exit(self, merge_task):
+ if self._final_exit(merge_task) != os.EX_OK:
+ self.wait()
+ return
+
+ pkg = self.pkg
+ self.world_atom(pkg)
+ pkg_count = self.pkg_count
+ pkg_path = self.pkg_path
+ logger = self.logger
+ if "noclean" not in self.settings.features:
+ short_msg = "emerge: (%s of %s) %s Clean Post" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv)
+ logger.log((" === (%s of %s) " + \
+ "Post-Build Cleaning (%s::%s)") % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
+ short_msg=short_msg)
+ logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
+
+ self.wait()
diff --git a/portage_with_autodep/pym/_emerge/EbuildMetadataPhase.py b/portage_with_autodep/pym/_emerge/EbuildMetadataPhase.py
new file mode 100644
index 0000000..e53298b
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildMetadataPhase.py
@@ -0,0 +1,133 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.SubProcess import SubProcess
+from _emerge.PollConstants import PollConstants
+import sys
+from portage.cache.mappings import slot_dict_class
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+import fcntl
+import io
+
+class EbuildMetadataPhase(SubProcess):
+
+ """
+ Asynchronous interface for the ebuild "depend" phase which is
+ used to extract metadata from the ebuild.
+ """
+
+ __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
+ "ebuild_mtime", "metadata", "portdb", "repo_path", "settings") + \
+ ("_raw_metadata",)
+
+ _file_names = ("ebuild",)
+ _files_dict = slot_dict_class(_file_names, prefix="")
+ _metadata_fd = 9
+
+ def _start(self):
+ settings = self.settings
+ settings.setcpv(self.cpv)
+ ebuild_path = self.ebuild_path
+
+ eapi = None
+ if 'parse-eapi-ebuild-head' in settings.features:
+ eapi = portage._parse_eapi_ebuild_head(
+ io.open(_unicode_encode(ebuild_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace'))
+
+ if eapi is not None:
+ if not portage.eapi_is_supported(eapi):
+ self.metadata_callback(self.cpv, self.ebuild_path,
+ self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
+ self._set_returncode((self.pid, os.EX_OK << 8))
+ self.wait()
+ return
+
+ settings.configdict['pkg']['EAPI'] = eapi
+
+ debug = settings.get("PORTAGE_DEBUG") == "1"
+ master_fd = None
+ slave_fd = None
+ fd_pipes = None
+ if self.fd_pipes is not None:
+ fd_pipes = self.fd_pipes.copy()
+ else:
+ fd_pipes = {}
+
+ fd_pipes.setdefault(0, sys.stdin.fileno())
+ fd_pipes.setdefault(1, sys.stdout.fileno())
+ fd_pipes.setdefault(2, sys.stderr.fileno())
+
+ # flush any pending output
+ for fd in fd_pipes.values():
+ if fd == sys.stdout.fileno():
+ sys.stdout.flush()
+ if fd == sys.stderr.fileno():
+ sys.stderr.flush()
+
+ fd_pipes_orig = fd_pipes.copy()
+ self._files = self._files_dict()
+ files = self._files
+
+ master_fd, slave_fd = os.pipe()
+ fcntl.fcntl(master_fd, fcntl.F_SETFL,
+ fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ fd_pipes[self._metadata_fd] = slave_fd
+
+ self._raw_metadata = []
+ files.ebuild = os.fdopen(master_fd, 'rb', 0)
+ self._reg_id = self.scheduler.register(files.ebuild.fileno(),
+ self._registered_events, self._output_handler)
+ self._registered = True
+
+ retval = portage.doebuild(ebuild_path, "depend",
+ settings["ROOT"], settings, debug,
+ mydbapi=self.portdb, tree="porttree",
+ fd_pipes=fd_pipes, returnpid=True)
+
+ os.close(slave_fd)
+
+ if isinstance(retval, int):
+ # doebuild failed before spawning
+ self._unregister()
+ self._set_returncode((self.pid, retval << 8))
+ self.wait()
+ return
+
+ self.pid = retval[0]
+ portage.process.spawned_pids.remove(self.pid)
+
+ def _output_handler(self, fd, event):
+
+ if event & PollConstants.POLLIN:
+ self._raw_metadata.append(self._files.ebuild.read())
+ if not self._raw_metadata[-1]:
+ self._unregister()
+ self.wait()
+
+ self._unregister_if_appropriate(event)
+
+ def _set_returncode(self, wait_retval):
+ SubProcess._set_returncode(self, wait_retval)
+ if self.returncode == os.EX_OK:
+ metadata_lines = ''.join(_unicode_decode(chunk,
+ encoding=_encodings['repo.content'], errors='replace')
+ for chunk in self._raw_metadata).splitlines()
+ if len(portage.auxdbkeys) != len(metadata_lines):
+ # Don't trust bash's returncode if the
+ # number of lines is incorrect.
+ self.returncode = 1
+ else:
+ metadata = zip(portage.auxdbkeys, metadata_lines)
+ self.metadata = self.metadata_callback(self.cpv,
+ self.ebuild_path, self.repo_path, metadata,
+ self.ebuild_mtime)
+
diff --git a/portage_with_autodep/pym/_emerge/EbuildPhase.py b/portage_with_autodep/pym/_emerge/EbuildPhase.py
new file mode 100644
index 0000000..82c165d
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildPhase.py
@@ -0,0 +1,350 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import gzip
+import io
+import sys
+import tempfile
+
+from _emerge.AsynchronousLock import AsynchronousLock
+from _emerge.BinpkgEnvExtractor import BinpkgEnvExtractor
+from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
+from _emerge.EbuildProcess import EbuildProcess
+from _emerge.CompositeTask import CompositeTask
+from portage.util import writemsg
+from portage.xml.metadata import MetaDataXML
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.elog:messages@elog_messages',
+ 'portage.package.ebuild.doebuild:_check_build_log,' + \
+ '_post_phase_cmds,_post_phase_userpriv_perms,' + \
+ '_post_src_install_chost_fix,' + \
+ '_post_src_install_soname_symlinks,' + \
+ '_post_src_install_uid_fix,_postinst_bsdflags,' + \
+ '_preinst_bsdflags'
+)
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+
+class EbuildPhase(CompositeTask):
+
+ __slots__ = ("actionmap", "phase", "settings") + \
+ ("_ebuild_lock",)
+
+ # FEATURES displayed prior to setup phase
+ _features_display = ("ccache", "depcheck", "depcheckstrict", "distcc",
+ "distcc-pump", "fakeroot",
+ "installsources", "keeptemp", "keepwork", "nostrip",
+ "preserve-libs", "sandbox", "selinux", "sesandbox",
+ "splitdebug", "suidctl", "test", "userpriv",
+ "usersandbox")
+
+ # Locked phases
+ _locked_phases = ("setup", "preinst", "postinst", "prerm", "postrm")
+
+ def _start(self):
+
+ need_builddir = self.phase not in EbuildProcess._phases_without_builddir
+
+ if need_builddir:
+ phase_completed_file = os.path.join(
+ self.settings['PORTAGE_BUILDDIR'],
+ ".%sed" % self.phase.rstrip('e'))
+ if not os.path.exists(phase_completed_file):
+ # If the phase is really going to run then we want
+ # to eliminate any stale elog messages that may
+ # exist from a previous run.
+ try:
+ os.unlink(os.path.join(self.settings['T'],
+ 'logging', self.phase))
+ except OSError:
+ pass
+
+ if self.phase in ('nofetch', 'pretend', 'setup'):
+
+ use = self.settings.get('PORTAGE_BUILT_USE')
+ if use is None:
+ use = self.settings['PORTAGE_USE']
+
+ maint_str = ""
+ upstr_str = ""
+ metadata_xml_path = os.path.join(os.path.dirname(self.settings['EBUILD']), "metadata.xml")
+ if os.path.isfile(metadata_xml_path):
+ herds_path = os.path.join(self.settings['PORTDIR'],
+ 'metadata/herds.xml')
+ try:
+ metadata_xml = MetaDataXML(metadata_xml_path, herds_path)
+ maint_str = metadata_xml.format_maintainer_string()
+ upstr_str = metadata_xml.format_upstream_string()
+ except SyntaxError:
+ maint_str = "<invalid metadata.xml>"
+
+ msg = []
+ msg.append("Package: %s" % self.settings.mycpv)
+ if self.settings.get('PORTAGE_REPO_NAME'):
+ msg.append("Repository: %s" % self.settings['PORTAGE_REPO_NAME'])
+ if maint_str:
+ msg.append("Maintainer: %s" % maint_str)
+ if upstr_str:
+ msg.append("Upstream: %s" % upstr_str)
+
+ msg.append("USE: %s" % use)
+ relevant_features = []
+ enabled_features = self.settings.features
+ for x in self._features_display:
+ if x in enabled_features:
+ relevant_features.append(x)
+ if relevant_features:
+ msg.append("FEATURES: %s" % " ".join(relevant_features))
+
+ # Force background=True for this header since it's intended
+ # for the log and it doesn't necessarily need to be visible
+ # elsewhere.
+ self._elog('einfo', msg, background=True)
+
+ if self.phase == 'package':
+ if 'PORTAGE_BINPKG_TMPFILE' not in self.settings:
+ self.settings['PORTAGE_BINPKG_TMPFILE'] = \
+ os.path.join(self.settings['PKGDIR'],
+ self.settings['CATEGORY'], self.settings['PF']) + '.tbz2'
+
+ if self.phase in ("pretend", "prerm"):
+ env_extractor = BinpkgEnvExtractor(background=self.background,
+ scheduler=self.scheduler, settings=self.settings)
+ if env_extractor.saved_env_exists():
+ self._start_task(env_extractor, self._env_extractor_exit)
+ return
+ # If the environment.bz2 doesn't exist, then ebuild.sh will
+ # source the ebuild as a fallback.
+
+ self._start_lock()
+
+ def _env_extractor_exit(self, env_extractor):
+ if self._default_exit(env_extractor) != os.EX_OK:
+ self.wait()
+ return
+
+ self._start_lock()
+
+ def _start_lock(self):
+ if (self.phase in self._locked_phases and
+ "ebuild-locks" in self.settings.features):
+ eroot = self.settings["EROOT"]
+ lock_path = os.path.join(eroot, portage.VDB_PATH + "-ebuild")
+ if os.access(os.path.dirname(lock_path), os.W_OK):
+ self._ebuild_lock = AsynchronousLock(path=lock_path,
+ scheduler=self.scheduler)
+ self._start_task(self._ebuild_lock, self._lock_exit)
+ return
+
+ self._start_ebuild()
+
+ def _lock_exit(self, ebuild_lock):
+ if self._default_exit(ebuild_lock) != os.EX_OK:
+ self.wait()
+ return
+ self._start_ebuild()
+
+ def _start_ebuild(self):
+
+ # Don't open the log file during the clean phase since the
+ # open file can result in an nfs lock on $T/build.log which
+ # prevents the clean phase from removing $T.
+ logfile = None
+ if self.phase not in ("clean", "cleanrm") and \
+ self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ logfile = self.settings.get("PORTAGE_LOG_FILE")
+
+ fd_pipes = None
+ if not self.background and self.phase == 'nofetch':
+ # All the pkg_nofetch output goes to stderr since
+ # it's considered to be an error message.
+ fd_pipes = {1 : sys.stderr.fileno()}
+
+ ebuild_process = EbuildProcess(actionmap=self.actionmap,
+ background=self.background, fd_pipes=fd_pipes, logfile=logfile,
+ phase=self.phase, scheduler=self.scheduler,
+ settings=self.settings)
+
+ self._start_task(ebuild_process, self._ebuild_exit)
+
+ def _ebuild_exit(self, ebuild_process):
+
+ if self._ebuild_lock is not None:
+ self._ebuild_lock.unlock()
+ self._ebuild_lock = None
+
+ fail = False
+ if self._default_exit(ebuild_process) != os.EX_OK:
+ if self.phase == "test" and \
+ "test-fail-continue" in self.settings.features:
+ pass
+ else:
+ fail = True
+
+ if not fail:
+ self.returncode = None
+
+ logfile = None
+ if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ logfile = self.settings.get("PORTAGE_LOG_FILE")
+
+ if self.phase == "install":
+ out = io.StringIO()
+ _check_build_log(self.settings, out=out)
+ msg = out.getvalue()
+ self.scheduler.output(msg, log_path=logfile)
+
+ if fail:
+ self._die_hooks()
+ return
+
+ settings = self.settings
+ _post_phase_userpriv_perms(settings)
+
+ if self.phase == "install":
+ out = io.StringIO()
+ _post_src_install_chost_fix(settings)
+ _post_src_install_uid_fix(settings, out)
+ msg = out.getvalue()
+ if msg:
+ self.scheduler.output(msg, log_path=logfile)
+ elif self.phase == "preinst":
+ _preinst_bsdflags(settings)
+ elif self.phase == "postinst":
+ _postinst_bsdflags(settings)
+
+ post_phase_cmds = _post_phase_cmds.get(self.phase)
+ if post_phase_cmds is not None:
+ if logfile is not None and self.phase in ("install",):
+ # Log to a temporary file, since the code we are running
+ # reads PORTAGE_LOG_FILE for QA checks, and we want to
+ # avoid annoying "gzip: unexpected end of file" messages
+ # when FEATURES=compress-build-logs is enabled.
+ fd, logfile = tempfile.mkstemp()
+ os.close(fd)
+ post_phase = MiscFunctionsProcess(background=self.background,
+ commands=post_phase_cmds, logfile=logfile, phase=self.phase,
+ scheduler=self.scheduler, settings=settings)
+ self._start_task(post_phase, self._post_phase_exit)
+ return
+
+ # this point is not reachable if there was a failure and
+ # we returned for die_hooks above, so returncode must
+ # indicate success (especially if ebuild_process.returncode
+ # is unsuccessful and test-fail-continue came into play)
+ self.returncode = os.EX_OK
+ self._current_task = None
+ self.wait()
+
+ def _post_phase_exit(self, post_phase):
+
+ self._assert_current(post_phase)
+
+ log_path = None
+ if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+
+ if post_phase.logfile is not None and \
+ post_phase.logfile != log_path:
+ # We were logging to a temp file (see above), so append
+ # temp file to main log and remove temp file.
+ self._append_temp_log(post_phase.logfile, log_path)
+
+ if self._final_exit(post_phase) != os.EX_OK:
+ writemsg("!!! post %s failed; exiting.\n" % self.phase,
+ noiselevel=-1)
+ self._die_hooks()
+ return
+
+ if self.phase == "install":
+ out = io.StringIO()
+ _post_src_install_soname_symlinks(self.settings, out)
+ msg = out.getvalue()
+ if msg:
+ self.scheduler.output(msg, log_path=log_path)
+
+ self._current_task = None
+ self.wait()
+ return
+
+ def _append_temp_log(self, temp_log, log_path):
+
+ temp_file = open(_unicode_encode(temp_log,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+
+ log_file = self._open_log(log_path)
+
+ for line in temp_file:
+ log_file.write(line)
+
+ temp_file.close()
+ log_file.close()
+ os.unlink(temp_log)
+
+ def _open_log(self, log_path):
+
+ f = open(_unicode_encode(log_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='ab')
+
+ if log_path.endswith('.gz'):
+ f = gzip.GzipFile(filename='', mode='ab', fileobj=f)
+
+ return f
+
+ def _die_hooks(self):
+ self.returncode = None
+ phase = 'die_hooks'
+ die_hooks = MiscFunctionsProcess(background=self.background,
+ commands=[phase], phase=phase,
+ scheduler=self.scheduler, settings=self.settings)
+ self._start_task(die_hooks, self._die_hooks_exit)
+
+ def _die_hooks_exit(self, die_hooks):
+ if self.phase != 'clean' and \
+ 'noclean' not in self.settings.features and \
+ 'fail-clean' in self.settings.features:
+ self._default_exit(die_hooks)
+ self._fail_clean()
+ return
+ self._final_exit(die_hooks)
+ self.returncode = 1
+ self.wait()
+
+ def _fail_clean(self):
+ self.returncode = None
+ portage.elog.elog_process(self.settings.mycpv, self.settings)
+ phase = "clean"
+ clean_phase = EbuildPhase(background=self.background,
+ phase=phase, scheduler=self.scheduler, settings=self.settings)
+ self._start_task(clean_phase, self._fail_clean_exit)
+ return
+
+ def _fail_clean_exit(self, clean_phase):
+ self._final_exit(clean_phase)
+ self.returncode = 1
+ self.wait()
+
+ def _elog(self, elog_funcname, lines, background=None):
+ if background is None:
+ background = self.background
+ out = io.StringIO()
+ phase = self.phase
+ elog_func = getattr(elog_messages, elog_funcname)
+ global_havecolor = portage.output.havecolor
+ try:
+ portage.output.havecolor = \
+ self.settings.get('NOCOLOR', 'false').lower() in ('no', 'false')
+ for line in lines:
+ elog_func(line, phase=phase, key=self.settings.mycpv, out=out)
+ finally:
+ portage.output.havecolor = global_havecolor
+ msg = out.getvalue()
+ if msg:
+ log_path = None
+ if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+ self.scheduler.output(msg, log_path=log_path,
+ background=background)
diff --git a/portage_with_autodep/pym/_emerge/EbuildProcess.py b/portage_with_autodep/pym/_emerge/EbuildProcess.py
new file mode 100644
index 0000000..ce97aff
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildProcess.py
@@ -0,0 +1,21 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AbstractEbuildProcess import AbstractEbuildProcess
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.package.ebuild.doebuild:_doebuild_spawn,_spawn_actionmap'
+)
+
+class EbuildProcess(AbstractEbuildProcess):
+
+ __slots__ = ('actionmap',)
+
+ def _spawn(self, args, **kwargs):
+
+ actionmap = self.actionmap
+ if actionmap is None:
+ actionmap = _spawn_actionmap(self.settings)
+
+ return _doebuild_spawn(self.phase, self.settings,
+ actionmap=actionmap, **kwargs)
diff --git a/portage_with_autodep/pym/_emerge/EbuildSpawnProcess.py b/portage_with_autodep/pym/_emerge/EbuildSpawnProcess.py
new file mode 100644
index 0000000..e1f682a
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildSpawnProcess.py
@@ -0,0 +1,16 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AbstractEbuildProcess import AbstractEbuildProcess
+
+class EbuildSpawnProcess(AbstractEbuildProcess):
+ """
+ Used by doebuild.spawn() to manage the spawned process.
+ """
+ _spawn_kwarg_names = AbstractEbuildProcess._spawn_kwarg_names + \
+ ('fakeroot_state',)
+
+ __slots__ = ('fakeroot_state', 'spawn_func')
+
+ def _spawn(self, args, **kwargs):
+ return self.spawn_func(args, env=self.settings.environ(), **kwargs)
diff --git a/portage_with_autodep/pym/_emerge/EventsAnalyser.py b/portage_with_autodep/pym/_emerge/EventsAnalyser.py
new file mode 100644
index 0000000..65ece7b
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EventsAnalyser.py
@@ -0,0 +1,511 @@
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage.dbapi._expand_new_virt import expand_new_virt
+from portage import os
+
+import subprocess
+import re
+
+class PortageUtils:
+ """ class for accessing the portage api """
+ def __init__(self, settings):
+ """ test """
+ self.settings=settings
+ self.vartree=portage.vartree(settings=settings)
+ self.vardbapi=portage.vardbapi(settings=settings, vartree=self.vartree)
+ self.portdbapi=portage.portdbapi(mysettings=settings)
+ self.metadata_keys = [k for k in portage.auxdbkeys if not k.startswith("UNUSED_")]
+ self.use=self.settings["USE"]
+
+ def get_best_visible_pkg(self,pkg):
+ """
+ Gets the best visible candidate for installation. Returns an empty string if none is found.
+
+ :param pkg: package name
+
+ """
+ try:
+ return self.portdbapi.xmatch("bestmatch-visible", pkg)
+ except Exception: # any lookup failure simply means "not found"
+ return ''
+
+ # non-recursive dependency getter
+ def get_dep(self,pkg,dep_type=["RDEPEND","DEPEND"]):
+ """
+ Gets the direct dependencies of a package by looking in the portage db.
+
+ :param pkg: name of package
+ :param dep_type: type of dependencies to recurse. Can be ["DEPEND"] or
+ ["RDEPEND", "DEPEND"]
+ :returns: **set** of package names
+ """
+ ret=set()
+
+ pkg = self.get_best_visible_pkg(pkg)
+ if not pkg:
+ return ret
+
+ # we found the best visible match in common tree
+
+
+ metadata = dict(zip(self.metadata_keys,
+ self.portdbapi.aux_get(pkg, self.metadata_keys)))
+ dep_str = " ".join(metadata[k] for k in dep_type)
+
+ # the IUSE defaults are very important for us
+ iuse_defaults=[
+ u[1:] for u in metadata.get("IUSE",'').split() if u.startswith("+")]
+
+ use=self.use.split()
+
+ for u in iuse_defaults:
+ if u not in use:
+ use.append(u)
+
+ success, atoms = portage.dep_check(dep_str, None, self.settings,
+ myuse=use, myroot=self.settings["ROOT"],
+ trees={self.settings["ROOT"]:{"vartree":self.vartree, "porttree": self.vartree}})
+ if not success:
+ return ret
+
+ for atom in atoms:
+ atomname = self.vartree.dep_bestmatch(atom)
+
+ if not atomname:
+ continue
+
+ for unvirt_pkg in expand_new_virt(self.vardbapi,'='+atomname):
+ for pkg in self.vartree.dep_match(unvirt_pkg):
+ ret.add(pkg)
+
+ return ret
+
+ # recursive dependency getter
+ def get_deps(self,pkg,dep_type=["RDEPEND","DEPEND"]):
+ """
+ Gets the dependencies of a package at any depth (recursively).
+ All dependencies **must** be installed.
+
+ :param pkg: name of package
+ :param dep_type: type of dependencies to recurse. Can be ["DEPEND"] or
+ ["RDEPEND", "DEPEND"]
+ :returns: **set** of package names
+ """
+ ret=set()
+
+
+ # get porttree dependencies of the first package
+
+ pkg = self.portdbapi.xmatch("bestmatch-visible", pkg)
+ if not pkg:
+ return ret
+
+ known_packages=set()
+ unknown_packages=self.get_dep(pkg,dep_type)
+ ret=ret.union(unknown_packages)
+
+ while unknown_packages:
+ p=unknown_packages.pop()
+ if p in known_packages:
+ continue
+ known_packages.add(p)
+
+ metadata = dict(zip(self.metadata_keys, self.vardbapi.aux_get(p, self.metadata_keys)))
+
+ dep_str = " ".join(metadata[k] for k in dep_type)
+
+ # the IUSE defaults are very important for us
+ iuse_defaults=[
+ u[1:] for u in metadata.get("IUSE",'').split() if u.startswith("+")]
+
+ use=self.use.split()
+
+ for u in iuse_defaults:
+ if u not in use:
+ use.append(u)
+
+ success, atoms = portage.dep_check(dep_str, None, self.settings,
+ myuse=use, myroot=self.settings["ROOT"],
+ trees={self.settings["ROOT"]:{"vartree":self.vartree,"porttree": self.vartree}})
+
+ if not success:
+ continue
+
+ for atom in atoms:
+ atomname = self.vartree.dep_bestmatch(atom)
+ if not atomname:
+ continue
+
+ for unvirt_pkg in expand_new_virt(self.vardbapi,'='+atomname):
+ for pkg in self.vartree.dep_match(unvirt_pkg):
+ ret.add(pkg)
+ unknown_packages.add(pkg)
+ return ret
+
+ def get_deps_for_package_building(self, pkg):
+ """
+ Returns the buildtime dependencies of a package together with
+ the runtime dependencies of those buildtime dependencies.
+ """
+ buildtime_deps=self.get_dep(pkg, ["DEPEND"])
+ runtime_deps=set()
+ for dep in buildtime_deps:
+ runtime_deps=runtime_deps.union(self.get_deps(dep,["RDEPEND"]))
+
+ ret=buildtime_deps.union(runtime_deps)
+ return ret
+
+ def get_system_packages_list(self):
+ """
+ Returns all packages from the system set. They are always implicit dependencies.
+
+ :returns: **list** of package names
+ """
+ ret=[]
+ for atom in self.settings.packages:
+ for pre_pkg in self.vartree.dep_match(atom):
+ for unvirt_pkg in expand_new_virt(self.vardbapi,'='+pre_pkg):
+ for pkg in self.vartree.dep_match(unvirt_pkg):
+ ret.append(pkg)
+ return ret
+
+
+class GentoolkitUtils:
+ """
+ Interface to the qfile and qlist utilities. They are much faster than
+ the internal implementations.
+ """
+
+ @staticmethod
+ def getpackagesbyfiles(files):
+ """
+ :param files: list of filenames
+ :returns: **dictionary** file->package; a file that doesn't belong to
+ any package is not included as a key
+ """
+ ret={}
+ listtocheck=[]
+ for f in files:
+ if os.path.isdir(f):
+ ret[f]="directory"
+ else:
+ listtocheck.append(f)
+
+ try:
+ proc=subprocess.Popen(['qfile']+['--nocolor','--exact','','--from','-'],
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE,stderr=subprocess.PIPE,
+ bufsize=4096)
+
+ out,err=proc.communicate("\n".join(listtocheck).encode("utf8"))
+
+ lines=out.decode("utf8").split("\n")
+ line_re=re.compile(r"^([^ ]+)\s+\(([^)]+)\)$")
+ for line in lines:
+ if len(line)==0:
+ continue
+ match=line_re.match(line)
+ if match:
+ ret[match.group(2)]=match.group(1)
+ else:
+ portage.util.writemsg("Util qfile returned unparsable string: %s\n" % line)
+
+ except OSError as e:
+ portage.util.writemsg("Error while launching qfile: %s\n" % e)
+
+
+ return ret
+
+ @staticmethod
+ def getfilesbypackages(packagenames):
+ """
+
+ :param packagenames: list of package names
+ :returns: **list** of files belonging to the given packages
+ """
+ ret=[]
+ try:
+ proc=subprocess.Popen(['qlist']+['--nocolor',"--obj"]+packagenames,
+ stdout=subprocess.PIPE,stderr=subprocess.PIPE,
+ bufsize=4096)
+
+ out,err=proc.communicate()
+
+ ret=out.decode("utf8").split("\n")
+ if ret==['']:
+ ret=[]
+ except OSError as e:
+ portage.util.writemsg("Error while launching qfile: %s\n" % e)
+
+ return ret
+
+ @staticmethod
+ def get_all_packages_files():
+ """
+ Memory-hungry operation.
+
+ :returns: **set** of all files that belong to any installed package
+ """
+ ret=[]
+ try:
+ proc=subprocess.Popen(['qlist']+['--all',"--obj"],
+ stdout=subprocess.PIPE,stderr=subprocess.PIPE,
+ bufsize=4096)
+
+ out,err=proc.communicate()
+
+ ret=out.decode("utf8").split("\n")
+ except OSError as e:
+ portage.util.writemsg("Error while launching qfile: %s\n" % e)
+
+ return set(ret)
+
+class FilterProcGenerator:
+ def __init__(self, pkgname, settings):
+ portageutils=PortageUtils(settings=settings)
+
+ deps_all=portageutils.get_deps_for_package_building(pkgname)
+ deps_portage=portageutils.get_dep('portage',["RDEPEND"])
+
+ system_packages=portageutils.get_system_packages_list()
+
+ allfiles=GentoolkitUtils.get_all_packages_files()
+ portage.util.writemsg("All files list recieved, waiting for " \
+ "a list of allowed files\n")
+
+
+ allowedpkgs=system_packages+list(deps_portage)+list(deps_all)
+
+ allowedfiles=GentoolkitUtils.getfilesbypackages(allowedpkgs)
+
+ # manually add all python interpreters to this list
+ allowedfiles+=GentoolkitUtils.getfilesbypackages(['python'])
+ allowedfiles=set(allowedfiles)
+
+ deniedfiles=allfiles-allowedfiles
+
+ def filter_proc(eventname,filename,stage):
+ if filename in deniedfiles:
+ return False
+ return True
+
+ self.filter_proc=filter_proc
+ def get_filter_proc(self):
+ return self.filter_proc
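+
+# A hypothetical usage sketch (using names from this file and from
+# EventsLogger.py): build a filter for one package and hand it to the
+# logger so accesses outside the allowed file set are denied.
+#
+#     gen = FilterProcGenerator("app-shells/bash", settings)
+#     logger = EventsLogger(filter_proc=gen.get_filter_proc())
+#     logger.start()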
+
+class EventsAnalyser:
+ def __init__(self, pkgname, events, settings):
+ self.pkgname=pkgname
+ self.events=events
+ self.settings=settings
+ self.portageutils=PortageUtils(settings=settings)
+
+ self.deps_all=self.portageutils.get_deps_for_package_building(pkgname)
+ self.deps_direct=self.portageutils.get_dep(pkgname,["DEPEND"])
+ self.deps_portage=self.portageutils.get_dep('portage',["RDEPEND"])
+
+ self.system_packages=self.portageutils.get_system_packages_list()
+ # All analysis work happens here
+
+ # get unique filenames
+ filenames=set()
+ for stage in events:
+ succ_events=set(events[stage][0])
+ fail_events=set(events[stage][1])
+ filenames=filenames.union(succ_events)
+ filenames=filenames.union(fail_events)
+ filenames=list(filenames)
+
+ file_to_package=GentoolkitUtils.getpackagesbyfiles(filenames)
+ # This part is hard to read: it converts one complex structure
+ # (returned by getfsevents) into another complex structure that
+ # is better suited for generating output.
+ #
+ # The old structure is also used during output.
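+ #
+ # The resulting layout (inferred from the code below) is:
+ #
+ #     packagesinfo[package][stage][filename] = {
+ #         "found":    [was_read, was_written],
+ #         "notfound": [was_not_found, was_blocked],
+ #     }
+ #
+ # where either value may also be [] if no event of that kind occurred.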
+
+ packagesinfo={}
+
+ for stage in sorted(events):
+ succ_events=events[stage][0]
+ fail_events=events[stage][1]
+
+ for filename in succ_events:
+ if filename in file_to_package:
+ package=file_to_package[filename]
+ else:
+ package="unknown"
+
+ if package not in packagesinfo:
+ packagesinfo[package]={}
+ stageinfo=packagesinfo[package]
+ if stage not in stageinfo:
+ stageinfo[stage]={}
+
+ filesinfo=stageinfo[stage]
+ if filename not in filesinfo:
+ filesinfo[filename]={"found":[],"notfound":[]}
+ filesinfo[filename]["found"]=succ_events[filename]
+
+ for filename in fail_events:
+ if filename in file_to_package:
+ package=file_to_package[filename]
+ else:
+ package="unknown"
+ if package not in packagesinfo:
+ packagesinfo[package]={}
+ stageinfo=packagesinfo[package]
+ if stage not in stageinfo:
+ stageinfo[stage]={}
+
+ filesinfo=stageinfo[stage]
+ if filename not in filesinfo:
+ filesinfo[filename]={"found":[],"notfound":[]}
+ filesinfo[filename]["notfound"]=fail_events[filename]
+ self.packagesinfo=packagesinfo
+
+ def display(self):
+ portage.util.writemsg(
+ portage.output.colorize(
+ "WARN", "\nFile access report for %s:\n" % self.pkgname))
+
+ stagesorder={"clean":1,"setup":2,"unpack":3,"prepare":4,"configure":5,"compile":6,"test":7,
+ "install":8,"preinst":9,"postinst":10,"prerm":11,"postrm":12,"unknown":13}
+ packagesinfo=self.packagesinfo
+ # print information grouped by package
+ for package in sorted(packagesinfo):
+ # not showing special directory package
+ if package=="directory":
+ continue
+
+ if package=="unknown":
+ continue
+
+
+ is_pkg_in_dep=package in self.deps_all
+ is_pkg_in_portage_dep=package in self.deps_portage
+ is_pkg_in_system=package in self.system_packages
+ is_pkg_python="dev-lang/python" in package
+
+ stages=[]
+ for stage in sorted(packagesinfo[package].keys(), key=stagesorder.get):
+ if stage!="unknown":
+ stages.append(stage)
+
+ if len(stages)==0:
+ continue
+
+ filenames={}
+ for stage in stages:
+ for filename in packagesinfo[package][stage]:
+ if len(packagesinfo[package][stage][filename]["found"])!=0:
+ was_read,was_written=packagesinfo[package][stage][filename]["found"]
+ if filename not in filenames:
+ filenames[filename]=['ok',was_read,was_written]
+ else:
+ status, old_was_read, old_was_written=filenames[filename]
+ filenames[filename]=[
+   'ok',old_was_read | was_read, old_was_written | was_written
+ ]
+ if len(packagesinfo[package][stage][filename]["notfound"])!=0:
+ was_notfound,was_blocked=packagesinfo[package][stage][filename]["notfound"]
+ if filename not in filenames:
+ filenames[filename]=['err',was_notfound,was_blocked]
+ else:
+ status, old_was_notfound, old_was_blocked=filenames[filename]
+ filenames[filename]=[
+   'err',old_was_notfound | was_notfound, old_was_blocked | was_blocked
+ ]
+
+
+ if is_pkg_in_dep:
+ portage.util.writemsg("[OK]")
+ elif is_pkg_in_system:
+ portage.util.writemsg("[SYSTEM]")
+ elif is_pkg_in_portage_dep:
+ portage.util.writemsg("[PORTAGE DEP]")
+ elif is_pkg_python:
+ portage.util.writemsg("[INTERPRETER]")
+ elif not self.is_package_useful(package,stages,filenames.keys()):
+ portage.util.writemsg("[LIKELY OK]")
+ else:
+ portage.util.writemsg(portage.output.colorize("BAD", "[NOT IN DEPS]"))
+ # show information about accessed files
+
+ portage.util.writemsg(" %-40s: %s\n" % (package,stages))
+
+ # this is here for readability
+ action={
+  ('ok',False,False):"accessed",
+  ('ok',True,False):"read",
+  ('ok',False,True):"written",
+  ('ok',True,True):"read and written",
+  ('err',False,False):"other error",
+  ('err',True,False):"not found",
+  ('err',False,True):"blocked",
+  ('err',True,True):"not found and blocked"
+ }
+
+ filescounter=0
+
+ for filename in filenames:
+ if filescounter>=10:
+ # show at most 10 files, then summarize the rest
+ portage.util.writemsg(" ... and %d more ...\n" % (len(filenames)-10))
+ break
+ event_info=tuple(filenames[filename])
+ portage.util.writemsg(" %-56s %-21s\n" % (filename,action[event_info]))
+ filescounter+=1
+ # ... and one more check. Making sure that direct build time
+ # dependencies were accessed
+ not_accessed_deps=set(self.deps_direct)-set(self.packagesinfo.keys())
+ if not_accessed_deps:
+ portage.util.writemsg(portage.output.colorize("WARN", "!!! "))
+ portage.util.writemsg("Warning! Some build time dependencies " + \
+ "of packages were not accessed: " + \
+ " ".join(not_accessed_deps) + "\n")
+
+ def is_package_useful(self,pkg,stages,files):
+ """ some basic heuristics here to cut part of packages """
+
+ excluded_paths=set(
+ ['/etc/sandbox.d/']
+ )
+
+ excluded_packages=set(
+ # autodep shows these two packages every time
+ ['net-zope/zope-fixers', 'net-zope/zope-interface']
+ )
+
+
+ def is_pkg_excluded(p):
+ for pkg in excluded_packages:
+ if p.startswith(pkg): # if package is excluded
+ return True
+ return False
+
+
+ def is_file_excluded(f):
+ for path in excluded_paths:
+ if f.startswith(path): # if path is excluded
+ return True
+ return False
+
+
+ if is_pkg_excluded(pkg):
+ return False
+
+ for f in files:
+ if is_file_excluded(f):
+ continue
+
+ # test 1: a package is not useful if all accessed files are
+ # *.desktop, *.xml, *.m4 or *.pc files
+ if not (f.endswith(".desktop") or f.endswith(".xml") or f.endswith(".m4") or f.endswith(".pc")):
+ break
+ else:
+ return False # we get here only if the loop ended without break
+
+ return True
+
+ \ No newline at end of file
diff --git a/portage_with_autodep/pym/_emerge/EventsLogger.py b/portage_with_autodep/pym/_emerge/EventsLogger.py
new file mode 100644
index 0000000..68b3c67
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EventsLogger.py
@@ -0,0 +1,180 @@
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import stat
+import socket
+import select
+import tempfile
+
+import threading
+
+from portage import os
+
+class EventsLogger(threading.Thread):
+ def default_filter(eventname, filename, stage):
+ return True
+
+ def __init__(self, socket_dir="/tmp/", filter_proc=default_filter):
+ threading.Thread.__init__(self) # init the Thread
+
+ self.alive=False
+
+ self.main_thread=threading.currentThread()
+
+ self.socket_dir=socket_dir
+ self.filter_proc=filter_proc
+
+ self.socket_name=None
+ self.socket_logger=None
+
+ self.events={}
+
+ try:
+ socket_dir_name = tempfile.mkdtemp(dir=self.socket_dir,
+ prefix="log_socket_")
+
+ socket_name = os.path.join(socket_dir_name, 'socket')
+
+ except OSError as e:
+ return
+
+ self.socket_name=socket_name
+
+ try:
+ socket_logger=socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
+ socket_logger.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+
+ socket_logger.bind(self.socket_name)
+ socket_logger.listen(64)
+
+ except socket.error as e:
+ return
+
+ self.socket_logger=socket_logger
+
+ try:
+ # Allow anyone to connect to the socket
+ os.chmod(socket_dir_name,
+ stat.S_IRUSR|stat.S_IWUSR|stat.S_IXUSR|
+ stat.S_IROTH|stat.S_IWOTH|stat.S_IXOTH)
+ os.chmod(socket_name,
+ stat.S_IRUSR|stat.S_IWUSR|stat.S_IXUSR|
+ stat.S_IROTH|stat.S_IWOTH|stat.S_IXOTH)
+ except OSError as e:
+ return
+
+ def run(self):
+ """ Starts the log server """
+
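+ # Each record received below is assumed (from the parsing code in this
+ # method) to consist of "\0"-separated fields:
+ #
+ #     <unused>\0<eventname>\0<filename>\0<stage>\0<result>\0
+ #
+ # where <result> is "ASKING" for permission queries, or "OK",
+ # "ERR/<n>" or "DENIED" for completed events.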
+ self.alive=True
+ self.listen_thread=threading.currentThread()
+ clients={}
+
+ epoll=select.epoll()
+ epoll.register(self.socket_logger.fileno(), select.EPOLLIN)
+
+ while self.alive:
+ try:
+ sock_events = epoll.poll(3)
+
+ for fileno, sock_event in sock_events:
+ if fileno == self.socket_logger.fileno():
+ ret = self.socket_logger.accept()
+ if ret is None:
+ pass
+ else:
+ (client,addr)=ret
+ epoll.register(client.fileno(), select.EPOLLIN)
+ clients[client.fileno()]=client
+ elif sock_event & select.EPOLLIN:
+ s=clients[fileno]
+ record=s.recv(8192)
+
+ if not record: # if connection was closed
+ epoll.unregister(fileno)
+ clients[fileno].close()
+ del clients[fileno]
+ continue
+
+ try:
+ message=record.decode("utf8").split("\0")
+ except UnicodeDecodeError:
+ print("Bad message %s" % record)
+ continue
+
+
+ try:
+ if message[4]=="ASKING":
+ if self.filter_proc(message[1],message[2],message[3]):
+ s.sendall(b"ALLOW\0")
+ else:
+ # TODO: log through portage infrastructure
+ #print("Blocking an access to %s" % message[2])
+ s.sendall(b"DENY\0")
+ else:
+ eventname,filename,stage,result=message[1:5]
+
+ if stage not in self.events:
+ self.events[stage]=[{},{}]
+
+ hashofsuccesses=self.events[stage][0]
+ hashoffailures=self.events[stage][1]
+
+ if result=="DENIED":
+ print("Blocking access to %s" % filename)
+
+ if result=="OK":
+ if filename not in hashofsuccesses:
+ hashofsuccesses[filename]=[False,False]
+
+ read_or_written=hashofsuccesses[filename]
+
+ if eventname=="read":
+ read_or_written[0]=True
+ elif eventname=="write":
+ read_or_written[1]=True
+
+ elif result[0:3]=="ERR" or result=="DENIED":
+ if filename not in hashoffailures:
+ hashoffailures[filename]=[False,False]
+ notfound_or_blocked=hashoffailures[filename]
+
+ if result=="ERR/2":
+ notfound_or_blocked[0]=True
+ elif result=="DENIED":
+ notfound_or_blocked[1]=True
+
+ else:
+ print("Error in logger module<->analyser protocol")
+
+ except IndexError:
+ print("IndexError while parsing %s" % record)
+ except IOError as e:
+ if e.errno != errno.EINTR: # ignore "Interrupted system call"
+ raise
+
+ # exit if the main thread no longer exists
+ if not self.main_thread.is_alive():
+ break
+ epoll.unregister(self.socket_logger.fileno())
+ epoll.close()
+ self.socket_logger.close()
+
+ def stop(self):
+ """ Stops the log server. Returns all events """
+
+ self.alive=False
+
+ # Block the main thread until the listener exits
+ self.listen_thread.join()
+
+ # We assume portage clears the tmp folder, so we don't delete the socket file
+ # We assume that no new socket data will arrive after this moment
+ return self.events
diff --git a/portage_with_autodep/pym/_emerge/FakeVartree.py b/portage_with_autodep/pym/_emerge/FakeVartree.py
new file mode 100644
index 0000000..a11966f
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/FakeVartree.py
@@ -0,0 +1,265 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+
+import portage
+from portage import os
+from _emerge.Package import Package
+from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
+from portage.const import VDB_PATH
+from portage.dbapi.vartree import vartree
+from portage.repository.config import _gen_valid_repo
+from portage.update import grab_updates, parse_updates, update_dbentries
+
+if sys.hexversion >= 0x3000000:
+ long = int
+
+class FakeVardbapi(PackageVirtualDbapi):
+ """
+ Implements the vardbapi.getpath() method which is used in error handling
+ code for the Package class and vartree.get_provide().
+ """
+ def getpath(self, cpv, filename=None):
+ path = os.path.join(self.settings['EROOT'], VDB_PATH, cpv)
+ if filename is not None:
+ path = os.path.join(path, filename)
+ return path
+
+class FakeVartree(vartree):
+ """This is implements an in-memory copy of a vartree instance that provides
+ all the interfaces required for use by the depgraph. The vardb is locked
+ during the constructor call just long enough to read a copy of the
+ installed package information. This allows the depgraph to do its
+ dependency calculations without holding a lock on the vardb. It also
+ allows things like vardb global updates to be done in memory so that the
+ user doesn't necessarily need write access to the vardb in cases where
+ global updates are necessary (updates are performed when necessary if there
+ is not a matching ebuild in the tree). Instances of this class are not
+ populated until the sync() method is called."""
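+
+ # A hypothetical usage sketch (names from this file): snapshot the
+ # real vardb once, then re-sync after merges so the depgraph sees
+ # fresh installed-package state.
+ #
+ #     fake = FakeVartree(root_config)
+ #     fake.sync()       # copy installed-package state from the real vardb
+ #     pkgs = fake.dbapi.match("sys-apps/portage")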
+ def __init__(self, root_config, pkg_cache=None, pkg_root_config=None):
+ self._root_config = root_config
+ if pkg_root_config is None:
+ pkg_root_config = self._root_config
+ self._pkg_root_config = pkg_root_config
+ if pkg_cache is None:
+ pkg_cache = {}
+ real_vartree = root_config.trees["vartree"]
+ self._real_vardb = real_vartree.dbapi
+ portdb = root_config.trees["porttree"].dbapi
+ self.root = real_vartree.root
+ self.settings = real_vartree.settings
+ mykeys = list(real_vartree.dbapi._aux_cache_keys)
+ if "_mtime_" not in mykeys:
+ mykeys.append("_mtime_")
+ self._db_keys = mykeys
+ self._pkg_cache = pkg_cache
+ self.dbapi = FakeVardbapi(real_vartree.settings)
+
+ # Initialize variables needed for lazy cache pulls of the live ebuild
+ # metadata. This ensures that the vardb lock is released ASAP, without
+ # being delayed in case cache generation is triggered.
+ self._aux_get = self.dbapi.aux_get
+ self.dbapi.aux_get = self._aux_get_wrapper
+ self._match = self.dbapi.match
+ self.dbapi.match = self._match_wrapper
+ self._aux_get_history = set()
+ self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
+ self._portdb = portdb
+ self._global_updates = None
+
+ def _match_wrapper(self, cpv, use_cache=1):
+ """
+ Make sure the metadata in Package instances gets updated for any
+ cpv that is returned from a match() call, since the metadata can
+ be accessed directly from the Package instance instead of via
+ aux_get().
+ """
+ matches = self._match(cpv, use_cache=use_cache)
+ for cpv in matches:
+ if cpv in self._aux_get_history:
+ continue
+ self._aux_get_wrapper(cpv, [])
+ return matches
+
+ def _aux_get_wrapper(self, pkg, wants, myrepo=None):
+ if pkg in self._aux_get_history:
+ return self._aux_get(pkg, wants)
+ self._aux_get_history.add(pkg)
+ # We need to check the EAPI, and this also raises
+ # a KeyError to the caller if appropriate.
+ installed_eapi, repo = self._aux_get(pkg, ["EAPI", "repository"])
+ try:
+ # Use the live ebuild metadata if possible.
+ repo = _gen_valid_repo(repo)
+ live_metadata = dict(zip(self._portdb_keys,
+ self._portdb.aux_get(pkg, self._portdb_keys, myrepo=repo)))
+ # Use the metadata from the installed instance if the EAPI
+ # of either instance is unsupported, since if the installed
+ # instance has an unsupported or corrupt EAPI then we don't
+ # want to attempt to do complex operations such as execute
+ # pkg_config, pkg_prerm or pkg_postrm phases. If both EAPIs
+ # are supported then go ahead and use the live_metadata, in
+ # order to respect dep updates without revision bump or EAPI
+ # bump, as in bug #368725.
+ if not (portage.eapi_is_supported(live_metadata["EAPI"]) and \
+ portage.eapi_is_supported(installed_eapi)):
+ raise KeyError(pkg)
+ self.dbapi.aux_update(pkg, live_metadata)
+ except (KeyError, portage.exception.PortageException):
+ if self._global_updates is None:
+ self._global_updates = \
+ grab_global_updates(self._portdb)
+ perform_global_updates(
+ pkg, self.dbapi, self._global_updates)
+ return self._aux_get(pkg, wants)
+
+ def cpv_discard(self, pkg):
+ """
+ Discard a package from the fake vardb if it exists.
+ """
+ old_pkg = self.dbapi.get(pkg)
+ if old_pkg is not None:
+ self.dbapi.cpv_remove(old_pkg)
+ self._pkg_cache.pop(old_pkg, None)
+ self._aux_get_history.discard(old_pkg.cpv)
+
+ def sync(self, acquire_lock=1):
+ """
+ Call this method to synchronize state with the real vardb
+ after one or more packages may have been installed or
+ uninstalled.
+ """
+ locked = False
+ try:
+ if acquire_lock and os.access(self._real_vardb._dbroot, os.W_OK):
+ self._real_vardb.lock()
+ locked = True
+ self._sync()
+ finally:
+ if locked:
+ self._real_vardb.unlock()
+
+ # Populate the old-style virtuals using the cached values.
+ # Skip the aux_get wrapper here, to avoid unwanted
+ # cache generation.
+ try:
+ self.dbapi.aux_get = self._aux_get
+ self.settings._populate_treeVirtuals_if_needed(self)
+ finally:
+ self.dbapi.aux_get = self._aux_get_wrapper
+
+ def _sync(self):
+
+ real_vardb = self._root_config.trees["vartree"].dbapi
+ current_cpv_set = frozenset(real_vardb.cpv_all())
+ pkg_vardb = self.dbapi
+ pkg_cache = self._pkg_cache
+ aux_get_history = self._aux_get_history
+
+ # Remove any packages that have been uninstalled.
+ for pkg in list(pkg_vardb):
+ if pkg.cpv not in current_cpv_set:
+ self.cpv_discard(pkg)
+
+ # Validate counters and timestamps.
+ slot_counters = {}
+ root_config = self._pkg_root_config
+ validation_keys = ["COUNTER", "_mtime_"]
+ for cpv in current_cpv_set:
+
+ pkg_hash_key = Package._gen_hash_key(cpv=cpv,
+ installed=True, root_config=root_config,
+ type_name="installed")
+ pkg = pkg_vardb.get(pkg_hash_key)
+ if pkg is not None:
+ counter, mtime = real_vardb.aux_get(cpv, validation_keys)
+ try:
+ counter = long(counter)
+ except ValueError:
+ counter = 0
+
+ if counter != pkg.counter or \
+ mtime != pkg.mtime:
+ self.cpv_discard(pkg)
+ pkg = None
+
+ if pkg is None:
+ pkg = self._pkg(cpv)
+
+ other_counter = slot_counters.get(pkg.slot_atom)
+ if other_counter is not None:
+ if other_counter > pkg.counter:
+ continue
+
+ slot_counters[pkg.slot_atom] = pkg.counter
+ pkg_vardb.cpv_inject(pkg)
+
+ real_vardb.flush_cache()
+
+ def _pkg(self, cpv):
+ """
+ The RootConfig instance that will become the Package.root_config
+ attribute can be overridden by the FakeVartree pkg_root_config
+ constructor argument, since we want to be consistent with the
+ depgraph._pkg() method which uses a specially optimized
+ RootConfig that has a FakeVartree instead of a real vartree.
+ """
+ pkg = Package(cpv=cpv, built=True, installed=True,
+ metadata=zip(self._db_keys,
+ self._real_vardb.aux_get(cpv, self._db_keys)),
+ root_config=self._pkg_root_config,
+ type_name="installed")
+
+ try:
+ mycounter = long(pkg.metadata["COUNTER"])
+ except ValueError:
+ mycounter = 0
+ pkg.metadata["COUNTER"] = str(mycounter)
+
+ self._pkg_cache[pkg] = pkg
+ return pkg
+
+def grab_global_updates(portdb):
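+ # Returns a mapping repo_name -> list of parsed update commands, with
+ # a 'DEFAULT' alias pointing at the master repo's commands (a summary
+ # of the code below, not an official API description).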
+ retupdates = {}
+
+ for repo_name in portdb.getRepositories():
+ repo = portdb.getRepositoryPath(repo_name)
+ updpath = os.path.join(repo, "profiles", "updates")
+ if not os.path.isdir(updpath):
+ continue
+
+ try:
+ rawupdates = grab_updates(updpath)
+ except portage.exception.DirectoryNotFound:
+ rawupdates = []
+ upd_commands = []
+ for mykey, mystat, mycontent in rawupdates:
+ commands, errors = parse_updates(mycontent)
+ upd_commands.extend(commands)
+ retupdates[repo_name] = upd_commands
+
+ master_repo = portdb.getRepositoryName(portdb.porttree_root)
+ if master_repo in retupdates:
+ retupdates['DEFAULT'] = retupdates[master_repo]
+
+ return retupdates
+
+def perform_global_updates(mycpv, mydb, myupdates):
+ aux_keys = ["DEPEND", "RDEPEND", "PDEPEND", 'repository']
+ aux_dict = dict(zip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
+ repository = aux_dict.pop('repository')
+ try:
+ mycommands = myupdates[repository]
+ except KeyError:
+ try:
+ mycommands = myupdates['DEFAULT']
+ except KeyError:
+ return
+
+ if not mycommands:
+ return
+
+ updates = update_dbentries(mycommands, aux_dict)
+ if updates:
+ mydb.aux_update(mycpv, updates)
diff --git a/portage_with_autodep/pym/_emerge/FifoIpcDaemon.py b/portage_with_autodep/pym/_emerge/FifoIpcDaemon.py
new file mode 100644
index 0000000..a716dac
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/FifoIpcDaemon.py
@@ -0,0 +1,81 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from _emerge.AbstractPollTask import AbstractPollTask
+from portage.cache.mappings import slot_dict_class
+
+class FifoIpcDaemon(AbstractPollTask):
+
+ __slots__ = ("input_fifo", "output_fifo",) + \
+ ("_files", "_reg_id",)
+
+ _file_names = ("pipe_in",)
+ _files_dict = slot_dict_class(_file_names, prefix="")
+
+ def _start(self):
+ self._files = self._files_dict()
+ input_fd = os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)
+
+ # File streams are in unbuffered mode since we do atomic
+ # read and write of whole pickles.
+ self._files.pipe_in = os.fdopen(input_fd, 'rb', 0)
+
+ self._reg_id = self.scheduler.register(
+ self._files.pipe_in.fileno(),
+ self._registered_events, self._input_handler)
+
+ self._registered = True
+
+ def _reopen_input(self):
+ """
+ Re-open the input stream, in order to suppress
+ POLLHUP events (bug #339976).
+ """
+ self._files.pipe_in.close()
+ input_fd = os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)
+ self._files.pipe_in = os.fdopen(input_fd, 'rb', 0)
+ self.scheduler.unregister(self._reg_id)
+ self._reg_id = self.scheduler.register(
+ self._files.pipe_in.fileno(),
+ self._registered_events, self._input_handler)
+
+ def isAlive(self):
+ return self._registered
+
+ def _cancel(self):
+ if self.returncode is None:
+ self.returncode = 1
+ self._unregister()
+
+ def _wait(self):
+ if self.returncode is not None:
+ return self.returncode
+
+ if self._registered:
+ self.scheduler.schedule(self._reg_id)
+ self._unregister()
+
+ if self.returncode is None:
+ self.returncode = os.EX_OK
+
+ return self.returncode
+
+ def _input_handler(self, fd, event):
+ raise NotImplementedError(self)
+
+ def _unregister(self):
+ """
+ Unregister from the scheduler and close open files.
+ """
+
+ self._registered = False
+
+ if self._reg_id is not None:
+ self.scheduler.unregister(self._reg_id)
+ self._reg_id = None
+
+ if self._files is not None:
+ for f in self._files.values():
+ f.close()
+ self._files = None
diff --git a/portage_with_autodep/pym/_emerge/JobStatusDisplay.py b/portage_with_autodep/pym/_emerge/JobStatusDisplay.py
new file mode 100644
index 0000000..1949232
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/JobStatusDisplay.py
@@ -0,0 +1,292 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import formatter
+import io
+import sys
+import time
+
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.output import xtermTitle
+
+from _emerge.getloadavg import getloadavg
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+class JobStatusDisplay(object):
+
+ _bound_properties = ("curval", "failed", "running")
+
+ # Don't update the display unless at least this much
+ # time has passed, in units of seconds.
+ _min_display_latency = 2
+
+ _default_term_codes = {
+ 'cr' : '\r',
+ 'el' : '\x1b[K',
+ 'nel' : '\n',
+ }
+
+ _termcap_name_map = {
+ 'carriage_return' : 'cr',
+ 'clr_eol' : 'el',
+ 'newline' : 'nel',
+ }
+
+ def __init__(self, quiet=False, xterm_titles=True):
+ object.__setattr__(self, "quiet", quiet)
+ object.__setattr__(self, "xterm_titles", xterm_titles)
+ object.__setattr__(self, "maxval", 0)
+ object.__setattr__(self, "merges", 0)
+ object.__setattr__(self, "_changed", False)
+ object.__setattr__(self, "_displayed", False)
+ object.__setattr__(self, "_last_display_time", 0)
+
+ self.reset()
+
+ isatty = os.environ.get('TERM') != 'dumb' and \
+ hasattr(self.out, 'isatty') and \
+ self.out.isatty()
+ object.__setattr__(self, "_isatty", isatty)
+ if not isatty or not self._init_term():
+ term_codes = {}
+ for k, capname in self._termcap_name_map.items():
+ term_codes[k] = self._default_term_codes[capname]
+ object.__setattr__(self, "_term_codes", term_codes)
+ encoding = sys.getdefaultencoding()
+ for k, v in self._term_codes.items():
+ if not isinstance(v, basestring):
+ self._term_codes[k] = v.decode(encoding, 'replace')
+
+ if self._isatty:
+ width = portage.output.get_term_size()[1]
+ else:
+ width = 80
+ self._set_width(width)
+
+ def _set_width(self, width):
+ if width == getattr(self, 'width', None):
+ return
+ if width <= 0 or width > 80:
+ width = 80
+ object.__setattr__(self, "width", width)
+ object.__setattr__(self, "_jobs_column_width", width - 32)
+
+ @property
+ def out(self):
+ """Use a lazy reference to sys.stdout, in case the API consumer has
+ temporarily overridden stdout."""
+ return sys.stdout
+
+ def _write(self, s):
+ # avoid potential UnicodeEncodeError
+ s = _unicode_encode(s,
+ encoding=_encodings['stdio'], errors='backslashreplace')
+ out = self.out
+ if sys.hexversion >= 0x3000000:
+ out = out.buffer
+ out.write(s)
+ out.flush()
+
+ def _init_term(self):
+ """
+ Initialize term control codes.
+ @rtype: bool
+ @returns: True if term codes were successfully initialized,
+ False otherwise.
+ """
+
+ term_type = os.environ.get("TERM", "").strip()
+ if not term_type:
+ return False
+ tigetstr = None
+
+ try:
+ import curses
+ try:
+ curses.setupterm(term_type, self.out.fileno())
+ tigetstr = curses.tigetstr
+ except curses.error:
+ pass
+ except ImportError:
+ pass
+
+ if tigetstr is None:
+ return False
+
+ term_codes = {}
+ for k, capname in self._termcap_name_map.items():
+ code = tigetstr(capname)
+ if code is None:
+ code = self._default_term_codes[capname]
+ term_codes[k] = code
+ object.__setattr__(self, "_term_codes", term_codes)
+ return True
+
+ def _format_msg(self, msg):
+ return ">>> %s" % msg
+
+ def _erase(self):
+ self._write(
+ self._term_codes['carriage_return'] + \
+ self._term_codes['clr_eol'])
+ self._displayed = False
+
+ def _display(self, line):
+ self._write(line)
+ self._displayed = True
+
+ def _update(self, msg):
+
+ if not self._isatty:
+ self._write(self._format_msg(msg) + self._term_codes['newline'])
+ self._displayed = True
+ return
+
+ if self._displayed:
+ self._erase()
+
+ self._display(self._format_msg(msg))
+
+ def displayMessage(self, msg):
+
+ was_displayed = self._displayed
+
+ if self._isatty and self._displayed:
+ self._erase()
+
+ self._write(self._format_msg(msg) + self._term_codes['newline'])
+ self._displayed = False
+
+ if was_displayed:
+ self._changed = True
+ self.display()
+
+ def reset(self):
+ self.maxval = 0
+ self.merges = 0
+ for name in self._bound_properties:
+ object.__setattr__(self, name, 0)
+
+ if self._displayed:
+ self._write(self._term_codes['newline'])
+ self._displayed = False
+
+ def __setattr__(self, name, value):
+ old_value = getattr(self, name)
+ if value == old_value:
+ return
+ object.__setattr__(self, name, value)
+ if name in self._bound_properties:
+ self._property_change(name, old_value, value)
+
+ def _property_change(self, name, old_value, new_value):
+ self._changed = True
+ self.display()
+
+ def _load_avg_str(self):
+ try:
+ avg = getloadavg()
+ except OSError:
+ return 'unknown'
+
+ max_avg = max(avg)
+
+ if max_avg < 10:
+ digits = 2
+ elif max_avg < 100:
+ digits = 1
+ else:
+ digits = 0
+
+ return ", ".join(("%%.%df" % digits ) % x for x in avg)
+
+ def display(self):
+ """
+ Display status on stdout, but only if something has
+ changed since the last call.
+ """
+
+ if self.quiet:
+ return
+
+ current_time = time.time()
+ time_delta = current_time - self._last_display_time
+ if self._displayed and \
+ not self._changed:
+ if not self._isatty:
+ return
+ if time_delta < self._min_display_latency:
+ return
+
+ self._last_display_time = current_time
+ self._changed = False
+ self._display_status()
+
+ def _display_status(self):
+ # Don't use len(self._completed_tasks) here since that also
+ # can include uninstall tasks.
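+ # The rendered line, truncated to the terminal width further down,
+ # looks roughly like (example values):
+ #
+ #     Jobs: 2 of 11 complete, 1 running, 1 failed    Load avg: 0.58, 0.41, 0.30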
+ curval_str = str(self.curval)
+ maxval_str = str(self.maxval)
+ running_str = str(self.running)
+ failed_str = str(self.failed)
+ load_avg_str = self._load_avg_str()
+
+ color_output = io.StringIO()
+ plain_output = io.StringIO()
+ style_file = portage.output.ConsoleStyleFile(color_output)
+ style_file.write_listener = plain_output
+ style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
+ style_writer.style_listener = style_file.new_styles
+ f = formatter.AbstractFormatter(style_writer)
+
+ number_style = "INFORM"
+ f.add_literal_data(_unicode_decode("Jobs: "))
+ f.push_style(number_style)
+ f.add_literal_data(_unicode_decode(curval_str))
+ f.pop_style()
+ f.add_literal_data(_unicode_decode(" of "))
+ f.push_style(number_style)
+ f.add_literal_data(_unicode_decode(maxval_str))
+ f.pop_style()
+ f.add_literal_data(_unicode_decode(" complete"))
+
+ if self.running:
+ f.add_literal_data(_unicode_decode(", "))
+ f.push_style(number_style)
+ f.add_literal_data(_unicode_decode(running_str))
+ f.pop_style()
+ f.add_literal_data(_unicode_decode(" running"))
+
+ if self.failed:
+ f.add_literal_data(_unicode_decode(", "))
+ f.push_style(number_style)
+ f.add_literal_data(_unicode_decode(failed_str))
+ f.pop_style()
+ f.add_literal_data(_unicode_decode(" failed"))
+
+ padding = self._jobs_column_width - len(plain_output.getvalue())
+ if padding > 0:
+ f.add_literal_data(padding * _unicode_decode(" "))
+
+ f.add_literal_data(_unicode_decode("Load avg: "))
+ f.add_literal_data(_unicode_decode(load_avg_str))
+
+ # Truncate to fit width, to avoid making the terminal scroll if the
+ # line overflows (happens when the load average is large).
+ plain_output = plain_output.getvalue()
+ if self._isatty and len(plain_output) > self.width:
+ # Use plain_output here since it's easier to truncate
+ # properly than the color output which contains console
+ # color codes.
+ self._update(plain_output[:self.width])
+ else:
+ self._update(color_output.getvalue())
+
+ if self.xterm_titles:
+ xtermTitle(" ".join(plain_output.split()))
diff --git a/portage_with_autodep/pym/_emerge/MergeListItem.py b/portage_with_autodep/pym/_emerge/MergeListItem.py
new file mode 100644
index 0000000..2176bf6
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/MergeListItem.py
@@ -0,0 +1,135 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.output import colorize
+
+from _emerge.AsynchronousTask import AsynchronousTask
+from _emerge.Binpkg import Binpkg
+from _emerge.CompositeTask import CompositeTask
+from _emerge.EbuildBuild import EbuildBuild
+from _emerge.PackageUninstall import PackageUninstall
+
+class MergeListItem(CompositeTask):
+
+ """
+ TODO: For parallel scheduling, everything here needs asynchronous
+ execution support (start, poll, and wait methods).
+ """
+
+ __slots__ = ("args_set",
+ "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
+ "find_blockers", "logger", "mtimedb", "pkg",
+ "pkg_count", "pkg_to_replace", "prefetcher",
+ "settings", "statusMessage", "world_atom") + \
+ ("_install_task",)
+
+ def _start(self):
+
+ pkg = self.pkg
+ build_opts = self.build_opts
+
+ if pkg.installed:
+ # uninstall, executed by self.merge()
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ args_set = self.args_set
+ find_blockers = self.find_blockers
+ logger = self.logger
+ mtimedb = self.mtimedb
+ pkg_count = self.pkg_count
+ scheduler = self.scheduler
+ settings = self.settings
+ world_atom = self.world_atom
+ ldpath_mtimes = mtimedb["ldpath"]
+
+ action_desc = "Emerging"
+ preposition = "for"
+ if pkg.type_name == "binary":
+ action_desc += " binary"
+
+ if build_opts.fetchonly:
+ action_desc = "Fetching"
+
+ msg = "%s (%s of %s) %s" % \
+ (action_desc,
+ colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
+ colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
+ colorize("GOOD", pkg.cpv))
+
+ portdb = pkg.root_config.trees["porttree"].dbapi
+ portdir_repo_name = portdb.getRepositoryName(portdb.porttree_root)
+ if portdir_repo_name:
+ pkg_repo_name = pkg.repo
+ if pkg_repo_name != portdir_repo_name:
+ if pkg_repo_name == pkg.UNKNOWN_REPO:
+ pkg_repo_name = "unknown repo"
+ msg += " from %s" % pkg_repo_name
+
+ if pkg.root != "/":
+ msg += " %s %s" % (preposition, pkg.root)
+
+ if not build_opts.pretend:
+ self.statusMessage(msg)
+ logger.log(" >>> emerge (%s of %s) %s to %s" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
+
+ if pkg.type_name == "ebuild":
+
+ build = EbuildBuild(args_set=args_set,
+ background=self.background,
+ config_pool=self.config_pool,
+ find_blockers=find_blockers,
+ ldpath_mtimes=ldpath_mtimes, logger=logger,
+ opts=build_opts, pkg=pkg, pkg_count=pkg_count,
+ prefetcher=self.prefetcher, scheduler=scheduler,
+ settings=settings, world_atom=world_atom)
+
+ self._install_task = build
+ self._start_task(build, self._default_final_exit)
+ return
+
+ elif pkg.type_name == "binary":
+
+ binpkg = Binpkg(background=self.background,
+ find_blockers=find_blockers,
+ ldpath_mtimes=ldpath_mtimes, logger=logger,
+ opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
+ prefetcher=self.prefetcher, settings=settings,
+ scheduler=scheduler, world_atom=world_atom)
+
+ self._install_task = binpkg
+ self._start_task(binpkg, self._default_final_exit)
+ return
+
+ def create_install_task(self):
+
+ pkg = self.pkg
+ build_opts = self.build_opts
+ mtimedb = self.mtimedb
+ scheduler = self.scheduler
+ settings = self.settings
+ world_atom = self.world_atom
+ ldpath_mtimes = mtimedb["ldpath"]
+
+ if pkg.installed:
+ if not (build_opts.buildpkgonly or \
+ build_opts.fetchonly or build_opts.pretend):
+
+ task = PackageUninstall(background=self.background,
+ ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
+ pkg=pkg, scheduler=scheduler, settings=settings,
+ world_atom=world_atom)
+
+ else:
+ task = AsynchronousTask()
+
+ elif build_opts.fetchonly or \
+ build_opts.buildpkgonly:
+ task = AsynchronousTask()
+ else:
+ task = self._install_task.create_install_task()
+
+ return task
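+
+# A sketch of how the dispatch above is typically driven (hypothetical
+# caller code; the real call sites live in Scheduler.py):
+#
+#     item = MergeListItem(pkg=pkg, build_opts=build_opts, ...)
+#     item.start()                       # runs EbuildBuild or Binpkg
+#     install = item.create_install_task()
+#     install.start()
+#     install.wait()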
diff --git a/portage_with_autodep/pym/_emerge/MetadataRegen.py b/portage_with_autodep/pym/_emerge/MetadataRegen.py
new file mode 100644
index 0000000..8103175
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/MetadataRegen.py
@@ -0,0 +1,184 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
+from _emerge.PollScheduler import PollScheduler
+
+class MetadataRegen(PollScheduler):
+
+ def __init__(self, portdb, cp_iter=None, consumer=None,
+ max_jobs=None, max_load=None):
+ PollScheduler.__init__(self)
+ self._portdb = portdb
+ self._global_cleanse = False
+ if cp_iter is None:
+ cp_iter = self._iter_every_cp()
+ # We can globally cleanse stale cache only if we
+ # iterate over every single cp.
+ self._global_cleanse = True
+ self._cp_iter = cp_iter
+ self._consumer = consumer
+
+ if max_jobs is None:
+ max_jobs = 1
+
+ self._max_jobs = max_jobs
+ self._max_load = max_load
+
+ self._valid_pkgs = set()
+ self._cp_set = set()
+ self._process_iter = self._iter_metadata_processes()
+ self.returncode = os.EX_OK
+ self._error_count = 0
+ self._running_tasks = set()
+
+ def _terminate_tasks(self):
+ while self._running_tasks:
+ self._running_tasks.pop().cancel()
+
+ def _iter_every_cp(self):
+ portage.writemsg_stdout("Listing available packages...\n")
+ every_cp = self._portdb.cp_all()
+ portage.writemsg_stdout("Regenerating cache entries...\n")
+ every_cp.sort(reverse=True)
+ try:
+ while not self._terminated_tasks:
+ yield every_cp.pop()
+ except IndexError:
+ pass
+
+ def _iter_metadata_processes(self):
+ portdb = self._portdb
+ valid_pkgs = self._valid_pkgs
+ cp_set = self._cp_set
+ consumer = self._consumer
+
+ for cp in self._cp_iter:
+ if self._terminated_tasks:
+ break
+ cp_set.add(cp)
+ portage.writemsg_stdout("Processing %s\n" % cp)
+ cpv_list = portdb.cp_list(cp)
+ for cpv in cpv_list:
+ if self._terminated_tasks:
+ break
+ valid_pkgs.add(cpv)
+ ebuild_path, repo_path = portdb.findname2(cpv)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % cpv)
+ metadata, st, emtime = portdb._pull_valid_cache(
+ cpv, ebuild_path, repo_path)
+ if metadata is not None:
+ if consumer is not None:
+ consumer(cpv, ebuild_path,
+ repo_path, metadata)
+ continue
+
+ yield EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
+ ebuild_mtime=emtime,
+ metadata_callback=portdb._metadata_callback,
+ portdb=portdb, repo_path=repo_path,
+ settings=portdb.doebuild_settings)
+
+ def run(self):
+
+ portdb = self._portdb
+ from portage.cache.cache_errors import CacheError
+ dead_nodes = {}
+
+ while self._schedule():
+ self._poll_loop()
+
+ while self._jobs:
+ self._poll_loop()
+
+ if self._terminated_tasks:
+ self.returncode = 1
+ return
+
+ if self._global_cleanse:
+ for mytree in portdb.porttrees:
+ try:
+ dead_nodes[mytree] = set(portdb.auxdb[mytree])
+ except CacheError as e:
+ portage.writemsg("Error listing cache entries for " + \
+ "'%s': %s, continuing...\n" % (mytree, e),
+ noiselevel=-1)
+ del e
+ dead_nodes = None
+ break
+ else:
+ cp_set = self._cp_set
+ cpv_getkey = portage.cpv_getkey
+ for mytree in portdb.porttrees:
+ try:
+ dead_nodes[mytree] = set(cpv for cpv in \
+ portdb.auxdb[mytree] \
+ if cpv_getkey(cpv) in cp_set)
+ except CacheError as e:
+ portage.writemsg("Error listing cache entries for " + \
+ "'%s': %s, continuing...\n" % (mytree, e),
+ noiselevel=-1)
+ del e
+ dead_nodes = None
+ break
+
+ if dead_nodes:
+ for y in self._valid_pkgs:
+ for mytree in portdb.porttrees:
+ if portdb.findname2(y, mytree=mytree)[0]:
+ dead_nodes[mytree].discard(y)
+
+ for mytree, nodes in dead_nodes.items():
+ auxdb = portdb.auxdb[mytree]
+ for y in nodes:
+ try:
+ del auxdb[y]
+ except (KeyError, CacheError):
+ pass
+
+ def _schedule_tasks(self):
+ """
+ @rtype: bool
+ @returns: True if there may be remaining tasks to schedule,
+ False otherwise.
+ """
+ if self._terminated_tasks:
+ return False
+
+ while self._can_add_job():
+ try:
+ metadata_process = next(self._process_iter)
+ except StopIteration:
+ return False
+
+ self._jobs += 1
+ self._running_tasks.add(metadata_process)
+ metadata_process.scheduler = self.sched_iface
+ metadata_process.addExitListener(self._metadata_exit)
+ metadata_process.start()
+ return True
+
+ def _metadata_exit(self, metadata_process):
+ self._jobs -= 1
+ self._running_tasks.discard(metadata_process)
+ if metadata_process.returncode != os.EX_OK:
+ self.returncode = 1
+ self._error_count += 1
+ self._valid_pkgs.discard(metadata_process.cpv)
+ if not self._terminated_tasks:
+ portage.writemsg("Error processing %s, continuing...\n" % \
+ (metadata_process.cpv,), noiselevel=-1)
+
+ if self._consumer is not None:
+ # On failure, still notify the consumer (in this case the metadata
+ # argument is None).
+ self._consumer(metadata_process.cpv,
+ metadata_process.ebuild_path,
+ metadata_process.repo_path,
+ metadata_process.metadata)
+
+ self._schedule()
+
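+# A usage sketch (the portdb handle and consumer below are assumptions,
+# not taken from this file):
+#
+#     def consumer(cpv, ebuild_path, repo_path, metadata):
+#         # metadata is None when regeneration failed for cpv
+#         store(cpv, metadata)
+#
+#     regen = MetadataRegen(portdb, consumer=consumer, max_jobs=4)
+#     regen.run()
+#     sys.exit(regen.returncode)
+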
diff --git a/portage_with_autodep/pym/_emerge/MiscFunctionsProcess.py b/portage_with_autodep/pym/_emerge/MiscFunctionsProcess.py
new file mode 100644
index 0000000..ce0ab14
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/MiscFunctionsProcess.py
@@ -0,0 +1,33 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AbstractEbuildProcess import AbstractEbuildProcess
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.package.ebuild.doebuild:spawn'
+)
+from portage import os
+
+class MiscFunctionsProcess(AbstractEbuildProcess):
+ """
+ Spawns misc-functions.sh with an existing ebuild environment.
+ """
+
+ __slots__ = ('commands',)
+
+ def _start(self):
+ settings = self.settings
+ portage_bin_path = settings["PORTAGE_BIN_PATH"]
+ misc_sh_binary = os.path.join(portage_bin_path,
+ os.path.basename(portage.const.MISC_SH_BINARY))
+
+ self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
+ if self.logfile is None and \
+ self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ self.logfile = settings.get("PORTAGE_LOG_FILE")
+
+ AbstractEbuildProcess._start(self)
+
+ def _spawn(self, args, **kwargs):
+ self.settings.pop("EBUILD_PHASE", None)
+ return spawn(" ".join(args), self.settings, **kwargs)
diff --git a/portage_with_autodep/pym/_emerge/Package.py b/portage_with_autodep/pym/_emerge/Package.py
new file mode 100644
index 0000000..20c72b4
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/Package.py
@@ -0,0 +1,700 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+from itertools import chain
+import portage
+from portage import _encodings, _unicode_decode, _unicode_encode
+from portage.cache.mappings import slot_dict_class
+from portage.const import EBUILD_PHASES
+from portage.dep import Atom, check_required_use, use_reduce, \
+ paren_enclose, _slot_re, _slot_separator, _repo_separator
+from portage.eapi import eapi_has_iuse_defaults, eapi_has_required_use
+from portage.exception import InvalidDependString
+from portage.repository.config import _gen_valid_repo
+from _emerge.Task import Task
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+ long = int
+
+class Package(Task):
+
+ __hash__ = Task.__hash__
+ __slots__ = ("built", "cpv", "depth",
+ "installed", "metadata", "onlydeps", "operation",
+ "root_config", "type_name",
+ "category", "counter", "cp", "cpv_split",
+ "inherited", "invalid", "iuse", "masks", "mtime",
+ "pf", "pv_split", "root", "slot", "slot_atom", "visible",) + \
+ ("_raw_metadata", "_use",)
+
+ metadata_keys = [
+ "BUILD_TIME", "CHOST", "COUNTER", "DEPEND", "EAPI",
+ "INHERITED", "IUSE", "KEYWORDS",
+ "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
+ "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE",
+ "_mtime_", "DEFINED_PHASES", "REQUIRED_USE"]
+
+ _dep_keys = ('DEPEND', 'PDEPEND', 'RDEPEND',)
+ _use_conditional_misc_keys = ('LICENSE', 'PROPERTIES', 'RESTRICT')
+ UNKNOWN_REPO = "__unknown__"
+
+ def __init__(self, **kwargs):
+ Task.__init__(self, **kwargs)
+ # The SlotObject constructor assigns self.root_config from keyword
+ # args; it is an instance of the _emerge.RootConfig.RootConfig class.
+ self.root = self.root_config.root
+ self._raw_metadata = _PackageMetadataWrapperBase(self.metadata)
+ self.metadata = _PackageMetadataWrapper(self, self._raw_metadata)
+ if not self.built:
+ self.metadata['CHOST'] = self.root_config.settings.get('CHOST', '')
+ self.cp = portage.cpv_getkey(self.cpv)
+ slot = self.slot
+ if _slot_re.match(slot) is None:
+ self._invalid_metadata('SLOT.invalid',
+ "SLOT: invalid value: '%s'" % slot)
+ # Avoid an InvalidAtom exception when creating slot_atom.
+ # This package instance will be masked due to empty SLOT.
+ slot = '0'
+ if (self.iuse.enabled or self.iuse.disabled) and \
+ not eapi_has_iuse_defaults(self.metadata["EAPI"]):
+ if not self.installed:
+ self._invalid_metadata('EAPI.incompatible',
+ "IUSE contains defaults, but EAPI doesn't allow them")
+ self.slot_atom = portage.dep.Atom("%s%s%s" % (self.cp, _slot_separator, slot))
+ self.category, self.pf = portage.catsplit(self.cpv)
+ self.cpv_split = portage.catpkgsplit(self.cpv)
+ self.pv_split = self.cpv_split[1:]
+ if self.inherited is None:
+ self.inherited = frozenset()
+ repo = _gen_valid_repo(self.metadata.get('repository', ''))
+ if not repo:
+ repo = self.UNKNOWN_REPO
+ self.metadata['repository'] = repo
+
+ self._validate_deps()
+ self.masks = self._masks()
+ self.visible = self._visible(self.masks)
+ if self.operation is None:
+ if self.onlydeps or self.installed:
+ self.operation = "nomerge"
+ else:
+ self.operation = "merge"
+
+ self._hash_key = Package._gen_hash_key(cpv=self.cpv,
+ installed=self.installed, onlydeps=self.onlydeps,
+ operation=self.operation, repo_name=repo,
+ root_config=self.root_config,
+ type_name=self.type_name)
+ self._hash_value = hash(self._hash_key)
+
+ @classmethod
+ def _gen_hash_key(cls, cpv=None, installed=None, onlydeps=None,
+ operation=None, repo_name=None, root_config=None,
+ type_name=None, **kwargs):
+
+ if operation is None:
+ if installed or onlydeps:
+ operation = "nomerge"
+ else:
+ operation = "merge"
+
+ root = None
+ if root_config is not None:
+ root = root_config.root
+ else:
+ raise TypeError("root_config argument is required")
+
+ if type_name is None:
+ raise TypeError("type_name argument is required")
+ elif type_name == "ebuild":
+ if repo_name is None:
+ raise AssertionError(
+ "Package._gen_hash_key() " + \
+ "called without 'repo_name' argument")
+ repo_key = repo_name
+ else:
+ # For installed (and binary) packages we don't care for the repo
+ # when it comes to hashing, because there can only be one cpv.
+ # So overwrite the repo_key with type_name.
+ repo_key = type_name
+
+ return (type_name, root, cpv, operation, repo_key)
+
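+ # For illustration (the values are hypothetical), the tuples produced
+ # above look like:
+ #   ("ebuild", "/", "sys-apps/foo-1.0", "merge", "gentoo")
+ #   ("installed", "/", "sys-apps/foo-1.0", "nomerge", "installed")
+ # so two ebuilds of the same cpv from different repos hash apart,
+ # while installed and binary packages collapse onto their type_name.
+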
+ def _validate_deps(self):
+ """
+ Validate deps. This does not trigger USE calculation since that
+ is expensive for ebuilds and therefore we want to avoid doing
+ it unnecessarily (like for masked packages).
+ """
+ eapi = self.metadata['EAPI']
+ dep_eapi = eapi
+ dep_valid_flag = self.iuse.is_valid_flag
+ if self.installed:
+ # Ignore EAPI.incompatible and conditionals missing
+ # from IUSE for installed packages since these issues
+ # aren't relevant now (re-evaluate when new EAPIs are
+ # deployed).
+ dep_eapi = None
+ dep_valid_flag = None
+
+ for k in self._dep_keys:
+ v = self.metadata.get(k)
+ if not v:
+ continue
+ try:
+ use_reduce(v, eapi=dep_eapi, matchall=True,
+ is_valid_flag=dep_valid_flag, token_class=Atom)
+ except InvalidDependString as e:
+ self._metadata_exception(k, e)
+
+ k = 'PROVIDE'
+ v = self.metadata.get(k)
+ if v:
+ try:
+ use_reduce(v, eapi=dep_eapi, matchall=True,
+ is_valid_flag=dep_valid_flag, token_class=Atom)
+ except InvalidDependString as e:
+ self._invalid_metadata("PROVIDE.syntax",
+ _unicode_decode("%s: %s") % (k, e))
+
+ for k in self._use_conditional_misc_keys:
+ v = self.metadata.get(k)
+ if not v:
+ continue
+ try:
+ use_reduce(v, eapi=dep_eapi, matchall=True,
+ is_valid_flag=dep_valid_flag)
+ except InvalidDependString as e:
+ self._metadata_exception(k, e)
+
+ k = 'REQUIRED_USE'
+ v = self.metadata.get(k)
+ if v:
+ if not eapi_has_required_use(eapi):
+ self._invalid_metadata('EAPI.incompatible',
+ "REQUIRED_USE set, but EAPI='%s' doesn't allow it" % eapi)
+ else:
+ try:
+ check_required_use(v, (),
+ self.iuse.is_valid_flag)
+ except InvalidDependString as e:
+ # Force unicode format string for python-2.x safety,
+ # ensuring that PortageException.__unicode__() is used
+ # when necessary.
+ self._invalid_metadata(k + ".syntax",
+ _unicode_decode("%s: %s") % (k, e))
+
+ k = 'SRC_URI'
+ v = self.metadata.get(k)
+ if v:
+ try:
+ use_reduce(v, is_src_uri=True, eapi=eapi, matchall=True,
+ is_valid_flag=self.iuse.is_valid_flag)
+ except InvalidDependString as e:
+ if not self.installed:
+ self._metadata_exception(k, e)
+
+ def copy(self):
+ return Package(built=self.built, cpv=self.cpv, depth=self.depth,
+ installed=self.installed, metadata=self._raw_metadata,
+ onlydeps=self.onlydeps, operation=self.operation,
+ root_config=self.root_config, type_name=self.type_name)
+
+ def _masks(self):
+ masks = {}
+ settings = self.root_config.settings
+
+ if self.invalid is not None:
+ masks['invalid'] = self.invalid
+
+ if not settings._accept_chost(self.cpv, self.metadata):
+ masks['CHOST'] = self.metadata['CHOST']
+
+ eapi = self.metadata["EAPI"]
+ if not portage.eapi_is_supported(eapi):
+ masks['EAPI.unsupported'] = eapi
+ if portage._eapi_is_deprecated(eapi):
+ masks['EAPI.deprecated'] = eapi
+
+ missing_keywords = settings._getMissingKeywords(
+ self.cpv, self.metadata)
+ if missing_keywords:
+ masks['KEYWORDS'] = missing_keywords
+
+ try:
+ missing_properties = settings._getMissingProperties(
+ self.cpv, self.metadata)
+ if missing_properties:
+ masks['PROPERTIES'] = missing_properties
+ except InvalidDependString:
+ # already recorded as 'invalid'
+ pass
+
+ mask_atom = settings._getMaskAtom(self.cpv, self.metadata)
+ if mask_atom is not None:
+ masks['package.mask'] = mask_atom
+
+ system_mask = settings._getProfileMaskAtom(
+ self.cpv, self.metadata)
+ if system_mask is not None:
+ masks['profile.system'] = system_mask
+
+ try:
+ missing_licenses = settings._getMissingLicenses(
+ self.cpv, self.metadata)
+ if missing_licenses:
+ masks['LICENSE'] = missing_licenses
+ except InvalidDependString:
+ # already recorded as 'invalid'
+ pass
+
+ if not masks:
+ masks = None
+
+ return masks
+
+ def _visible(self, masks):
+
+ if masks is not None:
+
+ if 'EAPI.unsupported' in masks:
+ return False
+
+ if 'invalid' in masks:
+ return False
+
+ if not self.installed and ( \
+ 'CHOST' in masks or \
+ 'EAPI.deprecated' in masks or \
+ 'KEYWORDS' in masks or \
+ 'PROPERTIES' in masks):
+ return False
+
+ if 'package.mask' in masks or \
+ 'profile.system' in masks or \
+ 'LICENSE' in masks:
+ return False
+
+ return True
+
+ def get_keyword_mask(self):
+ """returns None, 'missing', or 'unstable'."""
+
+ missing = self.root_config.settings._getRawMissingKeywords(
+ self.cpv, self.metadata)
+
+ if not missing:
+ return None
+
+ if '**' in missing:
+ return 'missing'
+
+ global_accept_keywords = frozenset(
+ self.root_config.settings.get("ACCEPT_KEYWORDS", "").split())
+
+ for keyword in missing:
+ if keyword.lstrip("~") in global_accept_keywords:
+ return 'unstable'
+
+ return 'missing'
+
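+ # For example (hypothetical keywords): with ACCEPT_KEYWORDS="amd64",
+ # a package whose only missing keyword is "~amd64" is reported as
+ # 'unstable', while one keyworded only for "x86" is 'missing'.
+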
+ def isHardMasked(self):
+ """returns a bool if the cpv is in the list of
+ expanded pmaskdict[cp] available ebuilds"""
+ pmask = self.root_config.settings._getRawMaskAtom(
+ self.cpv, self.metadata)
+ return pmask is not None
+
+ def _metadata_exception(self, k, e):
+
+ # For unicode safety with python-2.x we need to avoid
+ # using the string format operator with a non-unicode
+ # format string, since that will result in the
+ # PortageException.__str__() method being invoked,
+ # followed by unsafe decoding that may result in a
+ # UnicodeDecodeError. Therefore, use _unicode_decode()
+ # to ensure that format strings are unicode, so that
+ # PortageException.__unicode__() is used when necessary
+ # in python-2.x.
+ if not self.installed:
+ categorized_error = False
+ if e.errors:
+ for error in e.errors:
+ if getattr(error, 'category', None) is None:
+ continue
+ categorized_error = True
+ self._invalid_metadata(error.category,
+ _unicode_decode("%s: %s") % (k, error))
+
+ if not categorized_error:
+ self._invalid_metadata(k + ".syntax",
+ _unicode_decode("%s: %s") % (k, e))
+ else:
+ # For installed packages, show the path of the file
+ # containing the invalid metadata, since the user may
+ # want to fix the deps by hand.
+ vardb = self.root_config.trees['vartree'].dbapi
+ path = vardb.getpath(self.cpv, filename=k)
+ self._invalid_metadata(k + ".syntax",
+ _unicode_decode("%s: %s in '%s'") % (k, e, path))
+
+ def _invalid_metadata(self, msg_type, msg):
+ if self.invalid is None:
+ self.invalid = {}
+ msgs = self.invalid.get(msg_type)
+ if msgs is None:
+ msgs = []
+ self.invalid[msg_type] = msgs
+ msgs.append(msg)
+
+ def __str__(self):
+ if self.operation == "merge":
+ if self.type_name == "binary":
+ cpv_color = "PKG_BINARY_MERGE"
+ else:
+ cpv_color = "PKG_MERGE"
+ elif self.operation == "uninstall":
+ cpv_color = "PKG_UNINSTALL"
+ else:
+ cpv_color = "PKG_NOMERGE"
+
+ s = "(%s, %s" \
+ % (portage.output.colorize(cpv_color, self.cpv + _repo_separator + self.repo) , self.type_name)
+
+ if self.type_name == "installed":
+ if self.root != "/":
+ s += " in '%s'" % self.root
+ if self.operation == "uninstall":
+ s += " scheduled for uninstall"
+ else:
+ if self.operation == "merge":
+ s += " scheduled for merge"
+ if self.root != "/":
+ s += " to '%s'" % self.root
+ s += ")"
+ return s
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['content'])
+
+ class _use_class(object):
+
+ __slots__ = ("enabled", "_expand", "_expand_hidden",
+ "_force", "_pkg", "_mask")
+
+ # Share identical frozenset instances when available.
+ _frozensets = {}
+
+ def __init__(self, pkg, use_str):
+ self._pkg = pkg
+ self._expand = None
+ self._expand_hidden = None
+ self._force = None
+ self._mask = None
+ self.enabled = frozenset(use_str.split())
+ if pkg.built:
+ # Use IUSE to validate USE settings for built packages,
+ # in case the package manager that built this package
+ # failed to do that for some reason (or in case of
+ # data corruption).
+ missing_iuse = pkg.iuse.get_missing_iuse(self.enabled)
+ if missing_iuse:
+ self.enabled = self.enabled.difference(missing_iuse)
+
+ def _init_force_mask(self):
+ pkgsettings = self._pkg._get_pkgsettings()
+ frozensets = self._frozensets
+ s = frozenset(
+ pkgsettings.get("USE_EXPAND", "").lower().split())
+ self._expand = frozensets.setdefault(s, s)
+ s = frozenset(
+ pkgsettings.get("USE_EXPAND_HIDDEN", "").lower().split())
+ self._expand_hidden = frozensets.setdefault(s, s)
+ s = pkgsettings.useforce
+ self._force = frozensets.setdefault(s, s)
+ s = pkgsettings.usemask
+ self._mask = frozensets.setdefault(s, s)
+
+ @property
+ def expand(self):
+ if self._expand is None:
+ self._init_force_mask()
+ return self._expand
+
+ @property
+ def expand_hidden(self):
+ if self._expand_hidden is None:
+ self._init_force_mask()
+ return self._expand_hidden
+
+ @property
+ def force(self):
+ if self._force is None:
+ self._init_force_mask()
+ return self._force
+
+ @property
+ def mask(self):
+ if self._mask is None:
+ self._init_force_mask()
+ return self._mask
+
+ @property
+ def repo(self):
+ return self.metadata['repository']
+
+ @property
+ def repo_priority(self):
+ repo_info = self.root_config.settings.repositories.prepos.get(self.repo)
+ if repo_info is None:
+ return None
+ return repo_info.priority
+
+ @property
+ def use(self):
+ if self._use is None:
+ self.metadata._init_use()
+ return self._use
+
+ def _get_pkgsettings(self):
+ pkgsettings = self.root_config.trees[
+ 'porttree'].dbapi.doebuild_settings
+ pkgsettings.setcpv(self)
+ return pkgsettings
+
+ class _iuse(object):
+
+ __slots__ = ("__weakref__", "all", "enabled", "disabled",
+ "tokens") + ("_iuse_implicit_match",)
+
+ def __init__(self, tokens, iuse_implicit_match):
+ self.tokens = tuple(tokens)
+ self._iuse_implicit_match = iuse_implicit_match
+ enabled = []
+ disabled = []
+ other = []
+ for x in tokens:
+ prefix = x[:1]
+ if prefix == "+":
+ enabled.append(x[1:])
+ elif prefix == "-":
+ disabled.append(x[1:])
+ else:
+ other.append(x)
+ self.enabled = frozenset(enabled)
+ self.disabled = frozenset(disabled)
+ self.all = frozenset(chain(enabled, disabled, other))
+
+ def is_valid_flag(self, flags):
+ """
+ @returns: True if all flags are valid USE values which may
+ be specified in USE dependencies, False otherwise.
+ """
+ if isinstance(flags, basestring):
+ flags = [flags]
+
+ for flag in flags:
+ if not flag in self.all and \
+ not self._iuse_implicit_match(flag):
+ return False
+ return True
+
+ def get_missing_iuse(self, flags):
+ """
+ @returns: A list of flags missing from IUSE.
+ """
+ if isinstance(flags, basestring):
+ flags = [flags]
+ missing_iuse = []
+ for flag in flags:
+ if not flag in self.all and \
+ not self._iuse_implicit_match(flag):
+ missing_iuse.append(flag)
+ return missing_iuse
+
+ def __len__(self):
+ return 4
+
+ def __iter__(self):
+ """
+ This is used to generate mtimedb resume mergelist entries, so we
+ limit it to 4 items for backward compatibility.
+ """
+ return iter(self._hash_key[:4])
+
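+ # e.g. list(pkg) -> ["ebuild", "/", "sys-apps/foo-1.0", "merge"]
+ # (hypothetical values); the trailing repo_key is dropped so that
+ # older mtimedb resume lists keep working.
+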
+ def __lt__(self, other):
+ if other.cp != self.cp:
+ return False
+ if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
+ return True
+ return False
+
+ def __le__(self, other):
+ if other.cp != self.cp:
+ return False
+ if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
+ return True
+ return False
+
+ def __gt__(self, other):
+ if other.cp != self.cp:
+ return False
+ if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
+ return True
+ return False
+
+ def __ge__(self, other):
+ if other.cp != self.cp:
+ return False
+ if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
+ return True
+ return False
+
+_all_metadata_keys = set(x for x in portage.auxdbkeys \
+ if not x.startswith("UNUSED_"))
+_all_metadata_keys.update(Package.metadata_keys)
+_all_metadata_keys = frozenset(_all_metadata_keys)
+
+_PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
+
+class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
+ """
+ Detect metadata updates and synchronize Package attributes.
+ """
+
+ __slots__ = ("_pkg",)
+ _wrapped_keys = frozenset(
+ ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
+ _use_conditional_keys = frozenset(
+ ['LICENSE', 'PROPERTIES', 'PROVIDE', 'RESTRICT',])
+
+ def __init__(self, pkg, metadata):
+ _PackageMetadataWrapperBase.__init__(self)
+ self._pkg = pkg
+ if not pkg.built:
+ # USE is lazy, but we want it to show up in self.keys().
+ _PackageMetadataWrapperBase.__setitem__(self, 'USE', '')
+
+ self.update(metadata)
+
+ def _init_use(self):
+ if self._pkg.built:
+ use_str = self['USE']
+ self._pkg._use = self._pkg._use_class(
+ self._pkg, use_str)
+ else:
+ try:
+ use_str = _PackageMetadataWrapperBase.__getitem__(self, 'USE')
+ except KeyError:
+ use_str = None
+ calculated_use = False
+ if not use_str:
+ use_str = self._pkg._get_pkgsettings()["PORTAGE_USE"]
+ calculated_use = True
+ _PackageMetadataWrapperBase.__setitem__(self, 'USE', use_str)
+ self._pkg._use = self._pkg._use_class(
+ self._pkg, use_str)
+ # Initialize these now, since USE access has just triggered
+ # setcpv, and we want to cache the result of the force/mask
+ # calculations that were done.
+ if calculated_use:
+ self._pkg._use._init_force_mask()
+
+ return use_str
+
+ def __getitem__(self, k):
+ v = _PackageMetadataWrapperBase.__getitem__(self, k)
+ if k in self._use_conditional_keys:
+ if self._pkg.root_config.settings.local_config and '?' in v:
+ try:
+ v = paren_enclose(use_reduce(v, uselist=self._pkg.use.enabled, \
+ is_valid_flag=self._pkg.iuse.is_valid_flag))
+ except InvalidDependString:
+ # This error should already have been registered via
+ # self._pkg._invalid_metadata().
+ pass
+ else:
+ self[k] = v
+
+ elif k == 'USE' and not self._pkg.built:
+ if not v:
+ # This is lazy because it's expensive.
+ v = self._init_use()
+
+ return v
+
+ def __setitem__(self, k, v):
+ _PackageMetadataWrapperBase.__setitem__(self, k, v)
+ if k in self._wrapped_keys:
+ getattr(self, "_set_" + k.lower())(k, v)
+
+ def _set_inherited(self, k, v):
+ if isinstance(v, basestring):
+ v = frozenset(v.split())
+ self._pkg.inherited = v
+
+ def _set_iuse(self, k, v):
+ self._pkg.iuse = self._pkg._iuse(
+ v.split(), self._pkg.root_config.settings._iuse_implicit_match)
+
+ def _set_slot(self, k, v):
+ self._pkg.slot = v
+
+ def _set_counter(self, k, v):
+ if isinstance(v, basestring):
+ try:
+ v = long(v.strip())
+ except ValueError:
+ v = 0
+ self._pkg.counter = v
+
+ def _set_use(self, k, v):
+ # Force regeneration of _use attribute
+ self._pkg._use = None
+ # Use raw metadata to restore USE conditional values
+ # to unevaluated state
+ raw_metadata = self._pkg._raw_metadata
+ for x in self._use_conditional_keys:
+ try:
+ self[x] = raw_metadata[x]
+ except KeyError:
+ pass
+
+ def _set__mtime_(self, k, v):
+ if isinstance(v, basestring):
+ try:
+ v = long(v.strip())
+ except ValueError:
+ v = 0
+ self._pkg.mtime = v
+
+ @property
+ def properties(self):
+ return self['PROPERTIES'].split()
+
+ @property
+ def restrict(self):
+ return self['RESTRICT'].split()
+
+ @property
+ def defined_phases(self):
+ """
+ Returns tokens from DEFINED_PHASES metadata if it is defined,
+ otherwise returns a tuple containing all possible phases. This
+ makes it easy to do containment checks to see if it's safe to
+ skip execution of a given phase.
+ """
+ s = self['DEFINED_PHASES']
+ if s:
+ return s.split()
+ return EBUILD_PHASES
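+
+ # Typical containment check (a sketch; "src_test" is illustrative):
+ #
+ #     if "src_test" not in pkg.metadata.defined_phases:
+ #         return  # safe to skip the test phase entirely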
diff --git a/portage_with_autodep/pym/_emerge/PackageArg.py b/portage_with_autodep/pym/_emerge/PackageArg.py
new file mode 100644
index 0000000..ebfe4b2
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/PackageArg.py
@@ -0,0 +1,19 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DependencyArg import DependencyArg
+from _emerge.Package import Package
+import portage
+from portage._sets.base import InternalPackageSet
+from portage.dep import _repo_separator
+
+class PackageArg(DependencyArg):
+ def __init__(self, package=None, **kwargs):
+ DependencyArg.__init__(self, **kwargs)
+ self.package = package
+ atom = "=" + package.cpv
+ if package.repo != Package.UNKNOWN_REPO:
+ atom += _repo_separator + package.repo
+ self.atom = portage.dep.Atom(atom, allow_repo=True)
+ self.pset = InternalPackageSet(initial_atoms=(self.atom,),
+ allow_repo=True)
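+
+# For a package from the "gentoo" repo (hypothetical cpv), the atom
+# built above is "=sys-apps/foo-1.0::gentoo"; when the repo is
+# Package.UNKNOWN_REPO, the "::repo" suffix is omitted.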
diff --git a/portage_with_autodep/pym/_emerge/PackageMerge.py b/portage_with_autodep/pym/_emerge/PackageMerge.py
new file mode 100644
index 0000000..f8fa04a
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/PackageMerge.py
@@ -0,0 +1,40 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.CompositeTask import CompositeTask
+from portage.output import colorize
+class PackageMerge(CompositeTask):
+ __slots__ = ("merge",)
+
+ def _start(self):
+
+ self.scheduler = self.merge.scheduler
+ pkg = self.merge.pkg
+ pkg_count = self.merge.pkg_count
+
+ if pkg.installed:
+ action_desc = "Uninstalling"
+ preposition = "from"
+ counter_str = ""
+ else:
+ action_desc = "Installing"
+ preposition = "to"
+ counter_str = "(%s of %s) " % \
+ (colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
+ colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)))
+
+ msg = "%s %s%s" % \
+ (action_desc,
+ counter_str,
+ colorize("GOOD", pkg.cpv))
+
+ if pkg.root != "/":
+ msg += " %s %s" % (preposition, pkg.root)
+
+ if not self.merge.build_opts.fetchonly and \
+ not self.merge.build_opts.pretend and \
+ not self.merge.build_opts.buildpkgonly:
+ self.merge.statusMessage(msg)
+
+ task = self.merge.create_install_task()
+ self._start_task(task, self._default_final_exit)
diff --git a/portage_with_autodep/pym/_emerge/PackageUninstall.py b/portage_with_autodep/pym/_emerge/PackageUninstall.py
new file mode 100644
index 0000000..eb6a947
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/PackageUninstall.py
@@ -0,0 +1,110 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+import portage
+from portage import os
+from portage.dbapi._MergeProcess import MergeProcess
+from portage.exception import UnsupportedAPIException
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.emergelog import emergelog
+from _emerge.CompositeTask import CompositeTask
+from _emerge.unmerge import _unmerge_display
+
+class PackageUninstall(CompositeTask):
+ """
+ Uninstall a package asynchronously in a subprocess. When
+ both parallel-install and ebuild-locks FEATURES are enabled,
+ it is essential for the ebuild-locks code to execute in a
+ subprocess, since the portage.locks module does not behave
+ as desired if we try to lock the same file multiple times
+ concurrently from the same process for ebuild-locks phases
+ such as pkg_setup, pkg_prerm, and pkg_postrm.
+ """
+
+ __slots__ = ("world_atom", "ldpath_mtimes", "opts",
+ "pkg", "settings", "_builddir_lock")
+
+ def _start(self):
+
+ vardb = self.pkg.root_config.trees["vartree"].dbapi
+ dbdir = vardb.getpath(self.pkg.cpv)
+ if not os.path.exists(dbdir):
+ # Apparently the package got uninstalled
+ # already, so we can safely return early.
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ self.settings.setcpv(self.pkg)
+ cat, pf = portage.catsplit(self.pkg.cpv)
+ myebuildpath = os.path.join(dbdir, pf + ".ebuild")
+
+ try:
+ portage.doebuild_environment(myebuildpath, "prerm",
+ settings=self.settings, db=vardb)
+ except UnsupportedAPIException:
+ # This is safe to ignore since this function is
+ # guaranteed to set PORTAGE_BUILDDIR even though
+ # it raises UnsupportedAPIException. The error
+ # will be logged when it prevents the pkg_prerm
+ # and pkg_postrm phases from executing.
+ pass
+
+ self._builddir_lock = EbuildBuildDir(
+ scheduler=self.scheduler, settings=self.settings)
+ self._builddir_lock.lock()
+
+ portage.prepare_build_dirs(
+ settings=self.settings, cleanup=True)
+
+ # Output only gets logged if it comes after prepare_build_dirs()
+ # which initializes PORTAGE_LOG_FILE.
+ retval, pkgmap = _unmerge_display(self.pkg.root_config,
+ self.opts, "unmerge", [self.pkg.cpv], clean_delay=0,
+ writemsg_level=self._writemsg_level)
+
+ if retval != os.EX_OK:
+ self._builddir_lock.unlock()
+ self.returncode = retval
+ self.wait()
+ return
+
+ self._writemsg_level(">>> Unmerging %s...\n" % (self.pkg.cpv,),
+ noiselevel=-1)
+ self._emergelog("=== Unmerging... (%s)" % (self.pkg.cpv,))
+
+ unmerge_task = MergeProcess(
+ mycat=cat, mypkg=pf, settings=self.settings,
+ treetype="vartree", vartree=self.pkg.root_config.trees["vartree"],
+ scheduler=self.scheduler, background=self.background,
+ mydbapi=self.pkg.root_config.trees["vartree"].dbapi,
+ prev_mtimes=self.ldpath_mtimes,
+ logfile=self.settings.get("PORTAGE_LOG_FILE"), unmerge=True)
+
+ self._start_task(unmerge_task, self._unmerge_exit)
+
+ def _unmerge_exit(self, unmerge_task):
+ if self._final_exit(unmerge_task) != os.EX_OK:
+ self._emergelog(" !!! unmerge FAILURE: %s" % (self.pkg.cpv,))
+ else:
+ self._emergelog(" >>> unmerge success: %s" % (self.pkg.cpv,))
+ self.world_atom(self.pkg)
+ self._builddir_lock.unlock()
+ self.wait()
+
+ def _emergelog(self, msg):
+ emergelog("notitles" not in self.settings.features, msg)
+
+ def _writemsg_level(self, msg, level=0, noiselevel=0):
+
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+ background = self.background
+
+ if log_path is None:
+ if not (background and level < logging.WARNING):
+ portage.util.writemsg_level(msg,
+ level=level, noiselevel=noiselevel)
+ else:
+ self.scheduler.output(msg, log_path=log_path,
+ level=level, noiselevel=noiselevel)
diff --git a/portage_with_autodep/pym/_emerge/PackageVirtualDbapi.py b/portage_with_autodep/pym/_emerge/PackageVirtualDbapi.py
new file mode 100644
index 0000000..a692bb6
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/PackageVirtualDbapi.py
@@ -0,0 +1,145 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+from portage.dbapi import dbapi
+
+class PackageVirtualDbapi(dbapi):
+ """
+ A dbapi-like interface class that represents the state of the installed
+ package database as new packages are installed, replacing any packages
+ that previously existed in the same slot. The main difference between
+ this class and fakedbapi is that this one uses Package instances
+ internally (passed in via cpv_inject() and cpv_remove() calls).
+ """
+ def __init__(self, settings):
+ dbapi.__init__(self)
+ self.settings = settings
+ self._match_cache = {}
+ self._cp_map = {}
+ self._cpv_map = {}
+
+ def clear(self):
+ """
+ Remove all packages.
+ """
+ if self._cpv_map:
+ self._clear_cache()
+ self._cp_map.clear()
+ self._cpv_map.clear()
+
+ def copy(self):
+ obj = PackageVirtualDbapi(self.settings)
+ obj._match_cache = self._match_cache.copy()
+ obj._cp_map = self._cp_map.copy()
+ for k, v in obj._cp_map.items():
+ obj._cp_map[k] = v[:]
+ obj._cpv_map = self._cpv_map.copy()
+ return obj
+
+ def __bool__(self):
+ return bool(self._cpv_map)
+
+ if sys.hexversion < 0x3000000:
+ __nonzero__ = __bool__
+
+ def __iter__(self):
+ return iter(self._cpv_map.values())
+
+ def __contains__(self, item):
+ existing = self._cpv_map.get(item.cpv)
+ if existing is not None and \
+ existing == item:
+ return True
+ return False
+
+ def get(self, item, default=None):
+ cpv = getattr(item, "cpv", None)
+ if cpv is None:
+ if len(item) != 5:
+ return default
+ type_name, root, cpv, operation, repo_key = item
+
+ existing = self._cpv_map.get(cpv)
+ if existing is not None and \
+ existing == item:
+ return existing
+ return default
+
+ def match_pkgs(self, atom):
+ return [self._cpv_map[cpv] for cpv in self.match(atom)]
+
+ def _clear_cache(self):
+ if self._categories is not None:
+ self._categories = None
+ if self._match_cache:
+ self._match_cache = {}
+
+ def match(self, origdep, use_cache=1):
+ result = self._match_cache.get(origdep)
+ if result is not None:
+ return result[:]
+ result = dbapi.match(self, origdep, use_cache=use_cache)
+ self._match_cache[origdep] = result
+ return result[:]
+
+ def cpv_exists(self, cpv, myrepo=None):
+ return cpv in self._cpv_map
+
+ def cp_list(self, mycp, use_cache=1):
+ cachelist = self._match_cache.get(mycp)
+ # cp_list() doesn't expand old-style virtuals
+ if cachelist and cachelist[0].startswith(mycp):
+ return cachelist[:]
+ cpv_list = self._cp_map.get(mycp)
+ if cpv_list is None:
+ cpv_list = []
+ else:
+ cpv_list = [pkg.cpv for pkg in cpv_list]
+ self._cpv_sort_ascending(cpv_list)
+ if not (not cpv_list and mycp.startswith("virtual/")):
+ self._match_cache[mycp] = cpv_list
+ return cpv_list[:]
+
+ def cp_all(self):
+ return list(self._cp_map)
+
+ def cpv_all(self):
+ return list(self._cpv_map)
+
+ def cpv_inject(self, pkg):
+ cp_list = self._cp_map.get(pkg.cp)
+ if cp_list is None:
+ cp_list = []
+ self._cp_map[pkg.cp] = cp_list
+ e_pkg = self._cpv_map.get(pkg.cpv)
+ if e_pkg is not None:
+ if e_pkg == pkg:
+ return
+ self.cpv_remove(e_pkg)
+ for e_pkg in cp_list:
+ if e_pkg.slot_atom == pkg.slot_atom:
+ if e_pkg == pkg:
+ return
+ self.cpv_remove(e_pkg)
+ break
+ cp_list.append(pkg)
+ self._cpv_map[pkg.cpv] = pkg
+ self._clear_cache()
+
+ def cpv_remove(self, pkg):
+ old_pkg = self._cpv_map.get(pkg.cpv)
+ if old_pkg != pkg:
+ raise KeyError(pkg)
+ self._cp_map[pkg.cp].remove(pkg)
+ del self._cpv_map[pkg.cpv]
+ self._clear_cache()
+
+ def aux_get(self, cpv, wants, myrepo=None):
+ metadata = self._cpv_map[cpv].metadata
+ return [metadata.get(x, "") for x in wants]
+
+ def aux_update(self, cpv, values):
+ self._cpv_map[cpv].metadata.update(values)
+ self._clear_cache()
+
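+# A short usage sketch (the packages are hypothetical): injecting a
+# package whose slot_atom matches an existing entry replaces it,
+# emulating installed-package database behavior across upgrades.
+#
+#     db = PackageVirtualDbapi(settings)
+#     db.cpv_inject(foo_1)   # sys-apps/foo-1.0, SLOT="0"
+#     db.cpv_inject(foo_2)   # sys-apps/foo-2.0, SLOT="0" -- replaces 1.0
+#     db.cpv_all()           # -> ["sys-apps/foo-2.0"]
+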
diff --git a/portage_with_autodep/pym/_emerge/PipeReader.py b/portage_with_autodep/pym/_emerge/PipeReader.py
new file mode 100644
index 0000000..375c98f
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/PipeReader.py
@@ -0,0 +1,96 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from _emerge.AbstractPollTask import AbstractPollTask
+from _emerge.PollConstants import PollConstants
+import fcntl
+import array
+
+class PipeReader(AbstractPollTask):
+
+ """
+ Reads output from one or more files and saves it in memory,
+ for retrieval via the getvalue() method. This is driven by
+ the scheduler's poll() loop, so it runs entirely within the
+ current process.
+ """
+
+ __slots__ = ("input_files",) + \
+ ("_read_data", "_reg_ids")
+
+ def _start(self):
+ self._reg_ids = set()
+ self._read_data = []
+ for k, f in self.input_files.items():
+ fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
+ fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
+ self._reg_ids.add(self.scheduler.register(f.fileno(),
+ self._registered_events, self._output_handler))
+ self._registered = True
+
+ def isAlive(self):
+ return self._registered
+
+ def _cancel(self):
+ if self.returncode is None:
+ self.returncode = 1
+
+ def _wait(self):
+ if self.returncode is not None:
+ return self.returncode
+
+ if self._registered:
+ self.scheduler.schedule(self._reg_ids)
+ self._unregister()
+
+ self.returncode = os.EX_OK
+ return self.returncode
+
+ def getvalue(self):
+ """Retrieve the entire contents"""
+ return b''.join(self._read_data)
+
+ def close(self):
+ """Free the memory buffer."""
+ self._read_data = None
+
+ def _output_handler(self, fd, event):
+
+ if event & PollConstants.POLLIN:
+
+ for f in self.input_files.values():
+ if fd == f.fileno():
+ break
+
+ buf = array.array('B')
+ try:
+ buf.fromfile(f, self._bufsize)
+ except (EOFError, IOError):
+ pass
+
+ if buf:
+ self._read_data.append(buf.tostring())
+ else:
+ self._unregister()
+ self.wait()
+
+ self._unregister_if_appropriate(event)
+
+ def _unregister(self):
+ """
+ Unregister from the scheduler and close open files.
+ """
+
+ self._registered = False
+
+ if self._reg_ids is not None:
+ for reg_id in self._reg_ids:
+ self.scheduler.unregister(reg_id)
+ self._reg_ids = None
+
+ if self.input_files is not None:
+ for f in self.input_files.values():
+ f.close()
+ self.input_files = None
+
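+# A usage sketch (the pipe and scheduler are assumptions): collect the
+# read end of a pipe without blocking the poll() loop.
+#
+#     master, slave = os.pipe()
+#     reader = PipeReader(
+#         input_files={"pipe_read": os.fdopen(master, 'rb')},
+#         scheduler=scheduler)
+#     reader.start()
+#     # ... write to the slave fd from a child process, then close it
+#     reader.wait()
+#     data = reader.getvalue()    # accumulated bytes
+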
diff --git a/portage_with_autodep/pym/_emerge/PollConstants.py b/portage_with_autodep/pym/_emerge/PollConstants.py
new file mode 100644
index 0000000..d0270a9
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/PollConstants.py
@@ -0,0 +1,18 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import select
+class PollConstants(object):
+
+ """
+ Provides POLL* constants that are equivalent to those from the
+ select module, for use by PollSelectAdapter.
+ """
+
+ names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
+ v = 1
+ for k in names:
+ locals()[k] = getattr(select, k, v)
+ v *= 2
+ del k, v
+
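+# On platforms whose select module lacks some of these POLL* constants,
+# the loop above substitutes distinct power-of-two fallbacks (1, 2, 4,
+# ...), so bitmask tests like "event & PollConstants.POLLIN" still work.
+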
diff --git a/portage_with_autodep/pym/_emerge/PollScheduler.py b/portage_with_autodep/pym/_emerge/PollScheduler.py
new file mode 100644
index 0000000..a2b5c24
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/PollScheduler.py
@@ -0,0 +1,398 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import gzip
+import errno
+import logging
+import select
+import time
+
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
+from portage import _encodings
+from portage import _unicode_encode
+from portage.util import writemsg_level
+
+from _emerge.SlotObject import SlotObject
+from _emerge.getloadavg import getloadavg
+from _emerge.PollConstants import PollConstants
+from _emerge.PollSelectAdapter import PollSelectAdapter
+
+class PollScheduler(object):
+
+ class _sched_iface_class(SlotObject):
+ __slots__ = ("output", "register", "schedule", "unregister")
+
+ def __init__(self):
+ self._terminated = threading.Event()
+ self._terminated_tasks = False
+ self._max_jobs = 1
+ self._max_load = None
+ self._jobs = 0
+ self._poll_event_queue = []
+ self._poll_event_handlers = {}
+ self._poll_event_handler_ids = {}
+ # Increment id for each new handler.
+ self._event_handler_id = 0
+ self._poll_obj = create_poll_instance()
+ self._scheduling = False
+ self._background = False
+ self.sched_iface = self._sched_iface_class(
+ output=self._task_output,
+ register=self._register,
+ schedule=self._schedule_wait,
+ unregister=self._unregister)
+
+ def terminate(self):
+ """
+ Schedules asynchronous, graceful termination of the scheduler
+ at the earliest opportunity.
+
+ This method is thread-safe (and safe for signal handlers).
+ """
+ self._terminated.set()
+
+ def _terminate_tasks(self):
+ """
+ Send signals to terminate all tasks. This is called once
+ from self._schedule() in the event dispatching thread. This
+ prevents it from being called while the _schedule_tasks()
+ implementation is running, in order to avoid potential
+ interference. All tasks should be cleaned up at the earliest
+ opportunity, but not necessarily before this method returns.
+ """
+ raise NotImplementedError()
+
+ def _schedule_tasks(self):
+ """
+ This is called from inside the _schedule() method, which
+ guarantees the following:
+
+ 1) It will not be called recursively.
+ 2) _terminate_tasks() will not be called while it is running.
+ 3) The state of the boolean _terminated_tasks variable will
+ not change while it is running.
+
+ Unless this method is used to perform user interface updates,
+ or something like that, the first thing it should do is check
+ the state of _terminated_tasks and if that is True then it
+ should return False immediately (since there's no need to
+ schedule anything after _terminate_tasks() has been called).
+ """
+ raise NotImplementedError()
+
+ def _schedule(self):
+ """
+ Calls _schedule_tasks() and automatically returns early from
+ any recursive calls to this method that the _schedule_tasks()
+ call might trigger. This makes _schedule() safe to call from
+ inside exit listeners.
+ """
+ if self._scheduling:
+ return False
+ self._scheduling = True
+ try:
+
+ if self._terminated.is_set() and \
+ not self._terminated_tasks:
+ self._terminated_tasks = True
+ self._terminate_tasks()
+
+ return self._schedule_tasks()
+ finally:
+ self._scheduling = False
+
+ def _running_job_count(self):
+ return self._jobs
+
+ def _can_add_job(self):
+ if self._terminated_tasks:
+ return False
+
+ max_jobs = self._max_jobs
+ max_load = self._max_load
+
+ if self._max_jobs is not True and \
+ self._running_job_count() >= self._max_jobs:
+ return False
+
+ if max_load is not None and \
+ (max_jobs is True or max_jobs > 1) and \
+ self._running_job_count() >= 1:
+ try:
+ avg1, avg5, avg15 = getloadavg()
+ except OSError:
+ return False
+
+ if avg1 >= max_load:
+ return False
+
+ return True
+
+ def _poll(self, timeout=None):
+ """
+ All poll() calls pass through here. The poll events
+ are added directly to self._poll_event_queue.
+ In order to avoid endless blocking, this raises
+ StopIteration if timeout is None and there are
+ no file descriptors to poll.
+ """
+ if not self._poll_event_handlers:
+ self._schedule()
+ if timeout is None and \
+ not self._poll_event_handlers:
+ raise StopIteration(
+ "timeout is None and there are no poll() event handlers")
+
+ # The following error is known to occur with Linux kernel versions
+ # less than 2.6.24:
+ #
+ # select.error: (4, 'Interrupted system call')
+ #
+ # This error has been observed after a SIGSTOP, followed by SIGCONT.
+ # Treat it similar to EAGAIN if timeout is None, otherwise just return
+ # without any events.
+ while True:
+ try:
+ self._poll_event_queue.extend(self._poll_obj.poll(timeout))
+ break
+ except select.error as e:
+ writemsg_level("\n!!! select error: %s\n" % (e,),
+ level=logging.ERROR, noiselevel=-1)
+ del e
+ if timeout is not None:
+ break
+
+ def _next_poll_event(self, timeout=None):
+ """
+ Since the _schedule_wait() loop is called by event
+ handlers from _poll_loop(), maintain a central event
+ queue for both of them to share events from a single
+ poll() call. In order to avoid endless blocking, this
+ raises StopIteration if timeout is None and there are
+ no file descriptors to poll.
+ """
+ if not self._poll_event_queue:
+ self._poll(timeout)
+ if not self._poll_event_queue:
+ raise StopIteration()
+ return self._poll_event_queue.pop()
+
+ def _poll_loop(self):
+
+ event_handlers = self._poll_event_handlers
+ event_handled = False
+
+ try:
+ while event_handlers:
+ f, event = self._next_poll_event()
+ handler, reg_id = event_handlers[f]
+ handler(f, event)
+ event_handled = True
+ except StopIteration:
+ event_handled = True
+
+ if not event_handled:
+ raise AssertionError("tight loop")
+
+ def _schedule_yield(self):
+ """
+ Schedule for a short period of time chosen by the scheduler based
+ on internal state. Synchronous tasks should call this periodically
+ in order to allow the scheduler to service pending poll events. The
+ scheduler will call poll() exactly once, without blocking, and any
+ resulting poll events will be serviced.
+ """
+ event_handlers = self._poll_event_handlers
+ events_handled = 0
+
+ if not event_handlers:
+ return bool(events_handled)
+
+ if not self._poll_event_queue:
+ self._poll(0)
+
+ try:
+ while event_handlers and self._poll_event_queue:
+ f, event = self._next_poll_event()
+ handler, reg_id = event_handlers[f]
+ handler(f, event)
+ events_handled += 1
+ except StopIteration:
+ events_handled += 1
+
+ return bool(events_handled)
+
+ def _register(self, f, eventmask, handler):
+ """
+ @rtype: Integer
+ @return: A unique registration id, for use in schedule() or
+ unregister() calls.
+ """
+ if f in self._poll_event_handlers:
+ raise AssertionError("fd %d is already registered" % f)
+ self._event_handler_id += 1
+ reg_id = self._event_handler_id
+ self._poll_event_handler_ids[reg_id] = f
+ self._poll_event_handlers[f] = (handler, reg_id)
+ self._poll_obj.register(f, eventmask)
+ return reg_id
+
+ def _unregister(self, reg_id):
+ f = self._poll_event_handler_ids[reg_id]
+ self._poll_obj.unregister(f)
+ if self._poll_event_queue:
+ # Discard any unhandled events that belong to this file,
+ # in order to prevent these events from being erroneously
+ # delivered to a future handler that is using a reallocated
+ # file descriptor of the same numeric value (causing
+ # extremely confusing bugs).
+ remaining_events = []
+ discarded_events = False
+ for event in self._poll_event_queue:
+ if event[0] == f:
+ discarded_events = True
+ else:
+ remaining_events.append(event)
+
+ if discarded_events:
+ self._poll_event_queue[:] = remaining_events
+
+ del self._poll_event_handlers[f]
+ del self._poll_event_handler_ids[reg_id]
+
+ def _schedule_wait(self, wait_ids=None, timeout=None, condition=None):
+ """
+ Schedule until the given wait_ids are no longer registered
+ for poll() events.
+ @type wait_ids: int or iterable of ints
+ @param wait_ids: task registration id(s) to wait for
+ """
+ event_handlers = self._poll_event_handlers
+ handler_ids = self._poll_event_handler_ids
+ event_handled = False
+
+ if isinstance(wait_ids, int):
+ wait_ids = frozenset([wait_ids])
+
+ start_time = None
+ remaining_timeout = timeout
+ timed_out = False
+ if timeout is not None:
+ start_time = time.time()
+ try:
+ while (wait_ids is None and event_handlers) or \
+ (wait_ids is not None and wait_ids.intersection(handler_ids)):
+ f, event = self._next_poll_event(timeout=remaining_timeout)
+ handler, reg_id = event_handlers[f]
+ handler(f, event)
+ event_handled = True
+ if condition is not None and condition():
+ break
+ if timeout is not None:
+ elapsed_time = time.time() - start_time
+ if elapsed_time < 0:
+ # The system clock has changed such that start_time
+ # is now in the future, so just assume that the
+ # timeout has already elapsed.
+ timed_out = True
+ break
+ remaining_timeout = timeout - 1000 * elapsed_time
+ if remaining_timeout <= 0:
+ timed_out = True
+ break
+ except StopIteration:
+ event_handled = True
+
+ return event_handled
+
+ def _task_output(self, msg, log_path=None, background=None,
+ level=0, noiselevel=-1):
+ """
+ Output msg to stdout if not self._background. If log_path
+ is not None then append msg to the log (appends with
+ compression if the filename extension of log_path
+ corresponds to a supported compression type).
+ """
+
+ if background is None:
+ # If the task does not have a local background value
+ # (like for parallel-fetch), then use the global value.
+ background = self._background
+
+ msg_shown = False
+ if not background:
+ writemsg_level(msg, level=level, noiselevel=noiselevel)
+ msg_shown = True
+
+ if log_path is not None:
+ try:
+ f = open(_unicode_encode(log_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='ab')
+ except IOError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ if not msg_shown:
+ writemsg_level(msg, level=level, noiselevel=noiselevel)
+ else:
+
+ if log_path.endswith('.gz'):
+ # NOTE: The empty filename argument prevents us from
+ # triggering a bug in python3 which causes GzipFile
+ # to raise AttributeError if fileobj.name is bytes
+ # instead of unicode.
+ f = gzip.GzipFile(filename='', mode='ab', fileobj=f)
+
+ f.write(_unicode_encode(msg))
+ f.close()
+
+_can_poll_device = None
+
+def can_poll_device():
+ """
+ Test if it's possible to use poll() on a device such as a pty. This
+ is known to fail on Darwin.
+ @rtype: bool
+ @returns: True if poll() on a device succeeds, False otherwise.
+ """
+
+ global _can_poll_device
+ if _can_poll_device is not None:
+ return _can_poll_device
+
+ if not hasattr(select, "poll"):
+ _can_poll_device = False
+ return _can_poll_device
+
+ try:
+ dev_null = open('/dev/null', 'rb')
+ except IOError:
+ _can_poll_device = False
+ return _can_poll_device
+
+ p = select.poll()
+ p.register(dev_null.fileno(), PollConstants.POLLIN)
+
+ invalid_request = False
+ for f, event in p.poll():
+ if event & PollConstants.POLLNVAL:
+ invalid_request = True
+ break
+ dev_null.close()
+
+ _can_poll_device = not invalid_request
+ return _can_poll_device
+
+def create_poll_instance():
+ """
+ Create an instance of select.poll, or an instance of
+ PollSelectAdapter if there is no poll() implementation or
+ it is broken somehow.
+ """
+ if can_poll_device():
+ return select.poll()
+ return PollSelectAdapter()
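+
+# Usage sketch (the fd and handler are assumptions):
+#
+#     poll_obj = create_poll_instance()
+#     poll_obj.register(fd, PollConstants.POLLIN)
+#     for f, event in poll_obj.poll(1000):   # timeout in milliseconds
+#         handle(f, event)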
diff --git a/portage_with_autodep/pym/_emerge/PollSelectAdapter.py b/portage_with_autodep/pym/_emerge/PollSelectAdapter.py
new file mode 100644
index 0000000..c11dab8
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/PollSelectAdapter.py
@@ -0,0 +1,73 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.PollConstants import PollConstants
+import select
+class PollSelectAdapter(PollConstants):
+
+ """
+ Use select to emulate a poll object, for
+ systems that don't support poll().
+ """
+
+ def __init__(self):
+ self._registered = {}
+ self._select_args = [[], [], []]
+
+ def register(self, fd, *args):
+ """
+ Only POLLIN is currently supported!
+ """
+ if len(args) > 1:
+ raise TypeError(
+ "register expected at most 2 arguments, got " + \
+ repr(1 + len(args)))
+
+ eventmask = PollConstants.POLLIN | \
+ PollConstants.POLLPRI | PollConstants.POLLOUT
+ if args:
+ eventmask = args[0]
+
+ self._registered[fd] = eventmask
+ self._select_args = None
+
+ def unregister(self, fd):
+ self._select_args = None
+ del self._registered[fd]
+
+ def poll(self, *args):
+ if len(args) > 1:
+ raise TypeError(
+ "poll expected at most 2 arguments, got " + \
+ repr(1 + len(args)))
+
+ timeout = None
+ if args:
+ timeout = args[0]
+
+ select_args = self._select_args
+ if select_args is None:
+ select_args = [list(self._registered), [], []]
+
+ if timeout is not None:
+ select_args = select_args[:]
+ # Translate poll() timeout args to select() timeout args:
+ #
+ # | units | value(s) for indefinite block
+ # ---------|--------------|------------------------------
+ # poll | milliseconds | omitted, negative, or None
+ # ---------|--------------|------------------------------
+ # select | seconds | omitted
+ # ---------|--------------|------------------------------
+
+ if timeout is not None and timeout < 0:
+ timeout = None
+ if timeout is not None:
+ select_args.append(timeout / 1000)
+
+ select_events = select.select(*select_args)
+ poll_events = []
+ for fd in select_events[0]:
+ poll_events.append((fd, PollConstants.POLLIN))
+ return poll_events
+
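+# For example, poll(5000) above translates to select(rlist, [], [], 5),
+# while poll(None) or a negative timeout blocks indefinitely, matching
+# the translation table in the comments.
+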
diff --git a/portage_with_autodep/pym/_emerge/ProgressHandler.py b/portage_with_autodep/pym/_emerge/ProgressHandler.py
new file mode 100644
index 0000000..f5afe6d
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/ProgressHandler.py
@@ -0,0 +1,22 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import time
+class ProgressHandler(object):
+ def __init__(self):
+ self.curval = 0
+ self.maxval = 0
+ self._last_update = 0
+ self.min_latency = 0.2
+
+ def onProgress(self, maxval, curval):
+ self.maxval = maxval
+ self.curval = curval
+ cur_time = time.time()
+ if cur_time - self._last_update >= self.min_latency:
+ self._last_update = cur_time
+ self.display()
+
+ def display(self):
+ raise NotImplementedError(self)
+
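display() is abstract, so consumers subclass. A minimal sketch (the subclass name is hypothetical) showing how onProgress() throttles redraws to one per min_latency seconds:

    import sys
    import time
    from _emerge.ProgressHandler import ProgressHandler

    class StderrProgressHandler(ProgressHandler):
        def display(self):
            sys.stderr.write('\r%d of %d' % (self.curval, self.maxval))

    handler = StderrProgressHandler()
    for i in range(100):
        handler.onProgress(100, i + 1)  # redraws at most every 0.2 seconds
        time.sleep(0.01)
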
diff --git a/portage_with_autodep/pym/_emerge/QueueScheduler.py b/portage_with_autodep/pym/_emerge/QueueScheduler.py
new file mode 100644
index 0000000..a4ab328
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/QueueScheduler.py
@@ -0,0 +1,116 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import time
+
+from _emerge.PollScheduler import PollScheduler
+
+class QueueScheduler(PollScheduler):
+
+ """
+ Add instances of SequentialTaskQueue and then call run(). The
+ run() method returns when no tasks remain.
+ """
+
+ def __init__(self, max_jobs=None, max_load=None):
+ PollScheduler.__init__(self)
+
+ if max_jobs is None:
+ max_jobs = 1
+
+ self._max_jobs = max_jobs
+ self._max_load = max_load
+
+ self._queues = []
+ self._schedule_listeners = []
+
+ def add(self, q):
+ self._queues.append(q)
+
+ def remove(self, q):
+ self._queues.remove(q)
+
+ def clear(self):
+ for q in self._queues:
+ q.clear()
+
+ def run(self, timeout=None):
+
+ start_time = None
+ timed_out = False
+ remaining_timeout = timeout
+ if timeout is not None:
+ start_time = time.time()
+
+ while self._schedule():
+ self._schedule_wait(timeout=remaining_timeout)
+ if timeout is not None:
+ elapsed_time = time.time() - start_time
+ if elapsed_time < 0:
+ # The system clock has changed such that start_time
+ # is now in the future, so just assume that the
+ # timeout has already elapsed.
+ timed_out = True
+ break
+ remaining_timeout = timeout - 1000 * elapsed_time
+ if remaining_timeout <= 0:
+ timed_out = True
+ break
+
+ if timeout is None or not timed_out:
+ while self._running_job_count():
+ self._schedule_wait(timeout=remaining_timeout)
+ if timeout is not None:
+ elapsed_time = time.time() - start_time
+ if elapsed_time < 0:
+ # The system clock has changed such that start_time
+ # is now in the future, so just assume that the
+ # timeout has already elapsed.
+ timed_out = True
+ break
+ remaining_timeout = timeout - 1000 * elapsed_time
+ if remaining_timeout <= 0:
+ timed_out = True
+ break
+
+ def _schedule_tasks(self):
+ """
+ @rtype: bool
+ @returns: True if there may be remaining tasks to schedule,
+ False otherwise.
+ """
+ if self._terminated_tasks:
+ return False
+
+ while self._can_add_job():
+ n = self._max_jobs - self._running_job_count()
+ if n < 1:
+ break
+
+ if not self._start_next_job(n):
+ return False
+
+ for q in self._queues:
+ if q:
+ return True
+ return False
+
+ def _running_job_count(self):
+ job_count = 0
+ for q in self._queues:
+ job_count += len(q.running_tasks)
+ self._jobs = job_count
+ return job_count
+
+ def _start_next_job(self, n=1):
+ started_count = 0
+ for q in self._queues:
+ initial_job_count = len(q.running_tasks)
+ q.schedule()
+ final_job_count = len(q.running_tasks)
+ if final_job_count > initial_job_count:
+ started_count += (final_job_count - initial_job_count)
+ if started_count >= n:
+ break
+ return started_count
+
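A sketch of the documented usage pattern (task is a placeholder for any AsynchronousTask-style object and is not defined here):

    from _emerge.QueueScheduler import QueueScheduler
    from _emerge.SequentialTaskQueue import SequentialTaskQueue

    queue = SequentialTaskQueue(max_jobs=1)
    queue.add(task)               # task: an AsynchronousTask (placeholder)
    scheduler = QueueScheduler(max_jobs=1)
    scheduler.add(queue)
    scheduler.run(timeout=5000)   # milliseconds, matching poll() semantics
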
diff --git a/portage_with_autodep/pym/_emerge/RootConfig.py b/portage_with_autodep/pym/_emerge/RootConfig.py
new file mode 100644
index 0000000..d84f108
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/RootConfig.py
@@ -0,0 +1,34 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+class RootConfig(object):
+ """This is used internally by depgraph to track information about a
+ particular $ROOT."""
+ __slots__ = ("root", "setconfig", "sets", "settings", "trees")
+
+ pkg_tree_map = {
+ "ebuild" : "porttree",
+ "binary" : "bintree",
+ "installed" : "vartree"
+ }
+
+ tree_pkg_map = {}
+ for k, v in pkg_tree_map.items():
+ tree_pkg_map[v] = k
+
+ def __init__(self, settings, trees, setconfig):
+ self.trees = trees
+ self.settings = settings
+ self.root = self.settings["ROOT"]
+ self.setconfig = setconfig
+ if setconfig is None:
+ self.sets = {}
+ else:
+ self.sets = self.setconfig.getSets()
+
+ def update(self, other):
+ """
+ Shallow copy all attributes from another instance.
+ """
+ for k in self.__slots__:
+ setattr(self, k, getattr(other, k))
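
The two class-level maps are inverses of each other; assuming the module is importable:

    from _emerge.RootConfig import RootConfig

    assert RootConfig.pkg_tree_map['binary'] == 'bintree'
    assert RootConfig.tree_pkg_map['vartree'] == 'installed'
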
diff --git a/portage_with_autodep/pym/_emerge/Scheduler.py b/portage_with_autodep/pym/_emerge/Scheduler.py
new file mode 100644
index 0000000..6412d82
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/Scheduler.py
@@ -0,0 +1,1975 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+from collections import deque
+import gc
+import gzip
+import logging
+import shutil
+import signal
+import sys
+import tempfile
+import textwrap
+import time
+import warnings
+import weakref
+import zlib
+
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode, _unicode_encode
+from portage.cache.mappings import slot_dict_class
+from portage.elog.messages import eerror
+from portage.localization import _
+from portage.output import colorize, create_color_func, red
+bad = create_color_func("BAD")
+from portage._sets import SETPREFIX
+from portage._sets.base import InternalPackageSet
+from portage.util import writemsg, writemsg_level
+from portage.package.ebuild.digestcheck import digestcheck
+from portage.package.ebuild.digestgen import digestgen
+from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
+
+import _emerge
+from _emerge.BinpkgFetcher import BinpkgFetcher
+from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
+from _emerge.BinpkgVerifier import BinpkgVerifier
+from _emerge.Blocker import Blocker
+from _emerge.BlockerDB import BlockerDB
+from _emerge.clear_caches import clear_caches
+from _emerge.create_depgraph_params import create_depgraph_params
+from _emerge.create_world_atom import create_world_atom
+from _emerge.DepPriority import DepPriority
+from _emerge.depgraph import depgraph, resume_depgraph
+from _emerge.EbuildFetcher import EbuildFetcher
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.emergelog import emergelog
+from _emerge.FakeVartree import FakeVartree
+from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
+from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
+from _emerge.JobStatusDisplay import JobStatusDisplay
+from _emerge.MergeListItem import MergeListItem
+from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
+from _emerge.Package import Package
+from _emerge.PackageMerge import PackageMerge
+from _emerge.PollScheduler import PollScheduler
+from _emerge.RootConfig import RootConfig
+from _emerge.SlotObject import SlotObject
+from _emerge.SequentialTaskQueue import SequentialTaskQueue
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+class Scheduler(PollScheduler):
+
+ # max time between display status updates (milliseconds)
+ _max_display_latency = 3000
+
+ _opts_ignore_blockers = \
+ frozenset(["--buildpkgonly",
+ "--fetchonly", "--fetch-all-uri",
+ "--nodeps", "--pretend"])
+
+ _opts_no_background = \
+ frozenset(["--pretend",
+ "--fetchonly", "--fetch-all-uri"])
+
+ _opts_no_restart = frozenset(["--buildpkgonly",
+ "--fetchonly", "--fetch-all-uri", "--pretend"])
+
+ _bad_resume_opts = set(["--ask", "--changelog",
+ "--resume", "--skipfirst"])
+
+ class _iface_class(SlotObject):
+ __slots__ = ("fetch",
+ "output", "register", "schedule",
+ "scheduleSetup", "scheduleUnpack", "scheduleYield",
+ "unregister")
+
+ class _fetch_iface_class(SlotObject):
+ __slots__ = ("log_file", "schedule")
+
+ _task_queues_class = slot_dict_class(
+ ("merge", "jobs", "ebuild_locks", "fetch", "unpack"), prefix="")
+
+ class _build_opts_class(SlotObject):
+ __slots__ = ("buildpkg", "buildpkgonly",
+ "fetch_all_uri", "fetchonly", "pretend")
+
+ class _binpkg_opts_class(SlotObject):
+ __slots__ = ("fetchonly", "getbinpkg", "pretend")
+
+ class _pkg_count_class(SlotObject):
+ __slots__ = ("curval", "maxval")
+
+ class _emerge_log_class(SlotObject):
+ __slots__ = ("xterm_titles",)
+
+ def log(self, *pargs, **kwargs):
+ if not self.xterm_titles:
+ # Avoid interference with the scheduler's status display.
+ kwargs.pop("short_msg", None)
+ emergelog(self.xterm_titles, *pargs, **kwargs)
+
+ class _failed_pkg(SlotObject):
+ __slots__ = ("build_dir", "build_log", "pkg", "returncode")
+
+ class _ConfigPool(object):
+ """Interface for a task to temporarily allocate a config
+ instance from a pool. This allows a task to be constructed
+ long before the config instance actually becomes needed, like
+ when prefetchers are constructed for the whole merge list."""
+ __slots__ = ("_root", "_allocate", "_deallocate")
+ def __init__(self, root, allocate, deallocate):
+ self._root = root
+ self._allocate = allocate
+ self._deallocate = deallocate
+ def allocate(self):
+ return self._allocate(self._root)
+ def deallocate(self, settings):
+ self._deallocate(settings)
+
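The round-trip a task performs against the pool can be sketched as follows (scheduler here stands for an existing Scheduler instance and is hypothetical):

    pool = Scheduler._ConfigPool('/', scheduler._allocate_config,
        scheduler._deallocate_config)
    settings = pool.allocate()        # clone or reuse a config for '/'
    try:
        pass                          # ... run one task with settings ...
    finally:
        pool.deallocate(settings)     # return the clone to the pool
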
+ class _unknown_internal_error(portage.exception.PortageException):
+ """
+ Used internally to terminate scheduling. The specific reason for
+ the failure should have been dumped to stderr.
+ """
+ def __init__(self, value=""):
+ portage.exception.PortageException.__init__(self, value)
+
+ def __init__(self, settings, trees, mtimedb, myopts,
+ spinner, mergelist=None, favorites=None, graph_config=None):
+ PollScheduler.__init__(self)
+
+ if mergelist is not None:
+ warnings.warn("The mergelist parameter of the " + \
+ "_emerge.Scheduler constructor is now unused. Use " + \
+ "the graph_config parameter instead.",
+ DeprecationWarning, stacklevel=2)
+
+ self.settings = settings
+ self.target_root = settings["ROOT"]
+ self.trees = trees
+ self.myopts = myopts
+ self._spinner = spinner
+ self._mtimedb = mtimedb
+ self._favorites = favorites
+ self._args_set = InternalPackageSet(favorites, allow_repo=True)
+ self._build_opts = self._build_opts_class()
+ for k in self._build_opts.__slots__:
+ setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
+ self._binpkg_opts = self._binpkg_opts_class()
+ for k in self._binpkg_opts.__slots__:
+ setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
+
+ self.curval = 0
+ self._logger = self._emerge_log_class()
+ self._task_queues = self._task_queues_class()
+ for k in self._task_queues.allowed_keys:
+ setattr(self._task_queues, k,
+ SequentialTaskQueue())
+
+ # Holds merges that will wait to be executed when no builds are
+ # executing. This is useful for system packages since dependencies
+ # on system packages are frequently unspecified. For example, see
+ # bug #256616.
+ self._merge_wait_queue = deque()
+ # Holds merges that have been transferred from the merge_wait_queue to
+ # the actual merge queue. They are removed from this list upon
+ # completion. Other packages can start building only when this list is
+ # empty.
+ self._merge_wait_scheduled = []
+
+ # Holds system packages and their deep runtime dependencies. Before
+ # being merged, these packages go to merge_wait_queue, to be merged
+ # when no other packages are building.
+ self._deep_system_deps = set()
+
+ # Holds packages to merge which will satisfy currently unsatisfied
+ # deep runtime dependencies of system packages. If this is not empty
+ # then no parallel builds will be spawned until it is empty. This
+ # minimizes the possibility that a build will fail due to the system
+ # being in a fragile state. For example, see bug #259954.
+ self._unsatisfied_system_deps = set()
+
+ self._status_display = JobStatusDisplay(
+ xterm_titles=('notitles' not in settings.features))
+ self._max_load = myopts.get("--load-average")
+ max_jobs = myopts.get("--jobs")
+ if max_jobs is None:
+ max_jobs = 1
+ self._set_max_jobs(max_jobs)
+
+ # The root where the currently running
+ # portage instance is installed.
+ self._running_root = trees["/"]["root_config"]
+ self.edebug = 0
+ if settings.get("PORTAGE_DEBUG", "") == "1":
+ self.edebug = 1
+ self.pkgsettings = {}
+ self._config_pool = {}
+ for root in self.trees:
+ self._config_pool[root] = []
+
+ self._fetch_log = os.path.join(_emerge.emergelog._emerge_log_dir,
+ 'emerge-fetch.log')
+ fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
+ schedule=self._schedule_fetch)
+ self._sched_iface = self._iface_class(
+ fetch=fetch_iface, output=self._task_output,
+ register=self._register,
+ schedule=self._schedule_wait,
+ scheduleSetup=self._schedule_setup,
+ scheduleUnpack=self._schedule_unpack,
+ scheduleYield=self._schedule_yield,
+ unregister=self._unregister)
+
+ self._prefetchers = weakref.WeakValueDictionary()
+ self._pkg_queue = []
+ self._running_tasks = {}
+ self._completed_tasks = set()
+
+ self._failed_pkgs = []
+ self._failed_pkgs_all = []
+ self._failed_pkgs_die_msgs = []
+ self._post_mod_echo_msgs = []
+ self._parallel_fetch = False
+ self._init_graph(graph_config)
+ merge_count = len([x for x in self._mergelist \
+ if isinstance(x, Package) and x.operation == "merge"])
+ self._pkg_count = self._pkg_count_class(
+ curval=0, maxval=merge_count)
+ self._status_display.maxval = self._pkg_count.maxval
+
+ # The load average takes some time to respond when new
+ # jobs are added, so we need to limit the rate of adding
+ # new jobs.
+ self._job_delay_max = 10
+ self._job_delay_factor = 1.0
+ self._job_delay_exp = 1.5
+ self._previous_job_start_time = None
+
+ # This is used to memoize the _choose_pkg() result when
+ # no packages can be chosen until one of the existing
+ # jobs completes.
+ self._choose_pkg_return_early = False
+
+ features = self.settings.features
+ if "parallel-fetch" in features and \
+ not ("--pretend" in self.myopts or \
+ "--fetch-all-uri" in self.myopts or \
+ "--fetchonly" in self.myopts):
+ if "distlocks" not in features:
+ portage.writemsg(red("!!!")+"\n", noiselevel=-1)
+ portage.writemsg(red("!!!")+" parallel-fetching " + \
+ "requires the distlocks feature enabled"+"\n",
+ noiselevel=-1)
+ portage.writemsg(red("!!!")+" you have it disabled, " + \
+ "thus parallel-fetching is being disabled"+"\n",
+ noiselevel=-1)
+ portage.writemsg(red("!!!")+"\n", noiselevel=-1)
+ elif merge_count > 1:
+ self._parallel_fetch = True
+
+ if self._parallel_fetch:
+ # clear out existing fetch log if it exists
+ try:
+ open(self._fetch_log, 'w')
+ except EnvironmentError:
+ pass
+
+ self._running_portage = None
+ portage_match = self._running_root.trees["vartree"].dbapi.match(
+ portage.const.PORTAGE_PACKAGE_ATOM)
+ if portage_match:
+ cpv = portage_match.pop()
+ self._running_portage = self._pkg(cpv, "installed",
+ self._running_root, installed=True)
+
+ def _terminate_tasks(self):
+ self._status_display.quiet = True
+ while self._running_tasks:
+ task_id, task = self._running_tasks.popitem()
+ task.cancel()
+ for q in self._task_queues.values():
+ q.clear()
+
+ def _init_graph(self, graph_config):
+ """
+ Initialize structures used for dependency calculations
+ involving currently installed packages.
+ """
+ self._set_graph_config(graph_config)
+ self._blocker_db = {}
+ for root in self.trees:
+ if graph_config is None:
+ fake_vartree = FakeVartree(self.trees[root]["root_config"],
+ pkg_cache=self._pkg_cache)
+ fake_vartree.sync()
+ else:
+ fake_vartree = graph_config.trees[root]['vartree']
+ self._blocker_db[root] = BlockerDB(fake_vartree)
+
+ def _destroy_graph(self):
+ """
+ Use this to free memory at the beginning of _calc_resume_list().
+ After _calc_resume_list(), the _init_graph() method
+ must be called in order to re-generate the structures that
+ this method destroys.
+ """
+ self._blocker_db = None
+ self._set_graph_config(None)
+ gc.collect()
+
+ def _poll(self, timeout=None):
+
+ self._schedule()
+
+ if timeout is None:
+ while True:
+ if not self._poll_event_handlers:
+ self._schedule()
+ if not self._poll_event_handlers:
+ raise StopIteration(
+ "timeout is None and there are no poll() event handlers")
+ previous_count = len(self._poll_event_queue)
+ PollScheduler._poll(self, timeout=self._max_display_latency)
+ self._status_display.display()
+ if previous_count != len(self._poll_event_queue):
+ break
+
+ elif timeout <= self._max_display_latency:
+ PollScheduler._poll(self, timeout=timeout)
+ if timeout == 0:
+ # The display is updated by _schedule() above, so it would be
+ # redundant to update it here when timeout is 0.
+ pass
+ else:
+ self._status_display.display()
+
+ else:
+ remaining_timeout = timeout
+ start_time = time.time()
+ while True:
+ previous_count = len(self._poll_event_queue)
+ PollScheduler._poll(self,
+ timeout=min(self._max_display_latency, remaining_timeout))
+ self._status_display.display()
+ if previous_count != len(self._poll_event_queue):
+ break
+ elapsed_time = time.time() - start_time
+ if elapsed_time < 0:
+ # The system clock has changed such that start_time
+ # is now in the future, so just assume that the
+ # timeout has already elapsed.
+ break
+ remaining_timeout = timeout - 1000 * elapsed_time
+ if remaining_timeout <= 0:
+ break
+
+ def _set_max_jobs(self, max_jobs):
+ self._max_jobs = max_jobs
+ self._task_queues.jobs.max_jobs = max_jobs
+ if "parallel-install" in self.settings.features:
+ self._task_queues.merge.max_jobs = max_jobs
+
+ def _background_mode(self):
+ """
+ Check if background mode is enabled and adjust states as necessary.
+
+ @rtype: bool
+ @returns: True if background mode is enabled, False otherwise.
+ """
+ background = (self._max_jobs is True or \
+ self._max_jobs > 1 or "--quiet" in self.myopts \
+ or "--quiet-build" in self.myopts) and \
+ not bool(self._opts_no_background.intersection(self.myopts))
+
+ if background:
+ interactive_tasks = self._get_interactive_tasks()
+ if interactive_tasks:
+ background = False
+ writemsg_level(">>> Sending package output to stdio due " + \
+ "to interactive package(s):\n",
+ level=logging.INFO, noiselevel=-1)
+ msg = [""]
+ for pkg in interactive_tasks:
+ pkg_str = " " + colorize("INFORM", str(pkg.cpv))
+ if pkg.root != "/":
+ pkg_str += " for " + pkg.root
+ msg.append(pkg_str)
+ msg.append("")
+ writemsg_level("".join("%s\n" % (l,) for l in msg),
+ level=logging.INFO, noiselevel=-1)
+ if self._max_jobs is True or self._max_jobs > 1:
+ self._set_max_jobs(1)
+ writemsg_level(">>> Setting --jobs=1 due " + \
+ "to the above interactive package(s)\n",
+ level=logging.INFO, noiselevel=-1)
+ writemsg_level(">>> In order to temporarily mask " + \
+ "interactive updates, you may\n" + \
+ ">>> specify --accept-properties=-interactive\n",
+ level=logging.INFO, noiselevel=-1)
+ self._status_display.quiet = \
+ not background or \
+ ("--quiet" in self.myopts and \
+ "--verbose" not in self.myopts)
+
+ self._logger.xterm_titles = \
+ "notitles" not in self.settings.features and \
+ self._status_display.quiet
+
+ return background
+
+ def _get_interactive_tasks(self):
+ interactive_tasks = []
+ for task in self._mergelist:
+ if not (isinstance(task, Package) and \
+ task.operation == "merge"):
+ continue
+ if 'interactive' in task.metadata.properties:
+ interactive_tasks.append(task)
+ return interactive_tasks
+
+ def _set_graph_config(self, graph_config):
+
+ if graph_config is None:
+ self._graph_config = None
+ self._pkg_cache = {}
+ self._digraph = None
+ self._mergelist = []
+ self._deep_system_deps.clear()
+ return
+
+ self._graph_config = graph_config
+ self._pkg_cache = graph_config.pkg_cache
+ self._digraph = graph_config.graph
+ self._mergelist = graph_config.mergelist
+
+ if "--nodeps" in self.myopts or \
+ (self._max_jobs is not True and self._max_jobs < 2):
+ # save some memory
+ self._digraph = None
+ graph_config.graph = None
+ graph_config.pkg_cache.clear()
+ self._deep_system_deps.clear()
+ for pkg in self._mergelist:
+ self._pkg_cache[pkg] = pkg
+ return
+
+ self._find_system_deps()
+ self._prune_digraph()
+ self._prevent_builddir_collisions()
+ if '--debug' in self.myopts:
+ writemsg("\nscheduler digraph:\n\n", noiselevel=-1)
+ self._digraph.debug_print()
+ writemsg("\n", noiselevel=-1)
+
+ def _find_system_deps(self):
+ """
+ Find system packages and their deep runtime dependencies. Before being
+ merged, these packages go to merge_wait_queue, to be merged when no
+ other packages are building.
+ NOTE: This can only find deep system deps if the system set has been
+ added to the graph and traversed deeply (the depgraph "complete"
+ parameter will do this, triggered by emerge --complete-graph option).
+ """
+ deep_system_deps = self._deep_system_deps
+ deep_system_deps.clear()
+ deep_system_deps.update(
+ _find_deep_system_runtime_deps(self._digraph))
+ deep_system_deps.difference_update([pkg for pkg in \
+ deep_system_deps if pkg.operation != "merge"])
+
+ def _prune_digraph(self):
+ """
+ Prune any root nodes that are irrelevant.
+ """
+
+ graph = self._digraph
+ completed_tasks = self._completed_tasks
+ removed_nodes = set()
+ while True:
+ for node in graph.root_nodes():
+ if not isinstance(node, Package) or \
+ (node.installed and node.operation == "nomerge") or \
+ node.onlydeps or \
+ node in completed_tasks:
+ removed_nodes.add(node)
+ if removed_nodes:
+ graph.difference_update(removed_nodes)
+ if not removed_nodes:
+ break
+ removed_nodes.clear()
+
+ def _prevent_builddir_collisions(self):
+ """
+ When building stages, sometimes the same exact cpv needs to be merged
+ to both $ROOTs. Add edges to the digraph in order to avoid collisions
+ in the builddir. Currently, normal file locks would be inappropriate
+ for this purpose since emerge holds all of its build dir locks from
+ the main process.
+ """
+ cpv_map = {}
+ for pkg in self._mergelist:
+ if not isinstance(pkg, Package):
+ # a satisfied blocker
+ continue
+ if pkg.installed:
+ continue
+ if pkg.cpv not in cpv_map:
+ cpv_map[pkg.cpv] = [pkg]
+ continue
+ for earlier_pkg in cpv_map[pkg.cpv]:
+ self._digraph.add(earlier_pkg, pkg,
+ priority=DepPriority(buildtime=True))
+ cpv_map[pkg.cpv].append(pkg)
+
+ class _pkg_failure(portage.exception.PortageException):
+ """
+ An instance of this class is raised by unmerge() when
+ an uninstallation fails.
+ """
+ status = 1
+ def __init__(self, *pargs):
+ portage.exception.PortageException.__init__(self, pargs)
+ if pargs:
+ self.status = pargs[0]
+
+ def _schedule_fetch(self, fetcher):
+ """
+ Schedule a fetcher, in order to control the number of concurrent
+ fetchers. If self._max_jobs is greater than 1 then the fetch
+ queue is bypassed and the fetcher is started immediately,
+ otherwise it is added to the front of the parallel-fetch queue.
+ NOTE: The parallel-fetch queue is currently used to serialize
+ access to the parallel-fetch log, so changes in the log handling
+ would be required before it would be possible to enable
+ concurrent fetching within the parallel-fetch queue.
+ """
+ if self._max_jobs > 1:
+ fetcher.start()
+ else:
+ self._task_queues.fetch.addFront(fetcher)
+
+ def _schedule_setup(self, setup_phase):
+ """
+ Schedule a setup phase on the merge queue, in order to
+ serialize unsandboxed access to the live filesystem.
+ """
+ if self._task_queues.merge.max_jobs > 1 and \
+ "ebuild-locks" in self.settings.features:
+ # Use a separate queue for ebuild-locks when the merge
+ # queue allows more than 1 job (due to parallel-install),
+ # since the portage.locks module does not behave as desired
+ # if we try to lock the same file multiple times
+ # concurrently from the same process.
+ self._task_queues.ebuild_locks.add(setup_phase)
+ else:
+ self._task_queues.merge.add(setup_phase)
+ self._schedule()
+
+ def _schedule_unpack(self, unpack_phase):
+ """
+ Schedule an unpack phase on the unpack queue, in order
+ to serialize $DISTDIR access for live ebuilds.
+ """
+ self._task_queues.unpack.add(unpack_phase)
+
+ def _find_blockers(self, new_pkg):
+ """
+ Returns a callable.
+ """
+ def get_blockers():
+ return self._find_blockers_impl(new_pkg)
+ return get_blockers
+
+ def _find_blockers_impl(self, new_pkg):
+ if self._opts_ignore_blockers.intersection(self.myopts):
+ return None
+
+ blocker_db = self._blocker_db[new_pkg.root]
+
+ blocker_dblinks = []
+ for blocking_pkg in blocker_db.findInstalledBlockers(new_pkg):
+ if new_pkg.slot_atom == blocking_pkg.slot_atom:
+ continue
+ if new_pkg.cpv == blocking_pkg.cpv:
+ continue
+ blocker_dblinks.append(portage.dblink(
+ blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
+ self.pkgsettings[blocking_pkg.root], treetype="vartree",
+ vartree=self.trees[blocking_pkg.root]["vartree"]))
+
+ return blocker_dblinks
+
+ def _generate_digests(self):
+ """
+ Generate digests if necessary for --digests or FEATURES=digest.
+ In order to avoid interference, this must be done before parallel
+ tasks are started.
+ """
+
+ if '--fetchonly' in self.myopts:
+ return os.EX_OK
+
+ digest = '--digest' in self.myopts
+ if not digest:
+ for pkgsettings in self.pkgsettings.values():
+ if pkgsettings.mycpv is not None:
+ # ensure that we are using global features
+ # settings rather than those from package.env
+ pkgsettings.reset()
+ if 'digest' in pkgsettings.features:
+ digest = True
+ break
+
+ if not digest:
+ return os.EX_OK
+
+ for x in self._mergelist:
+ if not isinstance(x, Package) or \
+ x.type_name != 'ebuild' or \
+ x.operation != 'merge':
+ continue
+ pkgsettings = self.pkgsettings[x.root]
+ if pkgsettings.mycpv is not None:
+ # ensure that we are using global features
+ # settings rather than those from package.env
+ pkgsettings.reset()
+ if '--digest' not in self.myopts and \
+ 'digest' not in pkgsettings.features:
+ continue
+ portdb = x.root_config.trees['porttree'].dbapi
+ ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % x.cpv)
+ pkgsettings['O'] = os.path.dirname(ebuild_path)
+ if not digestgen(mysettings=pkgsettings, myportdb=portdb):
+ writemsg_level(
+ "!!! Unable to generate manifest for '%s'.\n" \
+ % x.cpv, level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ return os.EX_OK
+
+ def _env_sanity_check(self):
+ """
+ Verify a sane environment before trying to build anything from source.
+ """
+ have_src_pkg = False
+ for x in self._mergelist:
+ if isinstance(x, Package) and not x.built:
+ have_src_pkg = True
+ break
+
+ if not have_src_pkg:
+ return os.EX_OK
+
+ for settings in self.pkgsettings.values():
+ for var in ("ARCH", ):
+ value = settings.get(var)
+ if value and value.strip():
+ continue
+ msg = _("%(var)s is not set... "
+ "Are you missing the '%(configroot)setc/make.profile' symlink? "
+ "Is the symlink correct? "
+ "Is your portage tree complete?") % \
+ {"var": var, "configroot": settings["PORTAGE_CONFIGROOT"]}
+
+ out = portage.output.EOutput()
+ for line in textwrap.wrap(msg, 70):
+ out.eerror(line)
+ return 1
+
+ return os.EX_OK
+
+ def _check_manifests(self):
+ # Verify all the manifests now so that the user is notified of failure
+ # as soon as possible.
+ if "strict" not in self.settings.features or \
+ "--fetchonly" in self.myopts or \
+ "--fetch-all-uri" in self.myopts:
+ return os.EX_OK
+
+ shown_verifying_msg = False
+ quiet_settings = {}
+ for myroot, pkgsettings in self.pkgsettings.items():
+ quiet_config = portage.config(clone=pkgsettings)
+ quiet_config["PORTAGE_QUIET"] = "1"
+ quiet_config.backup_changes("PORTAGE_QUIET")
+ quiet_settings[myroot] = quiet_config
+ del quiet_config
+
+ failures = 0
+
+ for x in self._mergelist:
+ if not isinstance(x, Package) or \
+ x.type_name != "ebuild":
+ continue
+
+ if x.operation == "uninstall":
+ continue
+
+ if not shown_verifying_msg:
+ shown_verifying_msg = True
+ self._status_msg("Verifying ebuild manifests")
+
+ root_config = x.root_config
+ portdb = root_config.trees["porttree"].dbapi
+ quiet_config = quiet_settings[root_config.root]
+ ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % x.cpv)
+ quiet_config["O"] = os.path.dirname(ebuild_path)
+ if not digestcheck([], quiet_config, strict=True):
+ failures |= 1
+
+ if failures:
+ return 1
+ return os.EX_OK
+
+ def _add_prefetchers(self):
+
+ if not self._parallel_fetch:
+ return
+
+ self._status_msg("Starting parallel fetch")
+
+ prefetchers = self._prefetchers
+ getbinpkg = "--getbinpkg" in self.myopts
+
+ for pkg in self._mergelist:
+ # mergelist can contain solved Blocker instances
+ if not isinstance(pkg, Package) or pkg.operation == "uninstall":
+ continue
+ prefetcher = self._create_prefetcher(pkg)
+ if prefetcher is not None:
+ self._task_queues.fetch.add(prefetcher)
+ prefetchers[pkg] = prefetcher
+
+ # Start the first prefetcher immediately so that self._task()
+ # won't discard it. This avoids a case where the first
+ # prefetcher is discarded, causing the second prefetcher to
+ # occupy the fetch queue before the first prefetcher has an
+ # opportunity to execute.
+ self._task_queues.fetch.schedule()
+
+ def _create_prefetcher(self, pkg):
+ """
+ @return: a prefetcher, or None if not applicable
+ """
+ prefetcher = None
+
+ if not isinstance(pkg, Package):
+ pass
+
+ elif pkg.type_name == "ebuild":
+
+ prefetcher = EbuildFetcher(background=True,
+ config_pool=self._ConfigPool(pkg.root,
+ self._allocate_config, self._deallocate_config),
+ fetchonly=1, logfile=self._fetch_log,
+ pkg=pkg, prefetch=True, scheduler=self._sched_iface)
+
+ elif pkg.type_name == "binary" and \
+ "--getbinpkg" in self.myopts and \
+ pkg.root_config.trees["bintree"].isremote(pkg.cpv):
+
+ prefetcher = BinpkgPrefetcher(background=True,
+ pkg=pkg, scheduler=self._sched_iface)
+
+ return prefetcher
+
+ def _is_restart_scheduled(self):
+ """
+ Check if the merge list contains a replacement
+ for the currently running instance, which will result
+ in a restart after merge.
+ @rtype: bool
+ @returns: True if a restart is scheduled, False otherwise.
+ """
+ if self._opts_no_restart.intersection(self.myopts):
+ return False
+
+ mergelist = self._mergelist
+
+ for i, pkg in enumerate(mergelist):
+ if self._is_restart_necessary(pkg) and \
+ i != len(mergelist) - 1:
+ return True
+
+ return False
+
+ def _is_restart_necessary(self, pkg):
+ """
+ @return: True if merging the given package
+ requires restart, False otherwise.
+ """
+
+ # Figure out if we need a restart.
+ if pkg.root == self._running_root.root and \
+ portage.match_from_list(
+ portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
+ if self._running_portage is None:
+ return True
+ elif pkg.cpv != self._running_portage.cpv or \
+ '9999' in pkg.cpv or \
+ 'git' in pkg.inherited or \
+ 'git-2' in pkg.inherited:
+ return True
+ return False
+
+ def _restart_if_necessary(self, pkg):
+ """
+ Use execv() to restart emerge. This happens
+ if portage upgrades itself and there are
+ remaining packages in the list.
+ """
+
+ if self._opts_no_restart.intersection(self.myopts):
+ return
+
+ if not self._is_restart_necessary(pkg):
+ return
+
+ if pkg == self._mergelist[-1]:
+ return
+
+ self._main_loop_cleanup()
+
+ logger = self._logger
+ pkg_count = self._pkg_count
+ mtimedb = self._mtimedb
+ bad_resume_opts = self._bad_resume_opts
+
+ logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
+
+ logger.log(" *** RESTARTING " + \
+ "emerge via exec() after change of " + \
+ "portage version.")
+
+ mtimedb["resume"]["mergelist"].remove(list(pkg))
+ mtimedb.commit()
+ portage.run_exitfuncs()
+ # Don't trust sys.argv[0] here because eselect-python may modify it.
+ emerge_binary = os.path.join(portage.const.PORTAGE_BIN_PATH, 'emerge')
+ mynewargv = [emerge_binary, "--resume"]
+ resume_opts = self.myopts.copy()
+ # For automatic resume, we need to prevent
+ # any of bad_resume_opts from leaking in
+ # via EMERGE_DEFAULT_OPTS.
+ resume_opts["--ignore-default-opts"] = True
+ for myopt, myarg in resume_opts.items():
+ if myopt not in bad_resume_opts:
+ if myarg is True:
+ mynewargv.append(myopt)
+ elif isinstance(myarg, list):
+ # arguments like --exclude that use 'append' action
+ for x in myarg:
+ mynewargv.append("%s=%s" % (myopt, x))
+ else:
+ mynewargv.append("%s=%s" % (myopt, myarg))
+ # priority only needs to be adjusted on the first run
+ os.environ["PORTAGE_NICENESS"] = "0"
+ os.execv(mynewargv[0], mynewargv)
+
+ def _run_pkg_pretend(self):
+ """
+ Since pkg_pretend output may be important, this method sends all
+ output directly to stdout (regardless of options like --quiet or
+ --jobs).
+ """
+
+ failures = 0
+
+ # Use a local PollScheduler instance here, since we don't
+ # want tasks here to trigger the usual Scheduler callbacks
+ # that handle job scheduling and status display.
+ sched_iface = PollScheduler().sched_iface
+
+ for x in self._mergelist:
+ if not isinstance(x, Package):
+ continue
+
+ if x.operation == "uninstall":
+ continue
+
+ if x.metadata["EAPI"] in ("0", "1", "2", "3"):
+ continue
+
+ if "pretend" not in x.metadata.defined_phases:
+ continue
+
+ out_str =">>> Running pre-merge checks for " + colorize("INFORM", x.cpv) + "\n"
+ portage.util.writemsg_stdout(out_str, noiselevel=-1)
+
+ root_config = x.root_config
+ settings = self.pkgsettings[root_config.root]
+ settings.setcpv(x)
+ tmpdir = tempfile.mkdtemp()
+ tmpdir_orig = settings["PORTAGE_TMPDIR"]
+ settings["PORTAGE_TMPDIR"] = tmpdir
+
+ try:
+ if x.built:
+ tree = "bintree"
+ bintree = root_config.trees["bintree"].dbapi.bintree
+ fetched = False
+
+ # Display fetch on stdout, so that it's always clear what
+ # is consuming time here.
+ if bintree.isremote(x.cpv):
+ fetcher = BinpkgFetcher(pkg=x,
+ scheduler=sched_iface)
+ fetcher.start()
+ if fetcher.wait() != os.EX_OK:
+ failures += 1
+ continue
+ fetched = fetcher.pkg_path
+
+ verifier = BinpkgVerifier(pkg=x,
+ scheduler=sched_iface)
+ verifier.start()
+ if verifier.wait() != os.EX_OK:
+ failures += 1
+ continue
+
+ if fetched:
+ bintree.inject(x.cpv, filename=fetched)
+ tbz2_file = bintree.getname(x.cpv)
+ infloc = os.path.join(tmpdir, x.category, x.pf, "build-info")
+ os.makedirs(infloc)
+ portage.xpak.tbz2(tbz2_file).unpackinfo(infloc)
+ ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
+ settings.configdict["pkg"]["EMERGE_FROM"] = "binary"
+ settings.configdict["pkg"]["MERGE_TYPE"] = "binary"
+
+ else:
+ tree = "porttree"
+ portdb = root_config.trees["porttree"].dbapi
+ ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % x.cpv)
+ settings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
+ if self._build_opts.buildpkgonly:
+ settings.configdict["pkg"]["MERGE_TYPE"] = "buildonly"
+ else:
+ settings.configdict["pkg"]["MERGE_TYPE"] = "source"
+
+ portage.package.ebuild.doebuild.doebuild_environment(ebuild_path,
+ "pretend", settings=settings,
+ db=self.trees[settings["ROOT"]][tree].dbapi)
+ prepare_build_dirs(root_config.root, settings, cleanup=0)
+
+ vardb = root_config.trees['vartree'].dbapi
+ settings["REPLACING_VERSIONS"] = " ".join(
+ set(portage.versions.cpv_getversion(match) \
+ for match in vardb.match(x.slot_atom) + \
+ vardb.match('='+x.cpv)))
+ pretend_phase = EbuildPhase(
+ phase="pretend", scheduler=sched_iface,
+ settings=settings)
+
+ pretend_phase.start()
+ ret = pretend_phase.wait()
+ if ret != os.EX_OK:
+ failures += 1
+ portage.elog.elog_process(x.cpv, settings)
+ finally:
+ shutil.rmtree(tmpdir)
+ settings["PORTAGE_TMPDIR"] = tmpdir_orig
+
+ if failures:
+ return 1
+ return os.EX_OK
+
+ def merge(self):
+ if "--resume" in self.myopts:
+ # We're resuming.
+ portage.writemsg_stdout(
+ colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
+ self._logger.log(" *** Resuming merge...")
+
+ self._save_resume_list()
+
+ try:
+ self._background = self._background_mode()
+ except self._unknown_internal_error:
+ return 1
+
+ for root in self.trees:
+ root_config = self.trees[root]["root_config"]
+
+ # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
+ # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
+ # for ensuring sane $PWD (bug #239560) and storing elog messages.
+ tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
+ if not tmpdir or not os.path.isdir(tmpdir):
+ msg = "The directory specified in your " + \
+ "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
+ "does not exist. Please create this " + \
+ "directory or correct your PORTAGE_TMPDIR setting."
+ msg = textwrap.wrap(msg, 70)
+ out = portage.output.EOutput()
+ for l in msg:
+ out.eerror(l)
+ return 1
+
+ if self._background:
+ root_config.settings.unlock()
+ root_config.settings["PORTAGE_BACKGROUND"] = "1"
+ root_config.settings.backup_changes("PORTAGE_BACKGROUND")
+ root_config.settings.lock()
+
+ self.pkgsettings[root] = portage.config(
+ clone=root_config.settings)
+
+ keep_going = "--keep-going" in self.myopts
+ fetchonly = self._build_opts.fetchonly
+ mtimedb = self._mtimedb
+ failed_pkgs = self._failed_pkgs
+
+ rval = self._generate_digests()
+ if rval != os.EX_OK:
+ return rval
+
+ rval = self._env_sanity_check()
+ if rval != os.EX_OK:
+ return rval
+
+ # TODO: Immediately recalculate deps here if --keep-going
+ # is enabled and corrupt manifests are detected.
+ rval = self._check_manifests()
+ if rval != os.EX_OK and not keep_going:
+ return rval
+
+ if not fetchonly:
+ rval = self._run_pkg_pretend()
+ if rval != os.EX_OK:
+ return rval
+
+ while True:
+
+ received_signal = []
+
+ def sighandler(signum, frame):
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % \
+ {"signal":signum})
+ self.terminate()
+ received_signal.append(128 + signum)
+
+ earlier_sigint_handler = signal.signal(signal.SIGINT, sighandler)
+ earlier_sigterm_handler = signal.signal(signal.SIGTERM, sighandler)
+
+ try:
+ rval = self._merge()
+ finally:
+ # Restore previous handlers
+ if earlier_sigint_handler is not None:
+ signal.signal(signal.SIGINT, earlier_sigint_handler)
+ else:
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ if earlier_sigterm_handler is not None:
+ signal.signal(signal.SIGTERM, earlier_sigterm_handler)
+ else:
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+ if received_signal:
+ sys.exit(received_signal[0])
+
+ if rval == os.EX_OK or fetchonly or not keep_going:
+ break
+ if "resume" not in mtimedb:
+ break
+ mergelist = self._mtimedb["resume"].get("mergelist")
+ if not mergelist:
+ break
+
+ if not failed_pkgs:
+ break
+
+ for failed_pkg in failed_pkgs:
+ mergelist.remove(list(failed_pkg.pkg))
+
+ self._failed_pkgs_all.extend(failed_pkgs)
+ del failed_pkgs[:]
+
+ if not mergelist:
+ break
+
+ if not self._calc_resume_list():
+ break
+
+ clear_caches(self.trees)
+ if not self._mergelist:
+ break
+
+ self._save_resume_list()
+ self._pkg_count.curval = 0
+ self._pkg_count.maxval = len([x for x in self._mergelist \
+ if isinstance(x, Package) and x.operation == "merge"])
+ self._status_display.maxval = self._pkg_count.maxval
+
+ self._logger.log(" *** Finished. Cleaning up...")
+
+ if failed_pkgs:
+ self._failed_pkgs_all.extend(failed_pkgs)
+ del failed_pkgs[:]
+
+ printer = portage.output.EOutput()
+ background = self._background
+ failure_log_shown = False
+ if background and len(self._failed_pkgs_all) == 1:
+ # If only one package failed then just show its
+ # whole log for easy viewing.
+ failed_pkg = self._failed_pkgs_all[-1]
+ build_dir = failed_pkg.build_dir
+ log_file = None
+
+ log_paths = [failed_pkg.build_log]
+
+ log_path = self._locate_failure_log(failed_pkg)
+ if log_path is not None:
+ try:
+ log_file = open(_unicode_encode(log_path,
+ encoding=_encodings['fs'], errors='strict'), mode='rb')
+ except IOError:
+ pass
+ else:
+ if log_path.endswith('.gz'):
+ log_file = gzip.GzipFile(filename='',
+ mode='rb', fileobj=log_file)
+
+ if log_file is not None:
+ try:
+ for line in log_file:
+ writemsg_level(line, noiselevel=-1)
+ except zlib.error as e:
+ writemsg_level("%s\n" % (e,), level=logging.ERROR,
+ noiselevel=-1)
+ finally:
+ log_file.close()
+ failure_log_shown = True
+
+ # Dump mod_echo output now since it tends to flood the terminal.
+ # This allows us to avoid having more important output, generated
+ # later, from being swept away by the mod_echo output.
+ mod_echo_output = _flush_elog_mod_echo()
+
+ if background and not failure_log_shown and \
+ self._failed_pkgs_all and \
+ self._failed_pkgs_die_msgs and \
+ not mod_echo_output:
+
+ for mysettings, key, logentries in self._failed_pkgs_die_msgs:
+ root_msg = ""
+ if mysettings["ROOT"] != "/":
+ root_msg = " merged to %s" % mysettings["ROOT"]
+ print()
+ printer.einfo("Error messages for package %s%s:" % \
+ (colorize("INFORM", key), root_msg))
+ print()
+ for phase in portage.const.EBUILD_PHASES:
+ if phase not in logentries:
+ continue
+ for msgtype, msgcontent in logentries[phase]:
+ if isinstance(msgcontent, basestring):
+ msgcontent = [msgcontent]
+ for line in msgcontent:
+ printer.eerror(line.strip("\n"))
+
+ if self._post_mod_echo_msgs:
+ for msg in self._post_mod_echo_msgs:
+ msg()
+
+ if len(self._failed_pkgs_all) > 1 or \
+ (self._failed_pkgs_all and keep_going):
+ if len(self._failed_pkgs_all) > 1:
+ msg = "The following %d packages have " % \
+ len(self._failed_pkgs_all) + \
+ "failed to build or install:"
+ else:
+ msg = "The following package has " + \
+ "failed to build or install:"
+
+ printer.eerror("")
+ for line in textwrap.wrap(msg, 72):
+ printer.eerror(line)
+ printer.eerror("")
+ for failed_pkg in self._failed_pkgs_all:
+ # Use _unicode_decode() to force unicode format string so
+ # that Package.__unicode__() is called in python2.
+ msg = _unicode_decode(" %s") % (failed_pkg.pkg,)
+ log_path = self._locate_failure_log(failed_pkg)
+ if log_path is not None:
+ msg += ", Log file:"
+ printer.eerror(msg)
+ if log_path is not None:
+ printer.eerror(" '%s'" % colorize('INFORM', log_path))
+ printer.eerror("")
+
+ if self._failed_pkgs_all:
+ return 1
+ return os.EX_OK
+
+ def _elog_listener(self, mysettings, key, logentries, fulltext):
+ errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
+ if errors:
+ self._failed_pkgs_die_msgs.append(
+ (mysettings, key, errors))
+
+ def _locate_failure_log(self, failed_pkg):
+
+ log_paths = [failed_pkg.build_log]
+
+ for log_path in log_paths:
+ if not log_path:
+ continue
+
+ try:
+ log_size = os.stat(log_path).st_size
+ except OSError:
+ continue
+
+ if log_size == 0:
+ continue
+
+ return log_path
+
+ return None
+
+ def _add_packages(self):
+ pkg_queue = self._pkg_queue
+ for pkg in self._mergelist:
+ if isinstance(pkg, Package):
+ pkg_queue.append(pkg)
+ elif isinstance(pkg, Blocker):
+ pass
+
+ def _system_merge_started(self, merge):
+ """
+ Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
+ In general, this keeps track of installed system packages with
+ unsatisfied RDEPEND or PDEPEND (circular dependencies). It can be
+ a fragile situation, so we don't execute any unrelated builds until
+ the circular dependencies are built and installed.
+ """
+ graph = self._digraph
+ if graph is None:
+ return
+ pkg = merge.merge.pkg
+
+ # Skip this if $ROOT != / since it shouldn't matter if there
+ # are unsatisfied system runtime deps in this case.
+ if pkg.root != '/':
+ return
+
+ completed_tasks = self._completed_tasks
+ unsatisfied = self._unsatisfied_system_deps
+
+ def ignore_non_runtime_or_satisfied(priority):
+ """
+ Ignore non-runtime and satisfied runtime priorities.
+ """
+ if isinstance(priority, DepPriority) and \
+ not priority.satisfied and \
+ (priority.runtime or priority.runtime_post):
+ return False
+ return True
+
+ # When checking for unsatisfied runtime deps, only check
+ # direct deps since indirect deps are checked when the
+ # corresponding parent is merged.
+ for child in graph.child_nodes(pkg,
+ ignore_priority=ignore_non_runtime_or_satisfied):
+ if not isinstance(child, Package) or \
+ child.operation == 'uninstall':
+ continue
+ if child is pkg:
+ continue
+ if child.operation == 'merge' and \
+ child not in completed_tasks:
+ unsatisfied.add(child)
+
+ def _merge_wait_exit_handler(self, task):
+ self._merge_wait_scheduled.remove(task)
+ self._merge_exit(task)
+
+ def _merge_exit(self, merge):
+ self._running_tasks.pop(id(merge), None)
+ self._do_merge_exit(merge)
+ self._deallocate_config(merge.merge.settings)
+ if merge.returncode == os.EX_OK and \
+ not merge.merge.pkg.installed:
+ self._status_display.curval += 1
+ self._status_display.merges = len(self._task_queues.merge)
+ self._schedule()
+
+ def _do_merge_exit(self, merge):
+ pkg = merge.merge.pkg
+ if merge.returncode != os.EX_OK:
+ settings = merge.merge.settings
+ build_dir = settings.get("PORTAGE_BUILDDIR")
+ build_log = settings.get("PORTAGE_LOG_FILE")
+
+ self._failed_pkgs.append(self._failed_pkg(
+ build_dir=build_dir, build_log=build_log,
+ pkg=pkg,
+ returncode=merge.returncode))
+ if not self._terminated_tasks:
+ self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
+ self._status_display.failed = len(self._failed_pkgs)
+ return
+
+ self._task_complete(pkg)
+ pkg_to_replace = merge.merge.pkg_to_replace
+ if pkg_to_replace is not None:
+ # When a package is replaced, mark its uninstall
+ # task complete (if any).
+ if self._digraph is not None and \
+ pkg_to_replace in self._digraph:
+ try:
+ self._pkg_queue.remove(pkg_to_replace)
+ except ValueError:
+ pass
+ self._task_complete(pkg_to_replace)
+ else:
+ self._pkg_cache.pop(pkg_to_replace, None)
+
+ if pkg.installed:
+ return
+
+ self._restart_if_necessary(pkg)
+
+ # Call mtimedb.commit() after each merge so that
+ # --resume still works after being interrupted
+ # by reboot, sigkill or similar.
+ mtimedb = self._mtimedb
+ mtimedb["resume"]["mergelist"].remove(list(pkg))
+ if not mtimedb["resume"]["mergelist"]:
+ del mtimedb["resume"]
+ mtimedb.commit()
+
+ def _build_exit(self, build):
+ self._running_tasks.pop(id(build), None)
+ if build.returncode == os.EX_OK and self._terminated_tasks:
+ # We've been interrupted, so we won't
+ # add this to the merge queue.
+ self.curval += 1
+ self._deallocate_config(build.settings)
+ elif build.returncode == os.EX_OK:
+ self.curval += 1
+ merge = PackageMerge(merge=build)
+ self._running_tasks[id(merge)] = merge
+ if not build.build_opts.buildpkgonly and \
+ build.pkg in self._deep_system_deps:
+ # Since dependencies on system packages are frequently
+ # unspecified, merge them only when no builds are executing.
+ self._merge_wait_queue.append(merge)
+ merge.addStartListener(self._system_merge_started)
+ else:
+ merge.addExitListener(self._merge_exit)
+ self._task_queues.merge.add(merge)
+ self._status_display.merges = len(self._task_queues.merge)
+ else:
+ settings = build.settings
+ build_dir = settings.get("PORTAGE_BUILDDIR")
+ build_log = settings.get("PORTAGE_LOG_FILE")
+
+ self._failed_pkgs.append(self._failed_pkg(
+ build_dir=build_dir, build_log=build_log,
+ pkg=build.pkg,
+ returncode=build.returncode))
+ if not self._terminated_tasks:
+ self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
+ self._status_display.failed = len(self._failed_pkgs)
+ self._deallocate_config(build.settings)
+ self._jobs -= 1
+ self._status_display.running = self._jobs
+ self._schedule()
+
+ def _extract_exit(self, build):
+ self._build_exit(build)
+
+ def _task_complete(self, pkg):
+ self._completed_tasks.add(pkg)
+ self._unsatisfied_system_deps.discard(pkg)
+ self._choose_pkg_return_early = False
+ blocker_db = self._blocker_db[pkg.root]
+ blocker_db.discardBlocker(pkg)
+
+ def _merge(self):
+
+ self._add_prefetchers()
+ self._add_packages()
+ pkg_queue = self._pkg_queue
+ failed_pkgs = self._failed_pkgs
+ portage.locks._quiet = self._background
+ portage.elog.add_listener(self._elog_listener)
+ rval = os.EX_OK
+
+ try:
+ self._main_loop()
+ finally:
+ self._main_loop_cleanup()
+ portage.locks._quiet = False
+ portage.elog.remove_listener(self._elog_listener)
+ if failed_pkgs:
+ rval = failed_pkgs[-1].returncode
+
+ return rval
+
+ def _main_loop_cleanup(self):
+ del self._pkg_queue[:]
+ self._completed_tasks.clear()
+ self._deep_system_deps.clear()
+ self._unsatisfied_system_deps.clear()
+ self._choose_pkg_return_early = False
+ self._status_display.reset()
+ self._digraph = None
+ self._task_queues.fetch.clear()
+ self._prefetchers.clear()
+
+ def _choose_pkg(self):
+ """
+ Choose a task that has all its dependencies satisfied. This is used
+ for parallel build scheduling, and ensures that we don't build
+ anything with deep dependencies that have yet to be merged.
+ """
+
+ if self._choose_pkg_return_early:
+ return None
+
+ if self._digraph is None:
+ if self._is_work_scheduled() and \
+ not ("--nodeps" in self.myopts and \
+ (self._max_jobs is True or self._max_jobs > 1)):
+ self._choose_pkg_return_early = True
+ return None
+ return self._pkg_queue.pop(0)
+
+ if not self._is_work_scheduled():
+ return self._pkg_queue.pop(0)
+
+ self._prune_digraph()
+
+ chosen_pkg = None
+
+ # Prefer uninstall operations when available.
+ graph = self._digraph
+ for pkg in self._pkg_queue:
+ if pkg.operation == 'uninstall' and \
+ not graph.child_nodes(pkg):
+ chosen_pkg = pkg
+ break
+
+ if chosen_pkg is None:
+ later = set(self._pkg_queue)
+ for pkg in self._pkg_queue:
+ later.remove(pkg)
+ if not self._dependent_on_scheduled_merges(pkg, later):
+ chosen_pkg = pkg
+ break
+
+ if chosen_pkg is not None:
+ self._pkg_queue.remove(chosen_pkg)
+
+ if chosen_pkg is None:
+ # There's no point in searching for a package to
+ # choose until at least one of the existing jobs
+ # completes.
+ self._choose_pkg_return_early = True
+
+ return chosen_pkg
+
+ def _dependent_on_scheduled_merges(self, pkg, later):
+ """
+ Traverse the subgraph of the given package's deep dependencies
+ to see if it contains any scheduled merges.
+ @param pkg: a package to check dependencies for
+ @type pkg: Package
+ @param later: packages for which dependence should be ignored
+ since they will be merged later than pkg anyway and therefore
+ delaying the merge of pkg will not result in a more optimal
+ merge order
+ @type later: set
+ @rtype: bool
+ @returns: True if the package is dependent, False otherwise.
+ """
+
+ graph = self._digraph
+ completed_tasks = self._completed_tasks
+
+ dependent = False
+ traversed_nodes = set([pkg])
+ direct_deps = graph.child_nodes(pkg)
+ node_stack = direct_deps
+ direct_deps = frozenset(direct_deps)
+ while node_stack:
+ node = node_stack.pop()
+ if node in traversed_nodes:
+ continue
+ traversed_nodes.add(node)
+ if not ((node.installed and node.operation == "nomerge") or \
+ (node.operation == "uninstall" and \
+ node not in direct_deps) or \
+ node in completed_tasks or \
+ node in later):
+ dependent = True
+ break
+
+ # Don't traverse children of uninstall nodes since
+ # those aren't dependencies in the usual sense.
+ if node.operation != "uninstall":
+ node_stack.extend(graph.child_nodes(node))
+
+ return dependent
+
+ def _allocate_config(self, root):
+ """
+ Allocate a unique config instance for a task in order
+ to prevent interference between parallel tasks.
+ """
+ if self._config_pool[root]:
+ temp_settings = self._config_pool[root].pop()
+ else:
+ temp_settings = portage.config(clone=self.pkgsettings[root])
+ # Since config.setcpv() isn't guaranteed to call config.reset() due to
+ # performance reasons, call it here to make sure all settings from the
+ # previous package get flushed out (such as PORTAGE_LOG_FILE).
+ temp_settings.reload()
+ temp_settings.reset()
+ return temp_settings
+
+ def _deallocate_config(self, settings):
+ self._config_pool[settings["ROOT"]].append(settings)
+
+ def _main_loop(self):
+
+ # Only allow 1 job max if a restart is scheduled
+ # due to portage update.
+ if self._is_restart_scheduled() or \
+ self._opts_no_background.intersection(self.myopts):
+ self._set_max_jobs(1)
+
+ while self._schedule():
+ self._poll_loop()
+
+ while True:
+ self._schedule()
+ if not self._is_work_scheduled():
+ break
+ self._poll_loop()
+
+ def _keep_scheduling(self):
+ return bool(not self._terminated_tasks and self._pkg_queue and \
+ not (self._failed_pkgs and not self._build_opts.fetchonly))
+
+ def _is_work_scheduled(self):
+ return bool(self._running_tasks)
+
+ def _schedule_tasks(self):
+
+ while True:
+
+ # When the number of jobs and merges drops to zero,
+ # process a single merge from _merge_wait_queue if
+ # it's not empty. We only process one since these are
+ # special packages and we want to ensure that
+ # parallel-install does not cause more than one of
+ # them to install at the same time.
+ if (self._merge_wait_queue and not self._jobs and
+ not self._task_queues.merge):
+ task = self._merge_wait_queue.popleft()
+ task.addExitListener(self._merge_wait_exit_handler)
+ self._task_queues.merge.add(task)
+ self._status_display.merges = len(self._task_queues.merge)
+ self._merge_wait_scheduled.append(task)
+
+ self._schedule_tasks_imp()
+ self._status_display.display()
+
+ state_change = 0
+ for q in self._task_queues.values():
+ if q.schedule():
+ state_change += 1
+
+ # Cancel prefetchers if they're the only reason
+ # the main poll loop is still running.
+ if self._failed_pkgs and not self._build_opts.fetchonly and \
+ not self._is_work_scheduled() and \
+ self._task_queues.fetch:
+ self._task_queues.fetch.clear()
+ state_change += 1
+
+ if not (state_change or \
+ (self._merge_wait_queue and not self._jobs and
+ not self._task_queues.merge)):
+ break
+
+ return self._keep_scheduling()
+
+ def _job_delay(self):
+ """
+ @rtype: bool
+ @returns: True if job scheduling should be delayed, False otherwise.
+ """
+
+ if self._jobs and self._max_load is not None:
+
+ current_time = time.time()
+
+ delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
+ if delay > self._job_delay_max:
+ delay = self._job_delay_max
+ if (current_time - self._previous_job_start_time) < delay:
+ return True
+
+ return False
+
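With the defaults set in the constructor (_job_delay_factor=1.0, _job_delay_exp=1.5, _job_delay_max=10), the delay grows with the job count and is capped. A worked example in plain arithmetic:

    # Illustration only, not part of the patch.
    for jobs in (1, 2, 4, 8):
        delay = min(10, 1.0 * jobs ** 1.5)
        print(jobs, round(delay, 2))   # -> 1 1.0, 2 2.83, 4 8.0, 8 10
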
+ def _schedule_tasks_imp(self):
+ """
+ @rtype: bool
+ @returns: True if state changed, False otherwise.
+ """
+
+ state_change = 0
+
+ while True:
+
+ if not self._keep_scheduling():
+ return bool(state_change)
+
+ if self._choose_pkg_return_early or \
+ self._merge_wait_scheduled or \
+ (self._jobs and self._unsatisfied_system_deps) or \
+ not self._can_add_job() or \
+ self._job_delay():
+ return bool(state_change)
+
+ pkg = self._choose_pkg()
+ if pkg is None:
+ return bool(state_change)
+
+ state_change += 1
+
+ if not pkg.installed:
+ self._pkg_count.curval += 1
+
+ task = self._task(pkg)
+
+ if pkg.installed:
+ merge = PackageMerge(merge=task)
+ self._running_tasks[id(merge)] = merge
+ merge.addExitListener(self._merge_exit)
+ self._task_queues.merge.addFront(merge)
+
+ elif pkg.built:
+ self._jobs += 1
+ self._previous_job_start_time = time.time()
+ self._status_display.running = self._jobs
+ self._running_tasks[id(task)] = task
+ task.addExitListener(self._extract_exit)
+ self._task_queues.jobs.add(task)
+
+ else:
+ self._jobs += 1
+ self._previous_job_start_time = time.time()
+ self._status_display.running = self._jobs
+ self._running_tasks[id(task)] = task
+ task.addExitListener(self._build_exit)
+ self._task_queues.jobs.add(task)
+
+ return bool(state_change)
+
+ def _task(self, pkg):
+
+ pkg_to_replace = None
+ if pkg.operation != "uninstall":
+ vardb = pkg.root_config.trees["vartree"].dbapi
+ previous_cpv = [x for x in vardb.match(pkg.slot_atom) \
+ if portage.cpv_getkey(x) == pkg.cp]
+ if not previous_cpv and vardb.cpv_exists(pkg.cpv):
+ # same cpv, different SLOT
+ previous_cpv = [pkg.cpv]
+ if previous_cpv:
+ previous_cpv = previous_cpv.pop()
+ pkg_to_replace = self._pkg(previous_cpv,
+ "installed", pkg.root_config, installed=True,
+ operation="uninstall")
+
+ prefetcher = self._prefetchers.pop(pkg, None)
+ if prefetcher is not None and not prefetcher.isAlive():
+ try:
+ self._task_queues.fetch._task_queue.remove(prefetcher)
+ except ValueError:
+ pass
+ prefetcher = None
+
+ task = MergeListItem(args_set=self._args_set,
+ background=self._background, binpkg_opts=self._binpkg_opts,
+ build_opts=self._build_opts,
+ config_pool=self._ConfigPool(pkg.root,
+ self._allocate_config, self._deallocate_config),
+ emerge_opts=self.myopts,
+ find_blockers=self._find_blockers(pkg), logger=self._logger,
+ mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
+ pkg_to_replace=pkg_to_replace,
+ prefetcher=prefetcher,
+ scheduler=self._sched_iface,
+ settings=self._allocate_config(pkg.root),
+ statusMessage=self._status_msg,
+ world_atom=self._world_atom)
+
+ return task
+
+ def _failed_pkg_msg(self, failed_pkg, action, preposition):
+ pkg = failed_pkg.pkg
+ msg = "%s to %s %s" % \
+ (bad("Failed"), action, colorize("INFORM", pkg.cpv))
+ if pkg.root != "/":
+ msg += " %s %s" % (preposition, pkg.root)
+
+ log_path = self._locate_failure_log(failed_pkg)
+ if log_path is not None:
+ msg += ", Log file:"
+ self._status_msg(msg)
+
+ if log_path is not None:
+ self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
+
+ def _status_msg(self, msg):
+ """
+ Display a brief status message (no newlines) in the status display.
+ This is called by tasks to provide feedback to the user. It
+ delegates the responsibility of generating \r and \n control
+ characters to the status display, which guarantees that lines are
+ created or erased when necessary and appropriate.
+
+ @type msg: str
+ @param msg: a brief status message (no newlines allowed)
+ """
+ if not self._background:
+ writemsg_level("\n")
+ self._status_display.displayMessage(msg)
+
+ def _save_resume_list(self):
+ """
+ Do this before verifying the ebuild Manifests since it might
+ be possible for the user to use --resume --skipfirst to get past
+ a non-essential package with a broken digest.
+ """
+ mtimedb = self._mtimedb
+
+ mtimedb["resume"] = {}
+ # Stored as a dict starting with portage-2.1.6_rc1, and supported
+ # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
+ # a list type for options.
+ mtimedb["resume"]["myopts"] = self.myopts.copy()
+
+ # Convert Atom instances to plain str.
+ mtimedb["resume"]["favorites"] = [str(x) for x in self._favorites]
+ mtimedb["resume"]["mergelist"] = [list(x) \
+ for x in self._mergelist \
+ if isinstance(x, Package) and x.operation == "merge"]
+
+ mtimedb.commit()
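+
+	# Editor's note: an illustrative, hypothetical shape of the saved
+	# data, not part of the original code. Each mergelist entry is a
+	# 4-element list of [pkg_type, pkg_root, pkg_key, pkg_action]:
+	#
+	#     mtimedb["resume"] = {
+	#         "myopts": {"--jobs": True},
+	#         "favorites": ["app-misc/foo"],
+	#         "mergelist": [["ebuild", "/", "app-misc/foo-1.0", "merge"]],
+	#     }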
+
+ def _calc_resume_list(self):
+ """
+ Use the current resume list to calculate a new one,
+ dropping any packages with unsatisfied deps.
+ @rtype: bool
+ @returns: True if successful, False otherwise.
+ """
+ print(colorize("GOOD", "*** Resuming merge..."))
+
+ # free some memory before creating
+ # the resume depgraph
+ self._destroy_graph()
+
+ myparams = create_depgraph_params(self.myopts, None)
+ success = False
+ e = None
+ try:
+ success, mydepgraph, dropped_tasks = resume_depgraph(
+ self.settings, self.trees, self._mtimedb, self.myopts,
+ myparams, self._spinner)
+ except depgraph.UnsatisfiedResumeDep as exc:
+ # rename variable to avoid python-3.0 error:
+ # SyntaxError: can not delete variable 'e' referenced in nested
+ # scope
+ e = exc
+ mydepgraph = e.depgraph
+ dropped_tasks = set()
+
+ if e is not None:
+ def unsatisfied_resume_dep_msg():
+ mydepgraph.display_problems()
+ out = portage.output.EOutput()
+ out.eerror("One or more packages are either masked or " + \
+ "have missing dependencies:")
+ out.eerror("")
+ indent = " "
+ show_parents = set()
+ for dep in e.value:
+ if dep.parent in show_parents:
+ continue
+ show_parents.add(dep.parent)
+ if dep.atom is None:
+ out.eerror(indent + "Masked package:")
+ out.eerror(2 * indent + str(dep.parent))
+ out.eerror("")
+ else:
+ out.eerror(indent + str(dep.atom) + " pulled in by:")
+ out.eerror(2 * indent + str(dep.parent))
+ out.eerror("")
+ msg = "The resume list contains packages " + \
+ "that are either masked or have " + \
+ "unsatisfied dependencies. " + \
+ "Please restart/continue " + \
+ "the operation manually, or use --skipfirst " + \
+ "to skip the first package in the list and " + \
+ "any other packages that may be " + \
+ "masked or have missing dependencies."
+ for line in textwrap.wrap(msg, 72):
+ out.eerror(line)
+ self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
+ return False
+
+ if success and self._show_list():
+ mylist = mydepgraph.altlist()
+ if mylist:
+ if "--tree" in self.myopts:
+ mylist.reverse()
+ mydepgraph.display(mylist, favorites=self._favorites)
+
+ if not success:
+ self._post_mod_echo_msgs.append(mydepgraph.display_problems)
+ return False
+ mydepgraph.display_problems()
+ self._init_graph(mydepgraph.schedulerGraph())
+
+ msg_width = 75
+ for task in dropped_tasks:
+ if not (isinstance(task, Package) and task.operation == "merge"):
+ continue
+ pkg = task
+ msg = "emerge --keep-going:" + \
+ " %s" % (pkg.cpv,)
+ if pkg.root != "/":
+ msg += " for %s" % (pkg.root,)
+ msg += " dropped due to unsatisfied dependency."
+ for line in textwrap.wrap(msg, msg_width):
+ eerror(line, phase="other", key=pkg.cpv)
+ settings = self.pkgsettings[pkg.root]
+ # Ensure that log collection from $T is disabled inside
+ # elog_process(), since any logs that might exist are
+ # not valid here.
+ settings.pop("T", None)
+ portage.elog.elog_process(pkg.cpv, settings)
+ self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
+
+ return True
+
+ def _show_list(self):
+ myopts = self.myopts
+ if "--quiet" not in myopts and \
+ ("--ask" in myopts or "--tree" in myopts or \
+ "--verbose" in myopts):
+ return True
+ return False
+
+ def _world_atom(self, pkg):
+ """
+ Add the package to or remove it from the world file, but only if
+ it's supposed to be added or removed. Otherwise, do nothing.
+ """
+
+ if set(("--buildpkgonly", "--fetchonly",
+ "--fetch-all-uri",
+ "--oneshot", "--onlydeps",
+ "--pretend")).intersection(self.myopts):
+ return
+
+ if pkg.root != self.target_root:
+ return
+
+ args_set = self._args_set
+ if not args_set.findAtomForPackage(pkg):
+ return
+
+ logger = self._logger
+ pkg_count = self._pkg_count
+ root_config = pkg.root_config
+ world_set = root_config.sets["selected"]
+ world_locked = False
+ if hasattr(world_set, "lock"):
+ world_set.lock()
+ world_locked = True
+
+ try:
+ if hasattr(world_set, "load"):
+ world_set.load() # maybe it's changed on disk
+
+ if pkg.operation == "uninstall":
+ if hasattr(world_set, "cleanPackage"):
+ world_set.cleanPackage(pkg.root_config.trees["vartree"].dbapi,
+ pkg.cpv)
+ if hasattr(world_set, "remove"):
+ for s in pkg.root_config.setconfig.active:
+ world_set.remove(SETPREFIX+s)
+ else:
+ atom = create_world_atom(pkg, args_set, root_config)
+ if atom:
+ if hasattr(world_set, "add"):
+ self._status_msg(('Recording %s in "world" ' + \
+ 'favorites file...') % atom)
+ logger.log(" === (%s of %s) Updating world file (%s)" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv))
+ world_set.add(atom)
+ else:
+ writemsg_level('\n!!! Unable to record %s in "world"\n' % \
+ (atom,), level=logging.WARN, noiselevel=-1)
+ finally:
+ if world_locked:
+ world_set.unlock()
+
+ def _pkg(self, cpv, type_name, root_config, installed=False,
+ operation=None, myrepo=None):
+ """
+ Get a package instance from the cache, or create a new
+ one if necessary. Raises KeyError from aux_get if it
+ fails for some reason (package does not exist or is
+ corrupt).
+ """
+
+ # Reuse existing instance when available.
+ pkg = self._pkg_cache.get(Package._gen_hash_key(cpv=cpv,
+ type_name=type_name, repo_name=myrepo, root_config=root_config,
+ installed=installed, operation=operation))
+
+ if pkg is not None:
+ return pkg
+
+ tree_type = depgraph.pkg_tree_map[type_name]
+ db = root_config.trees[tree_type].dbapi
+ db_keys = list(self.trees[root_config.root][
+ tree_type].dbapi._aux_cache_keys)
+ metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
+ pkg = Package(built=(type_name != "ebuild"),
+ cpv=cpv, installed=installed, metadata=metadata,
+ root_config=root_config, type_name=type_name)
+ self._pkg_cache[pkg] = pkg
+ return pkg
diff --git a/portage_with_autodep/pym/_emerge/SequentialTaskQueue.py b/portage_with_autodep/pym/_emerge/SequentialTaskQueue.py
new file mode 100644
index 0000000..c1c98c4
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/SequentialTaskQueue.py
@@ -0,0 +1,89 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+from _emerge.SlotObject import SlotObject
+from collections import deque
+class SequentialTaskQueue(SlotObject):
+
+ __slots__ = ("max_jobs", "running_tasks") + \
+ ("_dirty", "_scheduling", "_task_queue")
+
+ def __init__(self, **kwargs):
+ SlotObject.__init__(self, **kwargs)
+ self._task_queue = deque()
+ self.running_tasks = set()
+ if self.max_jobs is None:
+ self.max_jobs = 1
+ self._dirty = True
+
+ def add(self, task):
+ self._task_queue.append(task)
+ self._dirty = True
+
+ def addFront(self, task):
+ self._task_queue.appendleft(task)
+ self._dirty = True
+
+ def schedule(self):
+
+ if not self._dirty:
+ return False
+
+ if not self:
+ return False
+
+ if self._scheduling:
+ # Ignore any recursive schedule() calls triggered via
+ # self._task_exit().
+ return False
+
+ self._scheduling = True
+
+ task_queue = self._task_queue
+ running_tasks = self.running_tasks
+ max_jobs = self.max_jobs
+ state_changed = False
+
+ while task_queue and \
+ (max_jobs is True or len(running_tasks) < max_jobs):
+ task = task_queue.popleft()
+ cancelled = getattr(task, "cancelled", None)
+ if not cancelled:
+ running_tasks.add(task)
+ task.addExitListener(self._task_exit)
+ task.start()
+ state_changed = True
+
+ self._dirty = False
+ self._scheduling = False
+
+ return state_changed
+
+ def _task_exit(self, task):
+ """
+ Since we can always rely on exit listeners being called, the set of
+ running tasks is always pruned automatically and there is never any need
+ to actively prune it.
+ """
+ self.running_tasks.remove(task)
+ if self._task_queue:
+ self._dirty = True
+
+ def clear(self):
+ self._task_queue.clear()
+ running_tasks = self.running_tasks
+ while running_tasks:
+ task = running_tasks.pop()
+ task.removeExitListener(self._task_exit)
+ task.cancel()
+ self._dirty = False
+
+ def __bool__(self):
+ return bool(self._task_queue or self.running_tasks)
+
+ if sys.hexversion < 0x3000000:
+ __nonzero__ = __bool__
+
+ def __len__(self):
+ return len(self._task_queue) + len(self.running_tasks)
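+
+# Editor's note: a minimal, self-contained usage sketch; it is not part
+# of the original file. The _DemoTask class is hypothetical and mimics
+# only the slice of the AsynchronousTask contract that this queue uses:
+# start(), addExitListener(), removeExitListener() and "cancelled".
+if __name__ == "__main__":
+    class _DemoTask(object):
+        cancelled = False
+        def __init__(self, name):
+            self.name = name
+            self._exit_listeners = []
+        def addExitListener(self, listener):
+            self._exit_listeners.append(listener)
+        def removeExitListener(self, listener):
+            self._exit_listeners.remove(listener)
+        def start(self):
+            # A real task runs asynchronously; this one finishes
+            # immediately and notifies its exit listeners.
+            for listener in list(self._exit_listeners):
+                listener(self)
+
+    queue = SequentialTaskQueue(max_jobs=2)
+    for name in ("a", "b", "c"):
+        queue.add(_DemoTask(name))
+    # schedule() starts up to max_jobs tasks at a time; since _DemoTask
+    # exits synchronously, repeated calls drain the whole queue.
+    while queue:
+        queue.schedule()
+    assert len(queue) == 0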
diff --git a/portage_with_autodep/pym/_emerge/SetArg.py b/portage_with_autodep/pym/_emerge/SetArg.py
new file mode 100644
index 0000000..94cf0a6
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/SetArg.py
@@ -0,0 +1,11 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DependencyArg import DependencyArg
+from portage._sets import SETPREFIX
+class SetArg(DependencyArg):
+ def __init__(self, pset=None, **kwargs):
+ DependencyArg.__init__(self, **kwargs)
+ self.pset = pset
+ self.name = self.arg[len(SETPREFIX):]
+
diff --git a/portage_with_autodep/pym/_emerge/SlotObject.py b/portage_with_autodep/pym/_emerge/SlotObject.py
new file mode 100644
index 0000000..fdc6f35
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/SlotObject.py
@@ -0,0 +1,42 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+class SlotObject(object):
+ __slots__ = ("__weakref__",)
+
+ def __init__(self, **kwargs):
+ classes = [self.__class__]
+ while classes:
+ c = classes.pop()
+ if c is SlotObject:
+ continue
+ classes.extend(c.__bases__)
+ slots = getattr(c, "__slots__", None)
+ if not slots:
+ continue
+ for myattr in slots:
+ myvalue = kwargs.get(myattr, None)
+ setattr(self, myattr, myvalue)
+
+ def copy(self):
+ """
+ Create a new instance and copy all attributes
+ defined from __slots__ (including those from
+ inherited classes).
+ """
+ obj = self.__class__()
+
+ classes = [self.__class__]
+ while classes:
+ c = classes.pop()
+ if c is SlotObject:
+ continue
+ classes.extend(c.__bases__)
+ slots = getattr(c, "__slots__", None)
+ if not slots:
+ continue
+ for myattr in slots:
+ setattr(obj, myattr, getattr(self, myattr))
+
+ return obj
+
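+# Editor's note: a minimal usage sketch, not part of the original file.
+# The Point class is hypothetical; it shows how SlotObject.__init__()
+# walks __slots__ across the class hierarchy, how unset slots default
+# to None, and how copy() duplicates all slot values.
+if __name__ == "__main__":
+    class Point(SlotObject):
+        __slots__ = ("x", "y")
+
+    p = Point(x=1, y=2)
+    q = p.copy()
+    assert (q.x, q.y) == (1, 2)
+    # Keywords that don't match any slot are silently ignored, and
+    # slots that receive no keyword are initialized to None.
+    r = Point(x=3)
+    assert r.y is None
+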
diff --git a/portage_with_autodep/pym/_emerge/SpawnProcess.py b/portage_with_autodep/pym/_emerge/SpawnProcess.py
new file mode 100644
index 0000000..b72971c
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/SpawnProcess.py
@@ -0,0 +1,235 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.SubProcess import SubProcess
+import sys
+from portage.cache.mappings import slot_dict_class
+import portage
+from portage import _encodings
+from portage import _unicode_encode
+from portage import os
+from portage.const import BASH_BINARY
+import fcntl
+import errno
+import gzip
+
+class SpawnProcess(SubProcess):
+
+ """
+ Constructor keyword args are passed into portage.process.spawn().
+ The required "args" keyword argument will be passed as the first
+ spawn() argument.
+ """
+
+ _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
+ "uid", "gid", "groups", "umask", "logfile",
+ "path_lookup", "pre_exec")
+
+ __slots__ = ("args",) + \
+ _spawn_kwarg_names + ("_selinux_type",)
+
+ _file_names = ("log", "process", "stdout")
+ _files_dict = slot_dict_class(_file_names, prefix="")
+
+ def _start(self):
+
+ if self.cancelled:
+ return
+
+ if self.fd_pipes is None:
+ self.fd_pipes = {}
+ fd_pipes = self.fd_pipes
+ fd_pipes.setdefault(0, sys.stdin.fileno())
+ fd_pipes.setdefault(1, sys.stdout.fileno())
+ fd_pipes.setdefault(2, sys.stderr.fileno())
+
+ # flush any pending output
+ for fd in fd_pipes.values():
+ if fd == sys.stdout.fileno():
+ sys.stdout.flush()
+ if fd == sys.stderr.fileno():
+ sys.stderr.flush()
+
+ self._files = self._files_dict()
+ files = self._files
+
+ master_fd, slave_fd = self._pipe(fd_pipes)
+ fcntl.fcntl(master_fd, fcntl.F_SETFL,
+ fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ logfile = None
+ if self._can_log(slave_fd):
+ logfile = self.logfile
+
+ null_input = None
+ fd_pipes_orig = fd_pipes.copy()
+ if self.background:
+ # TODO: Use job control functions like tcsetpgrp() to control
+ # access to stdin. Until then, use /dev/null so that any
+ # attempts to read from stdin will immediately return EOF
+ # instead of blocking indefinitely.
+ null_input = open('/dev/null', 'rb')
+ fd_pipes[0] = null_input.fileno()
+ else:
+ fd_pipes[0] = fd_pipes_orig[0]
+
+ # WARNING: It is very important to use unbuffered mode here,
+ # in order to avoid issue 5380 with python3.
+ files.process = os.fdopen(master_fd, 'rb', 0)
+ if logfile is not None:
+
+ fd_pipes[1] = slave_fd
+ fd_pipes[2] = slave_fd
+
+ files.log = open(_unicode_encode(logfile,
+ encoding=_encodings['fs'], errors='strict'), mode='ab')
+ if logfile.endswith('.gz'):
+ files.log = gzip.GzipFile(filename='', mode='ab',
+ fileobj=files.log)
+
+ portage.util.apply_secpass_permissions(logfile,
+ uid=portage.portage_uid, gid=portage.portage_gid,
+ mode=0o660)
+
+ if not self.background:
+ files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
+
+ output_handler = self._output_handler
+
+ else:
+
+ # Create a dummy pipe so the scheduler can monitor
+ # the process from inside a poll() loop.
+ fd_pipes[self._dummy_pipe_fd] = slave_fd
+ if self.background:
+ fd_pipes[1] = slave_fd
+ fd_pipes[2] = slave_fd
+ output_handler = self._dummy_handler
+
+ kwargs = {}
+ for k in self._spawn_kwarg_names:
+ v = getattr(self, k)
+ if v is not None:
+ kwargs[k] = v
+
+ kwargs["fd_pipes"] = fd_pipes
+ kwargs["returnpid"] = True
+ kwargs.pop("logfile", None)
+
+ self._reg_id = self.scheduler.register(files.process.fileno(),
+ self._registered_events, output_handler)
+ self._registered = True
+
+ retval = self._spawn(self.args, **kwargs)
+
+ os.close(slave_fd)
+ if null_input is not None:
+ null_input.close()
+
+ if isinstance(retval, int):
+ # spawn failed
+ self._unregister()
+ self._set_returncode((self.pid, retval))
+ self.wait()
+ return
+
+ self.pid = retval[0]
+ portage.process.spawned_pids.remove(self.pid)
+
+ def _can_log(self, slave_fd):
+ return True
+
+ def _pipe(self, fd_pipes):
+ """
+ @type fd_pipes: dict
+ @param fd_pipes: pipes from which to copy terminal size if desired.
+ """
+ return os.pipe()
+
+ def _spawn(self, args, **kwargs):
+ spawn_func = portage.process.spawn
+
+ if self._selinux_type is not None:
+ spawn_func = portage.selinux.spawn_wrapper(spawn_func,
+ self._selinux_type)
+ # bash is an allowed entrypoint, while most binaries are not
+ if args[0] != BASH_BINARY:
+ args = [BASH_BINARY, "-c", "exec \"$@\"", args[0]] + args
+
+ return spawn_func(args, **kwargs)
+
+ def _output_handler(self, fd, event):
+
+ files = self._files
+ buf = self._read_buf(files.process, event)
+
+ if buf is not None:
+
+ if buf:
+ if not self.background:
+ write_successful = False
+ failures = 0
+ while True:
+ try:
+ if not write_successful:
+ buf.tofile(files.stdout)
+ write_successful = True
+ files.stdout.flush()
+ break
+ except IOError as e:
+ if e.errno != errno.EAGAIN:
+ raise
+ del e
+ failures += 1
+ if failures > 50:
+ # Avoid a potentially infinite loop. In
+ # most cases, the failure count is zero
+ # and it's unlikely to exceed 1.
+ raise
+
+ # This means that a subprocess has put an inherited
+ # stdio file descriptor (typically stdin) into
+ # O_NONBLOCK mode. This is not acceptable (see bug
+ # #264435), so revert it. We need to use a loop
+ # here since there's a race condition due to
+ # parallel processes being able to change the
+ # flags on the inherited file descriptor.
+ # TODO: When possible, avoid having child processes
+ # inherit stdio file descriptors from portage
+ # (maybe it can't be avoided with
+ # PROPERTIES=interactive).
+ fcntl.fcntl(files.stdout.fileno(), fcntl.F_SETFL,
+ fcntl.fcntl(files.stdout.fileno(),
+ fcntl.F_GETFL) ^ os.O_NONBLOCK)
+
+ try:
+ buf.tofile(files.log)
+ except TypeError:
+ # array.tofile() doesn't work with GzipFile
+ files.log.write(buf.tostring())
+ files.log.flush()
+ else:
+ self._unregister()
+ self.wait()
+
+ self._unregister_if_appropriate(event)
+
+ def _dummy_handler(self, fd, event):
+ """
+ This method is mainly interested in detecting EOF, since
+ the only purpose of the pipe is to allow the scheduler to
+ monitor the process from inside a poll() loop.
+ """
+
+ buf = self._read_buf(self._files.process, event)
+
+ if buf is not None:
+
+ if buf:
+ pass
+ else:
+ self._unregister()
+ self.wait()
+
+ self._unregister_if_appropriate(event)
+
diff --git a/portage_with_autodep/pym/_emerge/SubProcess.py b/portage_with_autodep/pym/_emerge/SubProcess.py
new file mode 100644
index 0000000..b99cf0b
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/SubProcess.py
@@ -0,0 +1,141 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from _emerge.AbstractPollTask import AbstractPollTask
+import signal
+import errno
+
+class SubProcess(AbstractPollTask):
+
+ __slots__ = ("pid",) + \
+ ("_files", "_reg_id")
+
+ # A file descriptor is required for the scheduler to monitor changes from
+ # inside a poll() loop. When logging is not enabled, create a pipe just to
+ # serve this purpose alone.
+ _dummy_pipe_fd = 9
+
+ def _poll(self):
+ if self.returncode is not None:
+ return self.returncode
+ if self.pid is None:
+ return self.returncode
+ if self._registered:
+ return self.returncode
+
+ try:
+ # With waitpid and WNOHANG, only check the
+ # first element of the tuple since the second
+ # element may vary (bug #337465).
+ retval = os.waitpid(self.pid, os.WNOHANG)
+ except OSError as e:
+ if e.errno != errno.ECHILD:
+ raise
+ del e
+ retval = (self.pid, 1)
+
+ if retval[0] == 0:
+ return None
+ self._set_returncode(retval)
+ self.wait()
+ return self.returncode
+
+ def _cancel(self):
+ if self.isAlive():
+ try:
+ os.kill(self.pid, signal.SIGTERM)
+ except OSError as e:
+ if e.errno != errno.ESRCH:
+ raise
+
+ def isAlive(self):
+ return self.pid is not None and \
+ self.returncode is None
+
+ def _wait(self):
+
+ if self.returncode is not None:
+ return self.returncode
+
+ if self._registered:
+ if self.cancelled:
+ timeout = 1000
+ self.scheduler.schedule(self._reg_id, timeout=timeout)
+ if self._registered:
+ try:
+ os.kill(self.pid, signal.SIGKILL)
+ except OSError as e:
+ if e.errno != errno.ESRCH:
+ raise
+ del e
+ self.scheduler.schedule(self._reg_id, timeout=timeout)
+ if self._registered:
+ self._orphan_process_warn()
+ else:
+ self.scheduler.schedule(self._reg_id)
+ self._unregister()
+ if self.returncode is not None:
+ return self.returncode
+
+ try:
+ # With waitpid and WNOHANG, only check the
+ # first element of the tuple since the second
+ # element may vary (bug #337465).
+ wait_retval = os.waitpid(self.pid, os.WNOHANG)
+ except OSError as e:
+ if e.errno != errno.ECHILD:
+ raise
+ del e
+ self._set_returncode((self.pid, 1 << 8))
+ else:
+ if wait_retval[0] != 0:
+ self._set_returncode(wait_retval)
+ else:
+ try:
+ wait_retval = os.waitpid(self.pid, 0)
+ except OSError as e:
+ if e.errno != errno.ECHILD:
+ raise
+ del e
+ self._set_returncode((self.pid, 1 << 8))
+ else:
+ self._set_returncode(wait_retval)
+
+ return self.returncode
+
+ def _orphan_process_warn(self):
+ pass
+
+ def _unregister(self):
+ """
+ Unregister from the scheduler and close open files.
+ """
+
+ self._registered = False
+
+ if self._reg_id is not None:
+ self.scheduler.unregister(self._reg_id)
+ self._reg_id = None
+
+ if self._files is not None:
+ for f in self._files.values():
+ f.close()
+ self._files = None
+
+ def _set_returncode(self, wait_retval):
+ """
+ Set the returncode in a manner compatible with
+ subprocess.Popen.returncode: A negative value -N indicates
+ that the child was terminated by signal N (Unix only).
+ """
+
+ pid, status = wait_retval
+
+ if os.WIFSIGNALED(status):
+ retval = - os.WTERMSIG(status)
+ else:
+ retval = os.WEXITSTATUS(status)
+
+ self.returncode = retval
+
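+# Editor's note: an illustration, not part of the original file. It
+# shows the os.waitpid() status-word convention (Unix only) that
+# _set_returncode() decodes into a subprocess.Popen-style returncode.
+if __name__ == "__main__":
+    import signal as _signal
+    # A normal exit with code 3 stores the code in the high byte.
+    assert os.WIFEXITED(3 << 8) and os.WEXITSTATUS(3 << 8) == 3
+    # Death by SIGTERM stores the signal number in the low bits, so
+    # _set_returncode() would report returncode == -SIGTERM.
+    status = int(_signal.SIGTERM)
+    assert os.WIFSIGNALED(status)
+    assert os.WTERMSIG(status) == _signal.SIGTERM
+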
diff --git a/portage_with_autodep/pym/_emerge/Task.py b/portage_with_autodep/pym/_emerge/Task.py
new file mode 100644
index 0000000..efbe3a9
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/Task.py
@@ -0,0 +1,42 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.SlotObject import SlotObject
+class Task(SlotObject):
+ __slots__ = ("_hash_key", "_hash_value")
+
+ def __eq__(self, other):
+ try:
+ return self._hash_key == other._hash_key
+ except AttributeError:
+ # depgraph._pkg() generates _hash_key
+ # for lookups here, so handle that
+ return self._hash_key == other
+
+ def __ne__(self, other):
+ try:
+ return self._hash_key != other._hash_key
+ except AttributeError:
+ return True
+
+ def __hash__(self):
+ return self._hash_value
+
+ def __len__(self):
+ return len(self._hash_key)
+
+ def __getitem__(self, key):
+ return self._hash_key[key]
+
+ def __iter__(self):
+ return iter(self._hash_key)
+
+ def __contains__(self, key):
+ return key in self._hash_key
+
+ def __str__(self):
+ """
+ Emulate tuple.__repr__, but don't show 'foo' as u'foo' for unicode
+ strings.
+ """
+ return "(%s)" % ", ".join(("'%s'" % x for x in self._hash_key))
diff --git a/portage_with_autodep/pym/_emerge/TaskScheduler.py b/portage_with_autodep/pym/_emerge/TaskScheduler.py
new file mode 100644
index 0000000..83c0cbe
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/TaskScheduler.py
@@ -0,0 +1,25 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.QueueScheduler import QueueScheduler
+from _emerge.SequentialTaskQueue import SequentialTaskQueue
+
+class TaskScheduler(object):
+
+ """
+ A simple way to handle scheduling of AsynchronousTask instances. Simply
+ add tasks and call run(). The run() method returns when no tasks remain.
+ """
+
+ def __init__(self, max_jobs=None, max_load=None):
+ self._queue = SequentialTaskQueue(max_jobs=max_jobs)
+ self._scheduler = QueueScheduler(
+ max_jobs=max_jobs, max_load=max_load)
+ self.sched_iface = self._scheduler.sched_iface
+ self.run = self._scheduler.run
+ self.clear = self._scheduler.clear
+ self._scheduler.add(self._queue)
+
+ def add(self, task):
+ self._queue.add(task)
+
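+# Editor's note: a usage sketch in comment form, not part of the
+# original file; "mytask" stands for any AsynchronousTask instance:
+#
+#     sched = TaskScheduler(max_jobs=2)
+#     sched.add(mytask)
+#     sched.run()  # returns once every queued task has exited
+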
diff --git a/portage_with_autodep/pym/_emerge/TaskSequence.py b/portage_with_autodep/pym/_emerge/TaskSequence.py
new file mode 100644
index 0000000..1fecf63
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/TaskSequence.py
@@ -0,0 +1,44 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from _emerge.CompositeTask import CompositeTask
+from _emerge.AsynchronousTask import AsynchronousTask
+from collections import deque
+
+class TaskSequence(CompositeTask):
+ """
+ A collection of tasks that executes sequentially. Each task
+ must have an addExitListener() method that can be used as
+ a means to trigger movement from one task to the next.
+ """
+
+ __slots__ = ("_task_queue",)
+
+ def __init__(self, **kwargs):
+ AsynchronousTask.__init__(self, **kwargs)
+ self._task_queue = deque()
+
+ def add(self, task):
+ self._task_queue.append(task)
+
+ def _start(self):
+ self._start_next_task()
+
+ def _cancel(self):
+ self._task_queue.clear()
+ CompositeTask._cancel(self)
+
+ def _start_next_task(self):
+ self._start_task(self._task_queue.popleft(),
+ self._task_exit_handler)
+
+ def _task_exit_handler(self, task):
+ if self._default_exit(task) != os.EX_OK:
+ self.wait()
+ elif self._task_queue:
+ self._start_next_task()
+ else:
+ self._final_exit(task)
+ self.wait()
+
diff --git a/portage_with_autodep/pym/_emerge/UninstallFailure.py b/portage_with_autodep/pym/_emerge/UninstallFailure.py
new file mode 100644
index 0000000..e4f2834
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/UninstallFailure.py
@@ -0,0 +1,15 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+
+class UninstallFailure(portage.exception.PortageException):
+ """
+ An instance of this class is raised by unmerge() when
+ an uninstallation fails.
+ """
+ status = 1
+ def __init__(self, *pargs):
+ portage.exception.PortageException.__init__(self, pargs)
+ if pargs:
+ self.status = pargs[0]
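+
+# Editor's note: an illustration, not part of the original file. The
+# first positional argument, when given, becomes the exit status:
+if __name__ == "__main__":
+    try:
+        raise UninstallFailure(2)
+    except UninstallFailure as e:
+        assert e.status == 2
+    assert UninstallFailure().status == 1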
diff --git a/portage_with_autodep/pym/_emerge/UnmergeDepPriority.py b/portage_with_autodep/pym/_emerge/UnmergeDepPriority.py
new file mode 100644
index 0000000..4316600
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/UnmergeDepPriority.py
@@ -0,0 +1,41 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AbstractDepPriority import AbstractDepPriority
+class UnmergeDepPriority(AbstractDepPriority):
+ __slots__ = ("ignored", "optional", "satisfied",)
+ """
+ Combination of properties Priority Category
+
+ runtime 0 HARD
+ runtime_post -1 HARD
+ buildtime -2 SOFT
+ (none of the above) -2 SOFT
+ """
+
+ MAX = 0
+ SOFT = -2
+ MIN = -2
+
+ def __init__(self, **kwargs):
+ AbstractDepPriority.__init__(self, **kwargs)
+ if self.buildtime:
+ self.optional = True
+
+ def __int__(self):
+ if self.runtime:
+ return 0
+ if self.runtime_post:
+ return -1
+ if self.buildtime:
+ return -2
+ return -2
+
+ def __str__(self):
+ if self.ignored:
+ return "ignored"
+ myvalue = self.__int__()
+ if myvalue > self.SOFT:
+ return "hard"
+ return "soft"
+
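+# Editor's note: an illustration, not part of the original file. It
+# exercises the mapping documented in the class docstring, assuming
+# (as in the sibling AbstractDepPriority module) that the runtime,
+# runtime_post and buildtime keywords map onto slots:
+if __name__ == "__main__":
+    assert int(UnmergeDepPriority(runtime=True)) == 0
+    assert int(UnmergeDepPriority(runtime_post=True)) == -1
+    assert int(UnmergeDepPriority(buildtime=True)) == -2
+    # Anything above SOFT (-2) prints as "hard", the rest as "soft".
+    assert str(UnmergeDepPriority(runtime=True)) == "hard"
+    assert str(UnmergeDepPriority(buildtime=True)) == "soft"
+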
diff --git a/portage_with_autodep/pym/_emerge/UseFlagDisplay.py b/portage_with_autodep/pym/_emerge/UseFlagDisplay.py
new file mode 100644
index 0000000..3daca19
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/UseFlagDisplay.py
@@ -0,0 +1,122 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from itertools import chain
+import sys
+
+from portage import _encodings, _unicode_decode, _unicode_encode
+from portage.output import red
+from portage.util import cmp_sort_key
+from portage.output import blue
+
+class UseFlagDisplay(object):
+
+ __slots__ = ('name', 'enabled', 'forced')
+
+ def __init__(self, name, enabled, forced):
+ self.name = name
+ self.enabled = enabled
+ self.forced = forced
+
+ def __str__(self):
+ s = self.name
+ if self.enabled:
+ s = red(s)
+ else:
+ s = '-' + s
+ s = blue(s)
+ if self.forced:
+ s = '(%s)' % s
+ return s
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['content'])
+
+ def _cmp_combined(a, b):
+ """
+ Sort by name, combining enabled and disabled flags.
+ """
+ return (a.name > b.name) - (a.name < b.name)
+
+ sort_combined = cmp_sort_key(_cmp_combined)
+ del _cmp_combined
+
+ def _cmp_separated(a, b):
+ """
+ Sort by name, separating enabled flags from disabled flags.
+ """
+ enabled_diff = b.enabled - a.enabled
+ if enabled_diff:
+ return enabled_diff
+ return (a.name > b.name) - (a.name < b.name)
+
+ sort_separated = cmp_sort_key(_cmp_separated)
+ del _cmp_separated
+
+def pkg_use_display(pkg, opts, modified_use=None):
+ settings = pkg.root_config.settings
+ use_expand = pkg.use.expand
+ use_expand_hidden = pkg.use.expand_hidden
+ alphabetical_use = '--alphabetical' in opts
+ forced_flags = set(chain(pkg.use.force,
+ pkg.use.mask))
+ if modified_use is None:
+ use = set(pkg.use.enabled)
+ else:
+ use = set(modified_use)
+ use.discard(settings.get('ARCH'))
+ use_expand_flags = set()
+ use_enabled = {}
+ use_disabled = {}
+ for varname in use_expand:
+ flag_prefix = varname.lower() + "_"
+ for f in use:
+ if f.startswith(flag_prefix):
+ use_expand_flags.add(f)
+ use_enabled.setdefault(
+ varname.upper(), []).append(f[len(flag_prefix):])
+
+ for f in pkg.iuse.all:
+ if f.startswith(flag_prefix):
+ use_expand_flags.add(f)
+ if f not in use:
+ use_disabled.setdefault(
+ varname.upper(), []).append(f[len(flag_prefix):])
+
+ var_order = set(use_enabled)
+ var_order.update(use_disabled)
+ var_order = sorted(var_order)
+ var_order.insert(0, 'USE')
+ use.difference_update(use_expand_flags)
+ use_enabled['USE'] = list(use)
+ use_disabled['USE'] = []
+
+ for f in pkg.iuse.all:
+ if f not in use and \
+ f not in use_expand_flags:
+ use_disabled['USE'].append(f)
+
+ flag_displays = []
+ for varname in var_order:
+ if varname.lower() in use_expand_hidden:
+ continue
+ flags = []
+ for f in use_enabled.get(varname, []):
+ flags.append(UseFlagDisplay(f, True, f in forced_flags))
+ for f in use_disabled.get(varname, []):
+ flags.append(UseFlagDisplay(f, False, f in forced_flags))
+ if alphabetical_use:
+ flags.sort(key=UseFlagDisplay.sort_combined)
+ else:
+ flags.sort(key=UseFlagDisplay.sort_separated)
+ # Use _unicode_decode() to force unicode format string so
+ # that UseFlagDisplay.__unicode__() is called in python2.
+ flag_displays.append('%s="%s"' % (varname,
+ ' '.join(_unicode_decode("%s") % (f,) for f in flags)))
+
+ return ' '.join(flag_displays)
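+
+# Editor's note: an illustration, not part of the original file. It
+# contrasts the two sort keys defined above (flag names hypothetical):
+if __name__ == "__main__":
+    flags = [UseFlagDisplay("acl", False, False),
+        UseFlagDisplay("zlib", True, False),
+        UseFlagDisplay("xattr", True, True)]
+    by_name = sorted(flags, key=UseFlagDisplay.sort_combined)
+    assert [f.name for f in by_name] == ["acl", "xattr", "zlib"]
+    # sort_separated groups enabled flags before disabled ones.
+    grouped = sorted(flags, key=UseFlagDisplay.sort_separated)
+    assert [f.name for f in grouped] == ["xattr", "zlib", "acl"]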
diff --git a/portage_with_autodep/pym/_emerge/__init__.py b/portage_with_autodep/pym/_emerge/__init__.py
new file mode 100644
index 0000000..f98c564
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/portage_with_autodep/pym/_emerge/_find_deep_system_runtime_deps.py b/portage_with_autodep/pym/_emerge/_find_deep_system_runtime_deps.py
new file mode 100644
index 0000000..ca09d83
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/_find_deep_system_runtime_deps.py
@@ -0,0 +1,38 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DepPriority import DepPriority
+from _emerge.Package import Package
+
+def _find_deep_system_runtime_deps(graph):
+ deep_system_deps = set()
+ node_stack = []
+ for node in graph:
+ if not isinstance(node, Package) or \
+ node.operation == 'uninstall':
+ continue
+ if node.root_config.sets['system'].findAtomForPackage(node):
+ node_stack.append(node)
+
+ def ignore_priority(priority):
+ """
+ Ignore non-runtime priorities.
+ """
+ if isinstance(priority, DepPriority) and \
+ (priority.runtime or priority.runtime_post):
+ return False
+ return True
+
+ while node_stack:
+ node = node_stack.pop()
+ if node in deep_system_deps:
+ continue
+ deep_system_deps.add(node)
+ for child in graph.child_nodes(node, ignore_priority=ignore_priority):
+ if not isinstance(child, Package) or \
+ child.operation == 'uninstall':
+ continue
+ node_stack.append(child)
+
+ return deep_system_deps
+
diff --git a/portage_with_autodep/pym/_emerge/_flush_elog_mod_echo.py b/portage_with_autodep/pym/_emerge/_flush_elog_mod_echo.py
new file mode 100644
index 0000000..eab4168
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/_flush_elog_mod_echo.py
@@ -0,0 +1,15 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.elog import mod_echo
+
+def _flush_elog_mod_echo():
+ """
+ Dump the mod_echo output now so that our other
+ notifications are shown last.
+ @rtype: bool
+ @returns: True if messages were shown, False otherwise.
+ """
+ messages_shown = bool(mod_echo._items)
+ mod_echo.finalize()
+ return messages_shown
diff --git a/portage_with_autodep/pym/_emerge/actions.py b/portage_with_autodep/pym/_emerge/actions.py
new file mode 100644
index 0000000..2166963
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/actions.py
@@ -0,0 +1,3123 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import errno
+import logging
+import platform
+import pwd
+import random
+import re
+import shutil
+import signal
+import socket
+import stat
+import sys
+import tempfile
+import textwrap
+import time
+from itertools import chain
+
+import portage
+from portage import os
+from portage import subprocess_getstatusoutput
+from portage import _unicode_decode
+from portage.cache.cache_errors import CacheError
+from portage.const import GLOBAL_CONFIG_PATH, NEWS_LIB_PATH
+from portage.const import _ENABLE_DYN_LINK_MAP, _ENABLE_SET_CONFIG
+from portage.dbapi.dep_expand import dep_expand
+from portage.dbapi._expand_new_virt import expand_new_virt
+from portage.dep import Atom, extended_cp_match
+from portage.exception import InvalidAtom
+from portage.output import blue, bold, colorize, create_color_func, darkgreen, \
+ red, yellow
+good = create_color_func("GOOD")
+bad = create_color_func("BAD")
+from portage.package.ebuild._ipc.QueryCommand import QueryCommand
+from portage.package.ebuild.doebuild import _check_temp_dir
+from portage._sets import load_default_config, SETPREFIX
+from portage._sets.base import InternalPackageSet
+from portage.util import cmp_sort_key, writemsg, \
+ writemsg_level, writemsg_stdout
+from portage.util.digraph import digraph
+from portage._global_updates import _global_updates
+
+from _emerge.clear_caches import clear_caches
+from _emerge.countdown import countdown
+from _emerge.create_depgraph_params import create_depgraph_params
+from _emerge.Dependency import Dependency
+from _emerge.depgraph import backtrack_depgraph, depgraph, resume_depgraph
+from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
+from _emerge.emergelog import emergelog
+from _emerge.is_valid_package_atom import is_valid_package_atom
+from _emerge.MetadataRegen import MetadataRegen
+from _emerge.Package import Package
+from _emerge.ProgressHandler import ProgressHandler
+from _emerge.RootConfig import RootConfig
+from _emerge.Scheduler import Scheduler
+from _emerge.search import search
+from _emerge.SetArg import SetArg
+from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
+from _emerge.sync.getaddrinfo_validate import getaddrinfo_validate
+from _emerge.sync.old_tree_timestamp import old_tree_timestamp_warn
+from _emerge.unmerge import unmerge
+from _emerge.UnmergeDepPriority import UnmergeDepPriority
+from _emerge.UseFlagDisplay import pkg_use_display
+from _emerge.userquery import userquery
+
+if sys.hexversion >= 0x3000000:
+ long = int
+
+def action_build(settings, trees, mtimedb,
+ myopts, myaction, myfiles, spinner):
+
+ if '--usepkgonly' not in myopts:
+ old_tree_timestamp_warn(settings['PORTDIR'], settings)
+
+ # It's best for config updates in /etc/portage to be processed
+ # before we get here, so warn if they're not (bug #267103).
+ chk_updated_cfg_files(settings['EROOT'], ['/etc/portage'])
+
+ # validate the state of the resume data
+ # so that we can make assumptions later.
+ for k in ("resume", "resume_backup"):
+ if k not in mtimedb:
+ continue
+ resume_data = mtimedb[k]
+ if not isinstance(resume_data, dict):
+ del mtimedb[k]
+ continue
+ mergelist = resume_data.get("mergelist")
+ if not isinstance(mergelist, list):
+ del mtimedb[k]
+ continue
+ for x in mergelist:
+ if not (isinstance(x, list) and len(x) == 4):
+ continue
+ pkg_type, pkg_root, pkg_key, pkg_action = x
+ if pkg_root not in trees:
+ # Current $ROOT setting differs,
+ # so the list must be stale.
+ mergelist = None
+ break
+ if not mergelist:
+ del mtimedb[k]
+ continue
+ resume_opts = resume_data.get("myopts")
+ if not isinstance(resume_opts, (dict, list)):
+ del mtimedb[k]
+ continue
+ favorites = resume_data.get("favorites")
+ if not isinstance(favorites, list):
+ del mtimedb[k]
+ continue
+
+ resume = False
+ if "--resume" in myopts and \
+ ("resume" in mtimedb or
+ "resume_backup" in mtimedb):
+ resume = True
+ if "resume" not in mtimedb:
+ mtimedb["resume"] = mtimedb["resume_backup"]
+ del mtimedb["resume_backup"]
+ mtimedb.commit()
+ # "myopts" is a list for backward compatibility.
+ resume_opts = mtimedb["resume"].get("myopts", [])
+ if isinstance(resume_opts, list):
+ resume_opts = dict((k,True) for k in resume_opts)
+ for opt in ("--ask", "--color", "--skipfirst", "--tree"):
+ resume_opts.pop(opt, None)
+
+ # Current options always override resume_opts.
+ resume_opts.update(myopts)
+ myopts.clear()
+ myopts.update(resume_opts)
+
+ if "--debug" in myopts:
+ writemsg_level("myopts %s\n" % (myopts,))
+
+ # Adjust config according to options of the command being resumed.
+ for myroot in trees:
+ mysettings = trees[myroot]["vartree"].settings
+ mysettings.unlock()
+ adjust_config(myopts, mysettings)
+ mysettings.lock()
+ del myroot, mysettings
+
+ ldpath_mtimes = mtimedb["ldpath"]
+ favorites=[]
+ buildpkgonly = "--buildpkgonly" in myopts
+ pretend = "--pretend" in myopts
+ fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
+ ask = "--ask" in myopts
+ enter_invalid = '--ask-enter-invalid' in myopts
+ nodeps = "--nodeps" in myopts
+ oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
+ tree = "--tree" in myopts
+ if nodeps and tree:
+ tree = False
+ del myopts["--tree"]
+ portage.writemsg(colorize("WARN", " * ") + \
+ "--tree is broken with --nodeps. Disabling...\n")
+ debug = "--debug" in myopts
+ verbose = "--verbose" in myopts
+ quiet = "--quiet" in myopts
+ myparams = create_depgraph_params(myopts, myaction)
+
+ if pretend or fetchonly:
+ # make the mtimedb readonly
+ mtimedb.filename = None
+ if '--digest' in myopts or 'digest' in settings.features:
+ if '--digest' in myopts:
+ msg = "The --digest option"
+ else:
+ msg = "The FEATURES=digest setting"
+
+ msg += " can prevent corruption from being" + \
+ " noticed. The `repoman manifest` command is the preferred" + \
+ " way to generate manifests and it is capable of doing an" + \
+ " entire repository or category at once."
+ prefix = bad(" * ")
+ writemsg(prefix + "\n")
+ from textwrap import wrap
+ for line in wrap(msg, 72):
+ writemsg("%s%s\n" % (prefix, line))
+ writemsg(prefix + "\n")
+
+ if resume:
+ favorites = mtimedb["resume"].get("favorites")
+ if not isinstance(favorites, list):
+ favorites = []
+
+ resume_data = mtimedb["resume"]
+ mergelist = resume_data["mergelist"]
+ if mergelist and "--skipfirst" in myopts:
+ for i, task in enumerate(mergelist):
+ if isinstance(task, list) and \
+ task and task[-1] == "merge":
+ del mergelist[i]
+ break
+
+ success = False
+ mydepgraph = None
+ try:
+ success, mydepgraph, dropped_tasks = resume_depgraph(
+ settings, trees, mtimedb, myopts, myparams, spinner)
+ except (portage.exception.PackageNotFound,
+ depgraph.UnsatisfiedResumeDep) as e:
+ if isinstance(e, depgraph.UnsatisfiedResumeDep):
+ mydepgraph = e.depgraph
+
+ from textwrap import wrap
+ from portage.output import EOutput
+ out = EOutput()
+
+ resume_data = mtimedb["resume"]
+ mergelist = resume_data.get("mergelist")
+ if not isinstance(mergelist, list):
+ mergelist = []
+ if (mergelist and debug) or (verbose and not quiet):
+ out.eerror("Invalid resume list:")
+ out.eerror("")
+ indent = " "
+ for task in mergelist:
+ if isinstance(task, list):
+ out.eerror(indent + str(tuple(task)))
+ out.eerror("")
+
+ if isinstance(e, depgraph.UnsatisfiedResumeDep):
+ out.eerror("One or more packages are either masked or " + \
+ "have missing dependencies:")
+ out.eerror("")
+ indent = " "
+ for dep in e.value:
+ if dep.atom is None:
+ out.eerror(indent + "Masked package:")
+ out.eerror(2 * indent + str(dep.parent))
+ out.eerror("")
+ else:
+ out.eerror(indent + str(dep.atom) + " pulled in by:")
+ out.eerror(2 * indent + str(dep.parent))
+ out.eerror("")
+ msg = "The resume list contains packages " + \
+ "that are either masked or have " + \
+ "unsatisfied dependencies. " + \
+ "Please restart/continue " + \
+ "the operation manually, or use --skipfirst " + \
+ "to skip the first package in the list and " + \
+ "any other packages that may be " + \
+ "masked or have missing dependencies."
+ for line in wrap(msg, 72):
+ out.eerror(line)
+ elif isinstance(e, portage.exception.PackageNotFound):
+ out.eerror("An expected package is " + \
+ "not available: %s" % str(e))
+ out.eerror("")
+ msg = "The resume list contains one or more " + \
+ "packages that are no longer " + \
+ "available. Please restart/continue " + \
+ "the operation manually."
+ for line in wrap(msg, 72):
+ out.eerror(line)
+
+ if success:
+ if dropped_tasks:
+ portage.writemsg("!!! One or more packages have been " + \
+ "dropped due to\n" + \
+ "!!! masking or unsatisfied dependencies:\n\n",
+ noiselevel=-1)
+ for task in dropped_tasks:
+ portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
+ portage.writemsg("\n", noiselevel=-1)
+ del dropped_tasks
+ else:
+ if mydepgraph is not None:
+ mydepgraph.display_problems()
+ if not (ask or pretend):
+ # delete the current list and also the backup
+ # since it's probably stale too.
+ for k in ("resume", "resume_backup"):
+ mtimedb.pop(k, None)
+ mtimedb.commit()
+
+ return 1
+ else:
+ if ("--resume" in myopts):
+ print(darkgreen("emerge: It seems we have nothing to resume..."))
+ return os.EX_OK
+
+ try:
+ success, mydepgraph, favorites = backtrack_depgraph(
+ settings, trees, myopts, myparams, myaction, myfiles, spinner)
+ except portage.exception.PackageSetNotFound as e:
+ root_config = trees[settings["ROOT"]]["root_config"]
+ display_missing_pkg_set(root_config, e.value)
+ return 1
+
+ if not success:
+ mydepgraph.display_problems()
+ return 1
+
+ if "--pretend" not in myopts and \
+ ("--ask" in myopts or "--tree" in myopts or \
+ "--verbose" in myopts) and \
+ not ("--quiet" in myopts and "--ask" not in myopts):
+ if "--resume" in myopts:
+ mymergelist = mydepgraph.altlist()
+ if len(mymergelist) == 0:
+ print(colorize("INFORM", "emerge: It seems we have nothing to resume..."))
+ return os.EX_OK
+ favorites = mtimedb["resume"]["favorites"]
+ retval = mydepgraph.display(
+ mydepgraph.altlist(reversed=tree),
+ favorites=favorites)
+ mydepgraph.display_problems()
+ if retval != os.EX_OK:
+ return retval
+ prompt="Would you like to resume merging these packages?"
+ else:
+ retval = mydepgraph.display(
+ mydepgraph.altlist(reversed=("--tree" in myopts)),
+ favorites=favorites)
+ mydepgraph.display_problems()
+ if retval != os.EX_OK:
+ return retval
+ mergecount=0
+ for x in mydepgraph.altlist():
+ if isinstance(x, Package) and x.operation == "merge":
+ mergecount += 1
+
+ if mergecount==0:
+ sets = trees[settings["ROOT"]]["root_config"].sets
+ world_candidates = None
+ if "selective" in myparams and \
+ not oneshot and favorites:
+ # Sets that are not world candidates are filtered
+ # out here since the favorites list needs to be
+ # complete for depgraph.loadResumeCommand() to
+ # operate correctly.
+ world_candidates = [x for x in favorites \
+ if not (x.startswith(SETPREFIX) and \
+ not sets[x[1:]].world_candidate)]
+ if "selective" in myparams and \
+ not oneshot and world_candidates:
+ print()
+ for x in world_candidates:
+ print(" %s %s" % (good("*"), x))
+ prompt="Would you like to add these packages to your world favorites?"
+ elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
+ prompt="Nothing to merge; would you like to auto-clean packages?"
+ else:
+ print()
+ print("Nothing to merge; quitting.")
+ print()
+ return os.EX_OK
+ elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
+ prompt="Would you like to fetch the source files for these packages?"
+ else:
+ prompt="Would you like to merge these packages?"
+ print()
+ if "--ask" in myopts and userquery(prompt, enter_invalid) == "No":
+ print()
+ print("Quitting.")
+ print()
+ return os.EX_OK
+ # Don't ask again (e.g. when auto-cleaning packages after merge)
+ myopts.pop("--ask", None)
+
+ if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
+ if ("--resume" in myopts):
+ mymergelist = mydepgraph.altlist()
+ if len(mymergelist) == 0:
+ print(colorize("INFORM", "emerge: It seems we have nothing to resume..."))
+ return os.EX_OK
+ favorites = mtimedb["resume"]["favorites"]
+ retval = mydepgraph.display(
+ mydepgraph.altlist(reversed=tree),
+ favorites=favorites)
+ mydepgraph.display_problems()
+ if retval != os.EX_OK:
+ return retval
+ else:
+ retval = mydepgraph.display(
+ mydepgraph.altlist(reversed=("--tree" in myopts)),
+ favorites=favorites)
+ mydepgraph.display_problems()
+ if retval != os.EX_OK:
+ return retval
+ if "--buildpkgonly" in myopts:
+ graph_copy = mydepgraph._dynamic_config.digraph.copy()
+ removed_nodes = set()
+ for node in graph_copy:
+ if not isinstance(node, Package) or \
+ node.operation == "nomerge":
+ removed_nodes.add(node)
+ graph_copy.difference_update(removed_nodes)
+ if not graph_copy.hasallzeros(ignore_priority = \
+ DepPrioritySatisfiedRange.ignore_medium):
+ print("\n!!! --buildpkgonly requires all dependencies to be merged.")
+ print("!!! You have to merge the dependencies before you can build this package.\n")
+ return 1
+ else:
+ if "--buildpkgonly" in myopts:
+ graph_copy = mydepgraph._dynamic_config.digraph.copy()
+ removed_nodes = set()
+ for node in graph_copy:
+ if not isinstance(node, Package) or \
+ node.operation == "nomerge":
+ removed_nodes.add(node)
+ graph_copy.difference_update(removed_nodes)
+ if not graph_copy.hasallzeros(ignore_priority = \
+ DepPrioritySatisfiedRange.ignore_medium):
+ print("\n!!! --buildpkgonly requires all dependencies to be merged.")
+ print("!!! Cannot merge requested packages. Merge deps and try again.\n")
+ return 1
+
+ if ("--resume" in myopts):
+ favorites=mtimedb["resume"]["favorites"]
+
+ else:
+ if "resume" in mtimedb and \
+ "mergelist" in mtimedb["resume"] and \
+ len(mtimedb["resume"]["mergelist"]) > 1:
+ mtimedb["resume_backup"] = mtimedb["resume"]
+ del mtimedb["resume"]
+ mtimedb.commit()
+
+ mydepgraph.saveNomergeFavorites()
+
+ mergetask = Scheduler(settings, trees, mtimedb, myopts,
+ spinner, favorites=favorites,
+ graph_config=mydepgraph.schedulerGraph())
+
+ del mydepgraph
+ clear_caches(trees)
+
+ retval = mergetask.merge()
+
+ if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
+ if "yes" == settings.get("AUTOCLEAN"):
+ portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
+ unmerge(trees[settings["ROOT"]]["root_config"],
+ myopts, "clean", [],
+ ldpath_mtimes, autoclean=1)
+ else:
+ portage.writemsg_stdout(colorize("WARN", "WARNING:")
+ + " AUTOCLEAN is disabled. This can cause serious"
+ + " problems due to overlapping packages.\n")
+
+ return retval
+
+def action_config(settings, trees, myopts, myfiles):
+ enter_invalid = '--ask-enter-invalid' in myopts
+ if len(myfiles) != 1:
+ print(red("!!! config can only take a single package atom at this time\n"))
+ sys.exit(1)
+ if not is_valid_package_atom(myfiles[0]):
+ portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
+ noiselevel=-1)
+ portage.writemsg("!!! Please check ebuild(5) for full details.\n")
+ portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
+ sys.exit(1)
+ print()
+ try:
+ pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
+ except portage.exception.AmbiguousPackageName as e:
+ # Multiple matches thrown from cpv_expand
+ pkgs = e.args[0]
+ if len(pkgs) == 0:
+ print("No packages found.\n")
+ sys.exit(0)
+ elif len(pkgs) > 1:
+ if "--ask" in myopts:
+ options = []
+ print("Please select a package to configure:")
+ idx = 0
+ for pkg in pkgs:
+ idx += 1
+ options.append(str(idx))
+ print(options[-1]+") "+pkg)
+ print("X) Cancel")
+ options.append("X")
+ idx = userquery("Selection?", enter_invalid, responses=options)
+ if idx == "X":
+ sys.exit(0)
+ pkg = pkgs[int(idx)-1]
+ else:
+ print("The following packages available:")
+ for pkg in pkgs:
+ print("* "+pkg)
+ print("\nPlease use a specific atom or the --ask option.")
+ sys.exit(1)
+ else:
+ pkg = pkgs[0]
+
+ print()
+ if "--ask" in myopts:
+ if userquery("Ready to configure %s?" % pkg, enter_invalid) == "No":
+ sys.exit(0)
+ else:
+ print("Configuring pkg...")
+ print()
+ ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
+ mysettings = portage.config(clone=settings)
+ vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
+ debug = mysettings.get("PORTAGE_DEBUG") == "1"
+ retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
+ mysettings,
+ debug=(settings.get("PORTAGE_DEBUG", "") == "1"), cleanup=True,
+ mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
+ if retval == os.EX_OK:
+ portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
+ mysettings, debug=debug, mydbapi=vardb, tree="vartree")
+ print()
+
+def action_depclean(settings, trees, ldpath_mtimes,
+ myopts, action, myfiles, spinner, scheduler=None):
+ # Remove packages that aren't explicitly merged and aren't required as
+ # a dependency of another package. The world file is the explicit list.
+
+ # Global depclean or prune operations are not very safe when there are
+ # missing dependencies since it's unknown how badly incomplete
+ # the dependency graph is, and we might accidentally remove packages
+ # that should have been pulled into the graph. On the other hand, it's
+ # relatively safe to ignore missing deps when only asked to remove
+ # specific packages.
+
+ msg = []
+ if not _ENABLE_DYN_LINK_MAP:
+ msg.append("Depclean may break link level dependencies. Thus, it is\n")
+ msg.append("recommended to use a tool such as " + good("`revdep-rebuild`") + " (from\n")
+ msg.append("app-portage/gentoolkit) in order to detect such breakage.\n")
+ msg.append("\n")
+ msg.append("Always study the list of packages to be cleaned for any obvious\n")
+ msg.append("mistakes. Packages that are part of the world set will always\n")
+ msg.append("be kept. They can be manually added to this set with\n")
+ msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
+ msg.append("package.provided (see portage(5)) will be removed by\n")
+ msg.append("depclean, even if they are part of the world set.\n")
+ msg.append("\n")
+ msg.append("As a safety measure, depclean will not remove any packages\n")
+ msg.append("unless *all* required dependencies have been resolved. As a\n")
+ msg.append("consequence, it is often necessary to run %s\n" % \
+ good("`emerge --update"))
+ msg.append(good("--newuse --deep @world`") + \
+ " prior to depclean.\n")
+
+ if action == "depclean" and "--quiet" not in myopts and not myfiles:
+ portage.writemsg_stdout("\n")
+ for x in msg:
+ portage.writemsg_stdout(colorize("WARN", " * ") + x)
+
+ root_config = trees[settings['ROOT']]['root_config']
+ vardb = root_config.trees['vartree'].dbapi
+
+ args_set = InternalPackageSet(allow_repo=True)
+ if myfiles:
+ args_set.update(myfiles)
+ matched_packages = False
+ for x in args_set:
+ if vardb.match(x):
+ matched_packages = True
+ else:
+ writemsg_level("--- Couldn't find '%s' to %s.\n" % \
+ (x.replace("null/", ""), action),
+ level=logging.WARN, noiselevel=-1)
+ if not matched_packages:
+ writemsg_level(">>> No packages selected for removal by %s\n" % \
+ action)
+ return 0
+
+ # The calculation is done in a separate function so that depgraph
+ # references go out of scope and the corresponding memory
+ # is freed before we call unmerge().
+ rval, cleanlist, ordered, req_pkg_count = \
+ calc_depclean(settings, trees, ldpath_mtimes,
+ myopts, action, args_set, spinner)
+
+ clear_caches(trees)
+
+ if rval != os.EX_OK:
+ return rval
+
+ if cleanlist:
+ unmerge(root_config, myopts, "unmerge",
+ cleanlist, ldpath_mtimes, ordered=ordered,
+ scheduler=scheduler)
+
+ if action == "prune":
+ return
+
+ if not cleanlist and "--quiet" in myopts:
+ return
+
+ print("Packages installed: " + str(len(vardb.cpv_all())))
+ print("Packages in world: " + \
+ str(len(root_config.sets["selected"].getAtoms())))
+ print("Packages in system: " + \
+ str(len(root_config.sets["system"].getAtoms())))
+ print("Required packages: "+str(req_pkg_count))
+ if "--pretend" in myopts:
+ print("Number to remove: "+str(len(cleanlist)))
+ else:
+ print("Number removed: "+str(len(cleanlist)))
+
+def calc_depclean(settings, trees, ldpath_mtimes,
+ myopts, action, args_set, spinner):
+ allow_missing_deps = bool(args_set)
+
+ debug = '--debug' in myopts
+ xterm_titles = "notitles" not in settings.features
+ myroot = settings["ROOT"]
+ root_config = trees[myroot]["root_config"]
+ psets = root_config.setconfig.psets
+ deselect = myopts.get('--deselect') != 'n'
+ required_sets = {}
+ required_sets['world'] = psets['world']
+
+ # When removing packages, a temporary version of the world 'selected'
+ # set may be used which excludes packages that are intended to be
+ # eligible for removal.
+ selected_set = psets['selected']
+ required_sets['selected'] = selected_set
+ protected_set = InternalPackageSet()
+ protected_set_name = '____depclean_protected_set____'
+ required_sets[protected_set_name] = protected_set
+ system_set = psets["system"]
+
+ if not system_set or not selected_set:
+
+ if not system_set:
+ writemsg_level("!!! You have no system list.\n",
+ level=logging.ERROR, noiselevel=-1)
+
+ if not selected_set:
+ writemsg_level("!!! You have no world file.\n",
+ level=logging.WARNING, noiselevel=-1)
+
+ writemsg_level("!!! Proceeding is likely to " + \
+ "break your installation.\n",
+ level=logging.WARNING, noiselevel=-1)
+ if "--pretend" not in myopts:
+ countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
+
+ if action == "depclean":
+ emergelog(xterm_titles, " >>> depclean")
+
+ writemsg_level("\nCalculating dependencies ")
+ resolver_params = create_depgraph_params(myopts, "remove")
+ resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
+ resolver._load_vdb()
+ vardb = resolver._frozen_config.trees[myroot]["vartree"].dbapi
+ real_vardb = trees[myroot]["vartree"].dbapi
+
+ if action == "depclean":
+
+ if args_set:
+
+ if deselect:
+ # Start with an empty set.
+ selected_set = InternalPackageSet()
+ required_sets['selected'] = selected_set
+ # Pull in any sets nested within the selected set.
+ selected_set.update(psets['selected'].getNonAtoms())
+
+ # Pull in everything that's installed but not matched
+ # by an argument atom since we don't want to clean any
+ # package if something depends on it.
+ for pkg in vardb:
+ if spinner:
+ spinner.update()
+
+ try:
+ if args_set.findAtomForPackage(pkg) is None:
+ protected_set.add("=" + pkg.cpv)
+ continue
+ except portage.exception.InvalidDependString as e:
+ show_invalid_depstring_notice(pkg,
+ pkg.metadata["PROVIDE"], str(e))
+ del e
+ protected_set.add("=" + pkg.cpv)
+ continue
+
+ elif action == "prune":
+
+ if deselect:
+ # Start with an empty set.
+ selected_set = InternalPackageSet()
+ required_sets['selected'] = selected_set
+ # Pull in any sets nested within the selected set.
+ selected_set.update(psets['selected'].getNonAtoms())
+
+ # Pull in everything that's installed since we don't want
+ # to prune a package if something depends on it.
+ protected_set.update(vardb.cp_all())
+
+ if not args_set:
+
+ # Try to prune everything that's slotted.
+ for cp in vardb.cp_all():
+ if len(vardb.cp_list(cp)) > 1:
+ args_set.add(cp)
+
+ # Remove atoms from world that match installed packages
+ # that are also matched by argument atoms, but do not remove
+ # them if they match the highest installed version.
+ for pkg in vardb:
+ spinner.update()
+ pkgs_for_cp = vardb.match_pkgs(pkg.cp)
+ if not pkgs_for_cp or pkg not in pkgs_for_cp:
+ raise AssertionError("package expected in matches: " + \
+ "cp = %s, cpv = %s matches = %s" % \
+ (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
+
+ highest_version = pkgs_for_cp[-1]
+ if pkg == highest_version:
+ # pkg is the highest version
+ protected_set.add("=" + pkg.cpv)
+ continue
+
+ if len(pkgs_for_cp) <= 1:
+ raise AssertionError("more packages expected: " + \
+ "cp = %s, cpv = %s matches = %s" % \
+ (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
+
+ try:
+ if args_set.findAtomForPackage(pkg) is None:
+ protected_set.add("=" + pkg.cpv)
+ continue
+ except portage.exception.InvalidDependString as e:
+ show_invalid_depstring_notice(pkg,
+ pkg.metadata["PROVIDE"], str(e))
+ del e
+ protected_set.add("=" + pkg.cpv)
+ continue
+
+ if resolver._frozen_config.excluded_pkgs:
+ excluded_set = resolver._frozen_config.excluded_pkgs
+ required_sets['__excluded__'] = InternalPackageSet()
+
+ for pkg in vardb:
+ if spinner:
+ spinner.update()
+
+ try:
+ if excluded_set.findAtomForPackage(pkg):
+ required_sets['__excluded__'].add("=" + pkg.cpv)
+ except portage.exception.InvalidDependString as e:
+ show_invalid_depstring_notice(pkg,
+ pkg.metadata["PROVIDE"], str(e))
+ del e
+ required_sets['__excluded__'].add("=" + pkg.cpv)
+
+ success = resolver._complete_graph(required_sets={myroot:required_sets})
+ writemsg_level("\b\b... done!\n")
+
+ resolver.display_problems()
+
+ if not success:
+ return 1, [], False, 0
+
+ def unresolved_deps():
+
+ unresolvable = set()
+ for dep in resolver._dynamic_config._initially_unsatisfied_deps:
+ if isinstance(dep.parent, Package) and \
+ (dep.priority > UnmergeDepPriority.SOFT):
+ unresolvable.add((dep.atom, dep.parent.cpv))
+
+ if not unresolvable:
+ return False
+
+ if not allow_missing_deps:
+
+ if "--debug" in myopts:
+ writemsg("\ndigraph:\n\n", noiselevel=-1)
+ resolver._dynamic_config.digraph.debug_print()
+ writemsg("\n", noiselevel=-1)
+
+ prefix = bad(" * ")
+ msg = []
+ msg.append("Dependencies could not be completely resolved due to")
+ msg.append("the following required packages not being installed:")
+ msg.append("")
+ for atom, parent in unresolvable:
+ msg.append(" %s pulled in by:" % (atom,))
+ msg.append(" %s" % (parent,))
+ msg.append("")
+ msg.extend(textwrap.wrap(
+ "Have you forgotten to do a complete update prior " + \
+ "to depclean? The most comprehensive command for this " + \
+ "purpose is as follows:", 65
+ ))
+ msg.append("")
+ msg.append(" " + \
+ good("emerge --update --newuse --deep --with-bdeps=y @world"))
+ msg.append("")
+ msg.extend(textwrap.wrap(
+ "Note that the --with-bdeps=y option is not required in " + \
+ "many situations. Refer to the emerge manual page " + \
+ "(run `man emerge`) for more information about " + \
+ "--with-bdeps.", 65
+ ))
+ msg.append("")
+ msg.extend(textwrap.wrap(
+ "Also, note that it may be necessary to manually uninstall " + \
+ "packages that no longer exist in the portage tree, since " + \
+ "it may not be possible to satisfy their dependencies.", 65
+ ))
+ if action == "prune":
+ msg.append("")
+ msg.append("If you would like to ignore " + \
+ "dependencies then use %s." % good("--nodeps"))
+ writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
+ level=logging.ERROR, noiselevel=-1)
+ return True
+ return False
+
+ if unresolved_deps():
+ return 1, [], False, 0
+
+ graph = resolver._dynamic_config.digraph.copy()
+ required_pkgs_total = 0
+ for node in graph:
+ if isinstance(node, Package):
+ required_pkgs_total += 1
+
+ def show_parents(child_node):
+ parent_nodes = graph.parent_nodes(child_node)
+ if not parent_nodes:
+ # With --prune, the highest version can be pulled in without any
+ # real parent since all installed packages are pulled in. In that
+ # case there's nothing to show here.
+ return
+ parent_strs = []
+ for node in parent_nodes:
+ parent_strs.append(str(getattr(node, "cpv", node)))
+ parent_strs.sort()
+ msg = []
+ msg.append(" %s pulled in by:\n" % (child_node.cpv,))
+ for parent_str in parent_strs:
+ msg.append(" %s\n" % (parent_str,))
+ msg.append("\n")
+ portage.writemsg_stdout("".join(msg), noiselevel=-1)
+
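+ # Note: cmp_pkg_cpv below is wrapped with cmp_sort_key() at its call
+ # sites, which adapts a cmp-style function for use as a sort key
+ # (Python 3 removed the cmp argument from list.sort()).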
+ def cmp_pkg_cpv(pkg1, pkg2):
+ """Sort Package instances by cpv."""
+ if pkg1.cpv > pkg2.cpv:
+ return 1
+ elif pkg1.cpv == pkg2.cpv:
+ return 0
+ else:
+ return -1
+
+ def create_cleanlist():
+
+ if "--debug" in myopts:
+ writemsg("\ndigraph:\n\n", noiselevel=-1)
+ graph.debug_print()
+ writemsg("\n", noiselevel=-1)
+
+ # Never display the special internal protected_set.
+ for node in graph:
+ if isinstance(node, SetArg) and node.name == protected_set_name:
+ graph.remove(node)
+ break
+
+ pkgs_to_remove = []
+
+ if action == "depclean":
+ if args_set:
+
+ for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
+ arg_atom = None
+ try:
+ arg_atom = args_set.findAtomForPackage(pkg)
+ except portage.exception.InvalidDependString:
+ # this error has already been displayed by now
+ continue
+
+ if arg_atom:
+ if pkg not in graph:
+ pkgs_to_remove.append(pkg)
+ elif "--verbose" in myopts:
+ show_parents(pkg)
+
+ else:
+ for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
+ if pkg not in graph:
+ pkgs_to_remove.append(pkg)
+ elif "--verbose" in myopts:
+ show_parents(pkg)
+
+ elif action == "prune":
+
+ for atom in args_set:
+ for pkg in vardb.match_pkgs(atom):
+ if pkg not in graph:
+ pkgs_to_remove.append(pkg)
+ elif "--verbose" in myopts:
+ show_parents(pkg)
+
+ if not pkgs_to_remove:
+ writemsg_level(
+ ">>> No packages selected for removal by %s\n" % action)
+ if "--verbose" not in myopts:
+ writemsg_level(
+ ">>> To see reverse dependencies, use %s\n" % \
+ good("--verbose"))
+ if action == "prune":
+ writemsg_level(
+ ">>> To ignore dependencies, use %s\n" % \
+ good("--nodeps"))
+
+ return pkgs_to_remove
+
+ cleanlist = create_cleanlist()
+ clean_set = set(cleanlist)
+
+ if cleanlist and \
+ real_vardb._linkmap is not None and \
+ myopts.get("--depclean-lib-check") != "n" and \
+ "preserve-libs" not in settings.features:
+
+ # Check if any of these packages are the sole providers of libraries
+ # with consumers that have not been selected for removal. If so, these
+ # packages and any dependencies need to be added to the graph.
+ linkmap = real_vardb._linkmap
+ consumer_cache = {}
+ provider_cache = {}
+ consumer_map = {}
+
+ writemsg_level(">>> Checking for lib consumers...\n")
+
+ for pkg in cleanlist:
+ pkg_dblink = real_vardb._dblink(pkg.cpv)
+ consumers = {}
+
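+ # Walk every file the package installed; paths that the linkmap
+ # does not know as shared objects raise KeyError in findConsumers()
+ # below and are simply skipped.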
+ for lib in pkg_dblink.getcontents():
+ lib = lib[len(myroot):]
+ lib_key = linkmap._obj_key(lib)
+ lib_consumers = consumer_cache.get(lib_key)
+ if lib_consumers is None:
+ try:
+ lib_consumers = linkmap.findConsumers(lib_key)
+ except KeyError:
+ continue
+ consumer_cache[lib_key] = lib_consumers
+ if lib_consumers:
+ consumers[lib_key] = lib_consumers
+
+ if not consumers:
+ continue
+
+ for lib, lib_consumers in list(consumers.items()):
+ for consumer_file in list(lib_consumers):
+ if pkg_dblink.isowner(consumer_file):
+ lib_consumers.remove(consumer_file)
+ if not lib_consumers:
+ del consumers[lib]
+
+ if not consumers:
+ continue
+
+ for lib, lib_consumers in consumers.items():
+
+ soname = linkmap.getSoname(lib)
+
+ consumer_providers = []
+ for lib_consumer in lib_consumers:
+ providers = provider_cache.get(lib_consumer)
+ if providers is None:
+ providers = linkmap.findProviders(lib_consumer)
+ provider_cache[lib_consumer] = providers
+ if soname not in providers:
+ # Why does this happen?
+ continue
+ consumer_providers.append(
+ (lib_consumer, providers[soname]))
+
+ consumers[lib] = consumer_providers
+
+ consumer_map[pkg] = consumers
+
+ if consumer_map:
+
+ search_files = set()
+ for consumers in consumer_map.values():
+ for lib, consumer_providers in consumers.items():
+ for lib_consumer, providers in consumer_providers:
+ search_files.add(lib_consumer)
+ search_files.update(providers)
+
+ writemsg_level(">>> Assigning files to packages...\n")
+ file_owners = {}
+ for f in search_files:
+ owner_set = set()
+ for owner in linkmap.getOwners(f):
+ owner_dblink = real_vardb._dblink(owner)
+ if owner_dblink.exists():
+ owner_set.add(owner_dblink)
+ if owner_set:
+ file_owners[f] = owner_set
+
+ for pkg, consumers in list(consumer_map.items()):
+ for lib, consumer_providers in list(consumers.items()):
+ lib_consumers = set()
+
+ for lib_consumer, providers in consumer_providers:
+ owner_set = file_owners.get(lib_consumer)
+ provider_dblinks = set()
+ provider_pkgs = set()
+
+ if len(providers) > 1:
+ for provider in providers:
+ provider_set = file_owners.get(provider)
+ if provider_set is not None:
+ provider_dblinks.update(provider_set)
+
+ if len(provider_dblinks) > 1:
+ for provider_dblink in provider_dblinks:
+ provider_pkg = resolver._pkg(
+ provider_dblink.mycpv, "installed",
+ root_config, installed=True)
+ if provider_pkg not in clean_set:
+ provider_pkgs.add(provider_pkg)
+
+ if provider_pkgs:
+ continue
+
+ if owner_set is not None:
+ lib_consumers.update(owner_set)
+
+ for consumer_dblink in list(lib_consumers):
+ if resolver._pkg(consumer_dblink.mycpv, "installed",
+ root_config, installed=True) in clean_set:
+ lib_consumers.remove(consumer_dblink)
+ continue
+
+ if lib_consumers:
+ consumers[lib] = lib_consumers
+ else:
+ del consumers[lib]
+ if not consumers:
+ del consumer_map[pkg]
+
+ if consumer_map:
+ # TODO: Implement a package set for rebuilding consumer packages.
+
+ msg = "In order to avoid breakage of link level " + \
+ "dependencies, one or more packages will not be removed. " + \
+ "This can be solved by rebuilding " + \
+ "the packages that pulled them in."
+
+ prefix = bad(" * ")
+ from textwrap import wrap
+ writemsg_level("".join(prefix + "%s\n" % line for \
+ line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
+
+ msg = []
+ for pkg in sorted(consumer_map, key=cmp_sort_key(cmp_pkg_cpv)):
+ consumers = consumer_map[pkg]
+ consumer_libs = {}
+ for lib, lib_consumers in consumers.items():
+ for consumer in lib_consumers:
+ consumer_libs.setdefault(
+ consumer.mycpv, set()).add(linkmap.getSoname(lib))
+ unique_consumers = set(chain(*consumers.values()))
+ unique_consumers = sorted(consumer.mycpv \
+ for consumer in unique_consumers)
+ msg.append("")
+ msg.append(" %s pulled in by:" % (pkg.cpv,))
+ for consumer in unique_consumers:
+ libs = consumer_libs[consumer]
+ msg.append(" %s needs %s" % \
+ (consumer, ', '.join(sorted(libs))))
+ msg.append("")
+ writemsg_level("".join(prefix + "%s\n" % line for line in msg),
+ level=logging.WARNING, noiselevel=-1)
+
+ # Add lib providers to the graph as children of lib consumers,
+ # and also add any dependencies pulled in by the provider.
+ writemsg_level(">>> Adding lib providers to graph...\n")
+
+ for pkg, consumers in consumer_map.items():
+ for consumer_dblink in set(chain(*consumers.values())):
+ consumer_pkg = resolver._pkg(consumer_dblink.mycpv,
+ "installed", root_config, installed=True)
+ if not resolver._add_pkg(pkg,
+ Dependency(parent=consumer_pkg,
+ priority=UnmergeDepPriority(runtime=True),
+ root=pkg.root)):
+ resolver.display_problems()
+ return 1, [], False, 0
+
+ writemsg_level("\nCalculating dependencies ")
+ success = resolver._complete_graph(
+ required_sets={myroot:required_sets})
+ writemsg_level("\b\b... done!\n")
+ resolver.display_problems()
+ if not success:
+ return 1, [], False, 0
+ if unresolved_deps():
+ return 1, [], False, 0
+
+ graph = resolver._dynamic_config.digraph.copy()
+ required_pkgs_total = 0
+ for node in graph:
+ if isinstance(node, Package):
+ required_pkgs_total += 1
+ cleanlist = create_cleanlist()
+ if not cleanlist:
+ return 0, [], False, required_pkgs_total
+ clean_set = set(cleanlist)
+
+ if clean_set:
+ writemsg_level(">>> Calculating removal order...\n")
+ # Use a topological sort to create an unmerge order such that
+ # each package is unmerged before its dependencies. This is
+ # necessary to avoid breaking things that may need to run
+ # during pkg_prerm or pkg_postrm phases.
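+ # For example, if app-misc/foo has RDEPEND="dev-libs/bar" and both
+ # are in the clean set, foo is unmerged before bar so that foo's
+ # pkg_prerm phase can still use bar.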
+
+ # Create a new graph to account for dependencies between the
+ # packages being unmerged.
+ graph = digraph()
+ del cleanlist[:]
+
+ dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
+ runtime = UnmergeDepPriority(runtime=True)
+ runtime_post = UnmergeDepPriority(runtime_post=True)
+ buildtime = UnmergeDepPriority(buildtime=True)
+ priority_map = {
+ "RDEPEND": runtime,
+ "PDEPEND": runtime_post,
+ "DEPEND": buildtime,
+ }
+
+ for node in clean_set:
+ graph.add(node, None)
+ mydeps = []
+ for dep_type in dep_keys:
+ depstr = node.metadata[dep_type]
+ if not depstr:
+ continue
+ priority = priority_map[dep_type]
+
+ if debug:
+ writemsg_level(_unicode_decode("\nParent: %s\n") \
+ % (node,), noiselevel=-1, level=logging.DEBUG)
+ writemsg_level(_unicode_decode( "Depstring: %s\n") \
+ % (depstr,), noiselevel=-1, level=logging.DEBUG)
+ writemsg_level(_unicode_decode( "Priority: %s\n") \
+ % (priority,), noiselevel=-1, level=logging.DEBUG)
+
+ try:
+ atoms = resolver._select_atoms(myroot, depstr,
+ myuse=node.use.enabled, parent=node,
+ priority=priority)[node]
+ except portage.exception.InvalidDependString:
+ # Ignore invalid deps of packages that will
+ # be uninstalled anyway.
+ continue
+
+ if debug:
+ writemsg_level("Candidates: [%s]\n" % \
+ ', '.join(_unicode_decode("'%s'") % (x,) for x in atoms),
+ noiselevel=-1, level=logging.DEBUG)
+
+ for atom in atoms:
+ if not isinstance(atom, portage.dep.Atom):
+ # Ignore invalid atoms returned from dep_check().
+ continue
+ if atom.blocker:
+ continue
+ matches = vardb.match_pkgs(atom)
+ if not matches:
+ continue
+ for child_node in matches:
+ if child_node in clean_set:
+ graph.add(child_node, node, priority=priority)
+
+ if debug:
+ writemsg_level("\nunmerge digraph:\n\n",
+ noiselevel=-1, level=logging.DEBUG)
+ graph.debug_print()
+ writemsg_level("\n", noiselevel=-1, level=logging.DEBUG)
+
+ ordered = True
+ if len(graph.order) == len(graph.root_nodes()):
+ # If there are no dependencies between packages
+ # let unmerge() group them by cat/pn.
+ ordered = False
+ cleanlist = [pkg.cpv for pkg in graph.order]
+ else:
+ # Order nodes from lowest to highest overall reference count for
+ # optimal root node selection (this can help minimize issues
+ # with unaccounted implicit dependencies).
+ node_refcounts = {}
+ for node in graph.order:
+ node_refcounts[node] = len(graph.parent_nodes(node))
+ def cmp_reference_count(node1, node2):
+ return node_refcounts[node1] - node_refcounts[node2]
+ graph.order.sort(key=cmp_sort_key(cmp_reference_count))
+
+ ignore_priority_range = [None]
+ ignore_priority_range.extend(
+ range(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
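+ # Walk the priorities from None (ignore no edges) up to MAX so
+ # that, when circular dependencies exist, the weakest edges are
+ # ignored first in order to find removable root nodes.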
+ while graph:
+ for ignore_priority in ignore_priority_range:
+ nodes = graph.root_nodes(ignore_priority=ignore_priority)
+ if nodes:
+ break
+ if not nodes:
+ raise AssertionError("no root nodes")
+ if ignore_priority is not None:
+ # Some deps have been dropped due to circular dependencies,
+ # so only pop one node in order to minimize the number that
+ # are dropped.
+ del nodes[1:]
+ for node in nodes:
+ graph.remove(node)
+ cleanlist.append(node.cpv)
+
+ return 0, cleanlist, ordered, required_pkgs_total
+ return 0, [], False, required_pkgs_total
+
+def action_deselect(settings, trees, opts, atoms):
+ enter_invalid = '--ask-enter-invalid' in opts
+ root_config = trees[settings['ROOT']]['root_config']
+ world_set = root_config.sets['selected']
+ if not hasattr(world_set, 'update'):
+ writemsg_level("World @selected set does not appear to be mutable.\n",
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ pretend = '--pretend' in opts
+ locked = False
+ if not pretend and hasattr(world_set, 'lock'):
+ world_set.lock()
+ locked = True
+ try:
+ world_set.load()
+ world_atoms = world_set.getAtoms()
+ vardb = root_config.trees["vartree"].dbapi
+ expanded_atoms = set(atoms)
+
+ for atom in atoms:
+ if not atom.startswith(SETPREFIX):
+ if atom.cp.startswith("null/"):
+ # try to expand category from world set
+ null_cat, pn = portage.catsplit(atom.cp)
+ for world_atom in world_atoms:
+ cat, world_pn = portage.catsplit(world_atom.cp)
+ if pn == world_pn:
+ expanded_atoms.add(
+ Atom(atom.replace("null", cat, 1),
+ allow_repo=True, allow_wildcard=True))
+
+ for cpv in vardb.match(atom):
+ slot, = vardb.aux_get(cpv, ["SLOT"])
+ if not slot:
+ slot = "0"
+ expanded_atoms.add(Atom("%s:%s" % \
+ (portage.cpv_getkey(cpv), slot)))
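+ # e.g. an installed dev-lang/python-2.7.1 with SLOT="2.7" causes
+ # the slot atom "dev-lang/python:2.7" to be added here.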
+
+ discard_atoms = set()
+ for atom in world_set:
+ for arg_atom in expanded_atoms:
+ if arg_atom.startswith(SETPREFIX):
+ if atom.startswith(SETPREFIX) and \
+ arg_atom == atom:
+ discard_atoms.add(atom)
+ break
+ else:
+ if not atom.startswith(SETPREFIX) and \
+ arg_atom.intersects(atom) and \
+ not (arg_atom.slot and not atom.slot) and \
+ not (arg_atom.repo and not atom.repo):
+ discard_atoms.add(atom)
+ break
+ if discard_atoms:
+ for atom in sorted(discard_atoms):
+ if pretend:
+ print(">>> Would remove %s from \"world\" favorites file..." % \
+ colorize("INFORM", str(atom)))
+ else:
+ print(">>> Removing %s from \"world\" favorites file..." % \
+ colorize("INFORM", str(atom)))
+
+ if '--ask' in opts:
+ prompt = "Would you like to remove these " + \
+ "packages from your world favorites?"
+ if userquery(prompt, enter_invalid) == 'No':
+ return os.EX_OK
+
+ remaining = set(world_set)
+ remaining.difference_update(discard_atoms)
+ if not pretend:
+ world_set.replace(remaining)
+ else:
+ print(">>> No matching atoms found in \"world\" favorites file...")
+ finally:
+ if locked:
+ world_set.unlock()
+ return os.EX_OK
+
+class _info_pkgs_ver(object):
+ def __init__(self, ver, repo_suffix, provide_suffix):
+ self.ver = ver
+ self.repo_suffix = repo_suffix
+ self.provide_suffix = provide_suffix
+
+ def __lt__(self, other):
+ return portage.versions.vercmp(self.ver, other.ver) < 0
+
+ def toString(self):
+ """
+ This may return unicode if repo_name contains unicode.
+ Don't use __str__ and str() since unicode triggers compatibility
+ issues between python 2.x and 3.x.
+ """
+ return self.ver + self.repo_suffix + self.provide_suffix
+
+def action_info(settings, trees, myopts, myfiles):
+
+ output_buffer = []
+ append = output_buffer.append
+ root_config = trees[settings['ROOT']]['root_config']
+
+ append(getportageversion(settings["PORTDIR"], settings["ROOT"],
+ settings.profile_path, settings["CHOST"],
+ trees[settings["ROOT"]]["vartree"].dbapi))
+
+ header_width = 65
+ header_title = "System Settings"
+ if myfiles:
+ append(header_width * "=")
+ append(header_title.rjust(int(header_width/2 + len(header_title)/2)))
+ append(header_width * "=")
+ append("System uname: %s" % (platform.platform(aliased=1),))
+
+ lastSync = portage.grabfile(os.path.join(
+ settings["PORTDIR"], "metadata", "timestamp.chk"))
+ if lastSync:
+ lastSync = lastSync[0]
+ else:
+ lastSync = "Unknown"
+ append("Timestamp of tree: %s" % (lastSync,))
+
+ output=subprocess_getstatusoutput("distcc --version")
+ if output[0] == os.EX_OK:
+ distcc_str = output[1].split("\n", 1)[0]
+ if "distcc" in settings.features:
+ distcc_str += " [enabled]"
+ else:
+ distcc_str += " [disabled]"
+ append(distcc_str)
+
+ output=subprocess_getstatusoutput("ccache -V")
+ if output[0] == os.EX_OK:
+ ccache_str = output[1].split("\n", 1)[0]
+ if "ccache" in settings.features:
+ ccache_str += " [enabled]"
+ else:
+ ccache_str += " [disabled]"
+ append(ccache_str)
+
+ myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
+ "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
+ myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
+ atoms = []
+ vardb = trees["/"]["vartree"].dbapi
+ for x in myvars:
+ try:
+ x = Atom(x)
+ except InvalidAtom:
+ append("%-20s %s" % (x+":", "[NOT VALID]"))
+ else:
+ for atom in expand_new_virt(vardb, x):
+ if not atom.blocker:
+ atoms.append((x, atom))
+
+ myvars = sorted(set(atoms))
+
+ portdb = trees["/"]["porttree"].dbapi
+ main_repo = portdb.getRepositoryName(portdb.porttree_root)
+ cp_map = {}
+ cp_max_len = 0
+
+ for orig_atom, x in myvars:
+ pkg_matches = vardb.match(x)
+
+ versions = []
+ for cpv in pkg_matches:
+ matched_cp = portage.versions.cpv_getkey(cpv)
+ ver = portage.versions.cpv_getversion(cpv)
+ ver_map = cp_map.setdefault(matched_cp, {})
+ prev_match = ver_map.get(ver)
+ if prev_match is not None:
+ if prev_match.provide_suffix:
+ # prefer duplicate matches that include
+ # additional virtual provider info
+ continue
+
+ if len(matched_cp) > cp_max_len:
+ cp_max_len = len(matched_cp)
+ repo = vardb.aux_get(cpv, ["repository"])[0]
+ if repo == main_repo:
+ repo_suffix = ""
+ elif not repo:
+ repo_suffix = "::<unknown repository>"
+ else:
+ repo_suffix = "::" + repo
+
+ if matched_cp == orig_atom.cp:
+ provide_suffix = ""
+ else:
+ provide_suffix = " (%s)" % (orig_atom,)
+
+ ver_map[ver] = _info_pkgs_ver(ver, repo_suffix, provide_suffix)
+
+ for cp in sorted(cp_map):
+ versions = sorted(cp_map[cp].values())
+ versions = ", ".join(ver.toString() for ver in versions)
+ append("%s %s" % \
+ ((cp + ":").ljust(cp_max_len + 1), versions))
+
+ libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
+
+ repos = portdb.settings.repositories
+ if "--verbose" in myopts:
+ append("Repositories:\n")
+ for repo in repos:
+ append(repo.info_string())
+ else:
+ append("Repositories: %s" % \
+ " ".join(repo.name for repo in repos))
+
+ if _ENABLE_SET_CONFIG:
+ sets_line = "Installed sets: "
+ sets_line += ", ".join(s for s in \
+ sorted(root_config.sets['selected'].getNonAtoms()) \
+ if s.startswith(SETPREFIX))
+ append(sets_line)
+
+ if "--verbose" in myopts:
+ myvars = list(settings)
+ else:
+ myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
+ 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
+ 'PORTDIR_OVERLAY', 'PORTAGE_BUNZIP2_COMMAND',
+ 'PORTAGE_BZIP2_COMMAND',
+ 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
+ 'ACCEPT_KEYWORDS', 'ACCEPT_LICENSE', 'SYNC', 'FEATURES',
+ 'EMERGE_DEFAULT_OPTS']
+
+ myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
+
+ myvars_ignore_defaults = {
+ 'PORTAGE_BZIP2_COMMAND' : 'bzip2',
+ }
+
+ myvars = portage.util.unique_array(myvars)
+ use_expand = settings.get('USE_EXPAND', '').split()
+ use_expand.sort()
+ use_expand_hidden = set(
+ settings.get('USE_EXPAND_HIDDEN', '').upper().split())
+ alphabetical_use = '--alphabetical' in myopts
+ unset_vars = []
+ myvars.sort()
+ for k in myvars:
+ v = settings.get(k)
+ if v is not None:
+ if k != "USE":
+ default = myvars_ignore_defaults.get(k)
+ if default is not None and \
+ default == v:
+ continue
+ append('%s="%s"' % (k, v))
+ else:
+ use = set(v.split())
+ for varname in use_expand:
+ flag_prefix = varname.lower() + "_"
+ for f in list(use):
+ if f.startswith(flag_prefix):
+ use.remove(f)
+ use = list(use)
+ use.sort()
+ use = ['USE="%s"' % " ".join(use)]
+ for varname in use_expand:
+ myval = settings.get(varname)
+ if myval:
+ use.append('%s="%s"' % (varname, myval))
+ append(" ".join(use))
+ else:
+ unset_vars.append(k)
+ if unset_vars:
+ append("Unset: "+", ".join(unset_vars))
+ append("")
+ append("")
+ writemsg_stdout("\n".join(output_buffer),
+ noiselevel=-1)
+
+ # See if we can find any packages installed matching the strings
+ # passed on the command line
+ mypkgs = []
+ vardb = trees[settings["ROOT"]]["vartree"].dbapi
+ portdb = trees[settings["ROOT"]]["porttree"].dbapi
+ bindb = trees[settings["ROOT"]]["bintree"].dbapi
+ for x in myfiles:
+ match_found = False
+ installed_match = vardb.match(x)
+ for installed in installed_match:
+ mypkgs.append((installed, "installed"))
+ match_found = True
+
+ if match_found:
+ continue
+
+ for db, pkg_type in ((portdb, "ebuild"), (bindb, "binary")):
+ if pkg_type == "binary" and "--usepkg" not in myopts:
+ continue
+
+ matches = db.match(x)
+ matches.reverse()
+ for match in matches:
+ if pkg_type == "binary":
+ if db.bintree.isremote(match):
+ continue
+ auxkeys = ["EAPI", "DEFINED_PHASES"]
+ metadata = dict(zip(auxkeys, db.aux_get(match, auxkeys)))
+ if metadata["EAPI"] not in ("0", "1", "2", "3") and \
+ "info" in metadata["DEFINED_PHASES"].split():
+ mypkgs.append((match, pkg_type))
+ break
+
+ # If some packages were found...
+ if mypkgs:
+ # Get our global settings (we only print stuff if it varies from
+ # the current config)
+ mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
+ auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
+ auxkeys.append('DEFINED_PHASES')
+ global_vals = {}
+ pkgsettings = portage.config(clone=settings)
+
+ # Loop through each package
+ # Only print settings if they differ from global settings
+ header_title = "Package Settings"
+ print(header_width * "=")
+ print(header_title.rjust(int(header_width/2 + len(header_title)/2)))
+ print(header_width * "=")
+ from portage.output import EOutput
+ out = EOutput()
+ for mypkg in mypkgs:
+ cpv = mypkg[0]
+ pkg_type = mypkg[1]
+ # Get all package specific variables
+ if pkg_type == "installed":
+ metadata = dict(zip(auxkeys, vardb.aux_get(cpv, auxkeys)))
+ elif pkg_type == "ebuild":
+ metadata = dict(zip(auxkeys, portdb.aux_get(cpv, auxkeys)))
+ elif pkg_type == "binary":
+ metadata = dict(zip(auxkeys, bindb.aux_get(cpv, auxkeys)))
+
+ pkg = Package(built=(pkg_type!="ebuild"), cpv=cpv,
+ installed=(pkg_type=="installed"), metadata=zip(Package.metadata_keys,
+ (metadata.get(x, '') for x in Package.metadata_keys)),
+ root_config=root_config, type_name=pkg_type)
+
+ if pkg_type == "installed":
+ print("\n%s was built with the following:" % \
+ colorize("INFORM", str(pkg.cpv)))
+ elif pkg_type == "ebuild":
+ print("\n%s would be build with the following:" % \
+ colorize("INFORM", str(pkg.cpv)))
+ elif pkg_type == "binary":
+ print("\n%s (non-installed binary) was built with the following:" % \
+ colorize("INFORM", str(pkg.cpv)))
+
+ writemsg_stdout('%s\n' % pkg_use_display(pkg, myopts),
+ noiselevel=-1)
+ if pkg_type == "installed":
+ for myvar in mydesiredvars:
+ if metadata[myvar].split() != settings.get(myvar, '').split():
+ print("%s=\"%s\"" % (myvar, metadata[myvar]))
+ print()
+
+ if metadata['DEFINED_PHASES']:
+ if 'info' not in metadata['DEFINED_PHASES'].split():
+ continue
+
+ print(">>> Attempting to run pkg_info() for '%s'" % pkg.cpv)
+
+ if pkg_type == "installed":
+ ebuildpath = vardb.findname(pkg.cpv)
+ elif pkg_type == "ebuild":
+ ebuildpath = portdb.findname(pkg.cpv, myrepo=pkg.repo)
+ elif pkg_type == "binary":
+ tbz2_file = bindb.bintree.getname(pkg.cpv)
+ ebuild_file_name = pkg.cpv.split("/")[1] + ".ebuild"
+ ebuild_file_contents = portage.xpak.tbz2(tbz2_file).getfile(ebuild_file_name)
+ tmpdir = tempfile.mkdtemp()
+ ebuildpath = os.path.join(tmpdir, ebuild_file_name)
+ ebuild_file = open(ebuildpath, 'wb')
+ ebuild_file.write(ebuild_file_contents)
+ ebuild_file.close()
+
+ if not ebuildpath or not os.path.exists(ebuildpath):
+ out.ewarn("No ebuild found for '%s'" % pkg.cpv)
+ continue
+
+ if pkg_type == "installed":
+ portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
+ pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == "1"),
+ mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
+ tree="vartree")
+ elif pkg_type == "ebuild":
+ portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
+ pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == "1"),
+ mydbapi=trees[settings["ROOT"]]["porttree"].dbapi,
+ tree="porttree")
+ elif pkg_type == "binary":
+ portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
+ pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == "1"),
+ mydbapi=trees[settings["ROOT"]]["bintree"].dbapi,
+ tree="bintree")
+ shutil.rmtree(tmpdir)
+
+def action_metadata(settings, portdb, myopts, porttrees=None):
+ if porttrees is None:
+ porttrees = portdb.porttrees
+ portage.writemsg_stdout("\n>>> Updating Portage cache\n")
+ old_umask = os.umask(0o002)
+ cachedir = os.path.normpath(settings.depcachedir)
+ if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
+ "/lib", "/opt", "/proc", "/root", "/sbin",
+ "/sys", "/tmp", "/usr", "/var"]:
+ print("!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
+ "ROOT DIRECTORY ON YOUR SYSTEM.", file=sys.stderr)
+ print("!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir, file=sys.stderr)
+ sys.exit(73)
+ if not os.path.exists(cachedir):
+ os.makedirs(cachedir)
+
+ auxdbkeys = [x for x in portage.auxdbkeys if not x.startswith("UNUSED_0")]
+ auxdbkeys = tuple(auxdbkeys)
+
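+ # TreeData bundles the per-repository objects used below: src_db is
+ # the pregenerated cache to read from, dest_db the writable local
+ # auxdb, and valid_nodes collects every cpv seen so that stale
+ # dest_db entries can be pruned afterwards.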
+ class TreeData(object):
+ __slots__ = ('dest_db', 'eclass_db', 'path', 'src_db', 'valid_nodes')
+ def __init__(self, dest_db, eclass_db, path, src_db):
+ self.dest_db = dest_db
+ self.eclass_db = eclass_db
+ self.path = path
+ self.src_db = src_db
+ self.valid_nodes = set()
+
+ porttrees_data = []
+ for path in porttrees:
+ src_db = portdb._pregen_auxdb.get(path)
+ if src_db is None and \
+ os.path.isdir(os.path.join(path, 'metadata', 'cache')):
+ src_db = portdb.metadbmodule(
+ path, 'metadata/cache', auxdbkeys, readonly=True)
+ try:
+ src_db.ec = portdb._repo_info[path].eclass_db
+ except AttributeError:
+ pass
+
+ if src_db is not None:
+ porttrees_data.append(TreeData(portdb.auxdb[path],
+ portdb._repo_info[path].eclass_db, path, src_db))
+
+ porttrees = [tree_data.path for tree_data in porttrees_data]
+
+ quiet = settings.get('TERM') == 'dumb' or \
+ '--quiet' in myopts or \
+ not sys.stdout.isatty()
+
+ onProgress = None
+ if not quiet:
+ progressBar = portage.output.TermProgressBar()
+ progressHandler = ProgressHandler()
+ onProgress = progressHandler.onProgress
+ def display():
+ progressBar.set(progressHandler.curval, progressHandler.maxval)
+ progressHandler.display = display
+ def sigwinch_handler(signum, frame):
+ lines, progressBar.term_columns = \
+ portage.output.get_term_size()
+ signal.signal(signal.SIGWINCH, sigwinch_handler)
+
+ # Temporarily override portdb.porttrees so portdb.cp_all()
+ # will only return the relevant subset.
+ portdb_porttrees = portdb.porttrees
+ portdb.porttrees = porttrees
+ try:
+ cp_all = portdb.cp_all()
+ finally:
+ portdb.porttrees = portdb_porttrees
+
+ curval = 0
+ maxval = len(cp_all)
+ if onProgress is not None:
+ onProgress(maxval, curval)
+
+ from portage.cache.util import quiet_mirroring
+ from portage import eapi_is_supported, \
+ _validate_cache_for_unsupported_eapis
+
+ # TODO: Display error messages, but do not interfere with the progress bar.
+ # Here's how:
+ # 1) erase the progress bar
+ # 2) show the error message
+ # 3) redraw the progress bar on a new line
+ noise = quiet_mirroring()
+
+ for cp in cp_all:
+ for tree_data in porttrees_data:
+ for cpv in portdb.cp_list(cp, mytree=tree_data.path):
+ tree_data.valid_nodes.add(cpv)
+ try:
+ src = tree_data.src_db[cpv]
+ except KeyError as e:
+ noise.missing_entry(cpv)
+ del e
+ continue
+ except CacheError as ce:
+ noise.exception(cpv, ce)
+ del ce
+ continue
+
+ eapi = src.get('EAPI')
+ if not eapi:
+ eapi = '0'
+ eapi = eapi.lstrip('-')
+ eapi_supported = eapi_is_supported(eapi)
+ if not eapi_supported:
+ if not _validate_cache_for_unsupported_eapis:
+ noise.misc(cpv, "unable to validate " + \
+ "cache for EAPI='%s'" % eapi)
+ continue
+
+ dest = None
+ try:
+ dest = tree_data.dest_db[cpv]
+ except (KeyError, CacheError):
+ pass
+
+ for d in (src, dest):
+ if d is not None and d.get('EAPI') in ('', '0'):
+ del d['EAPI']
+
+ if dest is not None:
+ if not (dest['_mtime_'] == src['_mtime_'] and \
+ tree_data.eclass_db.is_eclass_data_valid(
+ dest['_eclasses_']) and \
+ set(dest['_eclasses_']) == set(src['_eclasses_'])):
+ dest = None
+ else:
+ # We don't want to skip the write unless we're really
+ # sure that the existing cache is identical, so don't
+ # trust _mtime_ and _eclasses_ alone.
+ for k in set(chain(src, dest)).difference(
+ ('_mtime_', '_eclasses_')):
+ if dest.get(k, '') != src.get(k, ''):
+ dest = None
+ break
+
+ if dest is not None:
+ # The existing data is valid and identical,
+ # so there's no need to overwrite it.
+ continue
+
+ try:
+ inherited = src.get('INHERITED', '')
+ eclasses = src.get('_eclasses_')
+ except CacheError as ce:
+ noise.exception(cpv, ce)
+ del ce
+ continue
+
+ if eclasses is not None:
+ if not tree_data.eclass_db.is_eclass_data_valid(
+ src['_eclasses_']):
+ noise.eclass_stale(cpv)
+ continue
+ inherited = eclasses
+ else:
+ inherited = inherited.split()
+
+ if tree_data.src_db.complete_eclass_entries and \
+ eclasses is None:
+ noise.corruption(cpv, "missing _eclasses_ field")
+ continue
+
+ if inherited:
+ # Even if _eclasses_ already exists, replace it with data from
+ # eclass_cache, in order to insert local eclass paths.
+ try:
+ eclasses = tree_data.eclass_db.get_eclass_data(inherited)
+ except KeyError:
+ # INHERITED contains a non-existent eclass.
+ noise.eclass_stale(cpv)
+ continue
+
+ if eclasses is None:
+ noise.eclass_stale(cpv)
+ continue
+ src['_eclasses_'] = eclasses
+ else:
+ src['_eclasses_'] = {}
+
+ if not eapi_supported:
+ src = {
+ 'EAPI' : '-' + eapi,
+ '_mtime_' : src['_mtime_'],
+ '_eclasses_' : src['_eclasses_'],
+ }
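+ # Only a minimal entry is written for unsupported EAPIs; the
+ # leading "-" matches the eapi.lstrip('-') handling above and
+ # marks the entry as one that cannot be validated.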
+
+ try:
+ tree_data.dest_db[cpv] = src
+ except CacheError as ce:
+ noise.exception(cpv, ce)
+ del ce
+
+ curval += 1
+ if onProgress is not None:
+ onProgress(maxval, curval)
+
+ if onProgress is not None:
+ onProgress(maxval, curval)
+
+ for tree_data in porttrees_data:
+ try:
+ dead_nodes = set(tree_data.dest_db)
+ except CacheError as e:
+ writemsg_level("Error listing cache entries for " + \
+ "'%s': %s, continuing...\n" % (tree_data.path, e),
+ level=logging.ERROR, noiselevel=-1)
+ del e
+ else:
+ dead_nodes.difference_update(tree_data.valid_nodes)
+ for cpv in dead_nodes:
+ try:
+ del tree_data.dest_db[cpv]
+ except (KeyError, CacheError):
+ pass
+
+ if not quiet:
+ # make sure the final progress is displayed
+ progressHandler.display()
+ print()
+ signal.signal(signal.SIGWINCH, signal.SIG_DFL)
+
+ sys.stdout.flush()
+ os.umask(old_umask)
+
+def action_regen(settings, portdb, max_jobs, max_load):
+ xterm_titles = "notitles" not in settings.features
+ emergelog(xterm_titles, " === regen")
+ #regenerate cache entries
+ try:
+ os.close(sys.stdin.fileno())
+ except SystemExit as e:
+ raise # Needed else can't exit
+ except:
+ pass
+ sys.stdout.flush()
+
+ regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
+ received_signal = []
+
+ def emergeexitsig(signum, frame):
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % \
+ {"signal":signum})
+ regen.terminate()
+ received_signal.append(128 + signum)
+
+ earlier_sigint_handler = signal.signal(signal.SIGINT, emergeexitsig)
+ earlier_sigterm_handler = signal.signal(signal.SIGTERM, emergeexitsig)
+
+ try:
+ regen.run()
+ finally:
+ # Restore previous handlers
+ if earlier_sigint_handler is not None:
+ signal.signal(signal.SIGINT, earlier_sigint_handler)
+ else:
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ if earlier_sigterm_handler is not None:
+ signal.signal(signal.SIGTERM, earlier_sigterm_handler)
+ else:
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+ if received_signal:
+ sys.exit(received_signal[0])
+
+ portage.writemsg_stdout("done!\n")
+ return regen.returncode
+
+def action_search(root_config, myopts, myfiles, spinner):
+ if not myfiles:
+ print("emerge: no search terms provided.")
+ else:
+ searchinstance = search(root_config,
+ spinner, "--searchdesc" in myopts,
+ "--quiet" not in myopts, "--usepkg" in myopts,
+ "--usepkgonly" in myopts)
+ for mysearch in myfiles:
+ try:
+ searchinstance.execute(mysearch)
+ except re.error as comment:
+ print("\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment ))
+ sys.exit(1)
+ searchinstance.output()
+
+def action_sync(settings, trees, mtimedb, myopts, myaction):
+ enter_invalid = '--ask-enter-invalid' in myopts
+ xterm_titles = "notitles" not in settings.features
+ emergelog(xterm_titles, " === sync")
+ portdb = trees[settings["ROOT"]]["porttree"].dbapi
+ myportdir = portdb.porttree_root
+ if not myportdir:
+ myportdir = settings.get('PORTDIR', '')
+ if myportdir and myportdir.strip():
+ myportdir = os.path.realpath(myportdir)
+ else:
+ myportdir = None
+ out = portage.output.EOutput()
+ global_config_path = GLOBAL_CONFIG_PATH
+ if settings['EPREFIX']:
+ global_config_path = os.path.join(settings['EPREFIX'],
+ GLOBAL_CONFIG_PATH.lstrip(os.sep))
+ if not myportdir:
+ sys.stderr.write("!!! PORTDIR is undefined. " + \
+ "Is %s/make.globals missing?\n" % global_config_path)
+ sys.exit(1)
+ if myportdir[-1]=="/":
+ myportdir=myportdir[:-1]
+ try:
+ st = os.stat(myportdir)
+ except OSError:
+ st = None
+ if st is None:
+ print(">>>",myportdir,"not found, creating it.")
+ portage.util.ensure_dirs(myportdir, mode=0o755)
+ st = os.stat(myportdir)
+
+ usersync_uid = None
+ spawn_kwargs = {}
+ spawn_kwargs["env"] = settings.environ()
+ if 'usersync' in settings.features and \
+ portage.data.secpass >= 2 and \
+ (st.st_uid != os.getuid() and st.st_mode & 0o700 or \
+ st.st_gid != os.getgid() and st.st_mode & 0o070):
+ try:
+ homedir = pwd.getpwuid(st.st_uid).pw_dir
+ except KeyError:
+ pass
+ else:
+ # Drop privileges when syncing, in order to match
+ # existing uid/gid settings.
+ usersync_uid = st.st_uid
+ spawn_kwargs["uid"] = st.st_uid
+ spawn_kwargs["gid"] = st.st_gid
+ spawn_kwargs["groups"] = [st.st_gid]
+ spawn_kwargs["env"]["HOME"] = homedir
+ umask = 0o002
+ if not st.st_mode & 0o020:
+ umask = umask | 0o020
+ spawn_kwargs["umask"] = umask
+
+ if usersync_uid is not None:
+ # PORTAGE_TMPDIR is used below, so validate it and
+ # bail out if necessary.
+ rval = _check_temp_dir(settings)
+ if rval != os.EX_OK:
+ return rval
+
+ syncuri = settings.get("SYNC", "").strip()
+ if not syncuri:
+ writemsg_level("!!! SYNC is undefined. " + \
+ "Is %s/make.globals missing?\n" % global_config_path,
+ noiselevel=-1, level=logging.ERROR)
+ return 1
+
+ vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
+ vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
+
+ os.umask(0o022)
+ dosyncuri = syncuri
+ updatecache_flg = False
+ if myaction == "metadata":
+ print("skipping sync")
+ updatecache_flg = True
+ elif ".git" in vcs_dirs:
+ # Update existing git repository, and ignore the syncuri. We are
+ # going to trust the user and assume that the user is in the branch
+ # that he/she wants updated. We'll let the user manage branches with
+ # git directly.
+ if portage.process.find_binary("git") is None:
+ msg = ["Command not found: git",
+ "Type \"emerge dev-util/git\" to enable git support."]
+ for l in msg:
+ writemsg_level("!!! %s\n" % l,
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+ msg = ">>> Starting git pull in %s..." % myportdir
+ emergelog(xterm_titles, msg )
+ writemsg_level(msg + "\n")
+ exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
+ (portage._shell_quote(myportdir),), **spawn_kwargs)
+ if exitcode != os.EX_OK:
+ msg = "!!! git pull error in %s." % myportdir
+ emergelog(xterm_titles, msg)
+ writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
+ return exitcode
+ msg = ">>> Git pull in %s successful" % myportdir
+ emergelog(xterm_titles, msg)
+ writemsg_level(msg + "\n")
+ exitcode = git_sync_timestamps(settings, myportdir)
+ if exitcode == os.EX_OK:
+ updatecache_flg = True
+ elif syncuri[:8]=="rsync://" or syncuri[:6]=="ssh://":
+ for vcs_dir in vcs_dirs:
+ writemsg_level(("!!! %s appears to be under revision " + \
+ "control (contains %s).\n!!! Aborting rsync sync.\n") % \
+ (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
+ return 1
+ if not os.path.exists("/usr/bin/rsync"):
+ print("!!! /usr/bin/rsync does not exist, so rsync support is disabled.")
+ print("!!! Type \"emerge net-misc/rsync\" to enable rsync support.")
+ sys.exit(1)
+ mytimeout=180
+
+ rsync_opts = []
+ if settings["PORTAGE_RSYNC_OPTS"] == "":
+ portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
+ rsync_opts.extend([
+ "--recursive", # Recurse directories
+ "--links", # Consider symlinks
+ "--safe-links", # Ignore links outside of tree
+ "--perms", # Preserve permissions
+ "--times", # Preserive mod times
+ "--compress", # Compress the data transmitted
+ "--force", # Force deletion on non-empty dirs
+ "--whole-file", # Don't do block transfers, only entire files
+ "--delete", # Delete files that aren't in the master tree
+ "--stats", # Show final statistics about what was transfered
+ "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
+ "--exclude=/distfiles", # Exclude distfiles from consideration
+ "--exclude=/local", # Exclude local from consideration
+ "--exclude=/packages", # Exclude packages from consideration
+ ])
+
+ else:
+ # The below validation is not needed when using the above hardcoded
+ # defaults.
+
+ portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
+ rsync_opts.extend(portage.util.shlex_split(
+ settings.get("PORTAGE_RSYNC_OPTS", "")))
+ for opt in ("--recursive", "--times"):
+ if opt not in rsync_opts:
+ portage.writemsg(yellow("WARNING:") + " adding required option " + \
+ "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
+ rsync_opts.append(opt)
+
+ for exclude in ("distfiles", "local", "packages"):
+ opt = "--exclude=/%s" % exclude
+ if opt not in rsync_opts:
+ portage.writemsg(yellow("WARNING:") + \
+ " adding required option %s not included in " % opt + \
+ "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
+ rsync_opts.append(opt)
+
+ if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
+ def rsync_opt_startswith(opt_prefix):
+ for x in rsync_opts:
+ if x.startswith(opt_prefix):
+ return True
+ return False
+
+ if not rsync_opt_startswith("--timeout="):
+ rsync_opts.append("--timeout=%d" % mytimeout)
+
+ for opt in ("--compress", "--whole-file"):
+ if opt not in rsync_opts:
+ portage.writemsg(yellow("WARNING:") + " adding required option " + \
+ "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
+ rsync_opts.append(opt)
+
+ if "--quiet" in myopts:
+ rsync_opts.append("--quiet") # Shut up a lot
+ else:
+ rsync_opts.append("--verbose") # Print filelist
+
+ if "--verbose" in myopts:
+ rsync_opts.append("--progress") # Progress meter for each file
+
+ if "--debug" in myopts:
+ rsync_opts.append("--checksum") # Force checksum on all files
+
+ # Real local timestamp file.
+ servertimestampfile = os.path.join(
+ myportdir, "metadata", "timestamp.chk")
+
+ content = portage.util.grabfile(servertimestampfile)
+ mytimestamp = 0
+ if content:
+ try:
+ mytimestamp = time.mktime(time.strptime(content[0],
+ "%a, %d %b %Y %H:%M:%S +0000"))
+ except (OverflowError, ValueError):
+ pass
+ del content
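+ # timestamp.chk holds an RFC-822 style date, e.g.
+ # "Sat, 01 Jan 2011 00:00:00 +0000", parsed above into epoch
+ # seconds for comparison with the server's copy.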
+
+ try:
+ rsync_initial_timeout = \
+ int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
+ except ValueError:
+ rsync_initial_timeout = 15
+
+ try:
+ maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
+ except SystemExit as e:
+ raise # Needed else can't exit
+ except:
+ maxretries = -1 #default number of retries
+
+ retries=0
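+ # Split SYNC into its components; e.g. "rsync://user@host:873/path"
+ # yields proto="rsync", user_name="user@", hostname="host",
+ # port=":873" (IPv6 hosts keep their square brackets).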
+ try:
+ proto, user_name, hostname, port = re.split(
+ r"(rsync|ssh)://([^:/]+@)?(\[[:\da-fA-F]*\]|[^:/]*)(:[0-9]+)?",
+ syncuri, maxsplit=4)[1:5]
+ except ValueError:
+ writemsg_level("!!! SYNC is invalid: %s\n" % syncuri,
+ noiselevel=-1, level=logging.ERROR)
+ return 1
+ if port is None:
+ port=""
+ if user_name is None:
+ user_name=""
+ if re.match(r"^\[[:\da-fA-F]*\]$", hostname) is None:
+ getaddrinfo_host = hostname
+ else:
+ # getaddrinfo needs the brackets stripped
+ getaddrinfo_host = hostname[1:-1]
+ updatecache_flg=True
+ all_rsync_opts = set(rsync_opts)
+ extra_rsync_opts = portage.util.shlex_split(
+ settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
+ all_rsync_opts.update(extra_rsync_opts)
+
+ family = socket.AF_UNSPEC
+ if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
+ family = socket.AF_INET
+ elif socket.has_ipv6 and \
+ ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
+ family = socket.AF_INET6
+
+ addrinfos = None
+ uris = []
+
+ try:
+ addrinfos = getaddrinfo_validate(
+ socket.getaddrinfo(getaddrinfo_host, None,
+ family, socket.SOCK_STREAM))
+ except socket.error as e:
+ writemsg_level(
+ "!!! getaddrinfo failed for '%s': %s\n" % (hostname, e),
+ noiselevel=-1, level=logging.ERROR)
+
+ if addrinfos:
+
+ AF_INET = socket.AF_INET
+ AF_INET6 = None
+ if socket.has_ipv6:
+ AF_INET6 = socket.AF_INET6
+
+ ips_v4 = []
+ ips_v6 = []
+
+ for addrinfo in addrinfos:
+ if addrinfo[0] == AF_INET:
+ ips_v4.append("%s" % addrinfo[4][0])
+ elif AF_INET6 is not None and addrinfo[0] == AF_INET6:
+ # IPv6 addresses need to be enclosed in square brackets
+ ips_v6.append("[%s]" % addrinfo[4][0])
+
+ random.shuffle(ips_v4)
+ random.shuffle(ips_v6)
+
+ # Give priority to the address family that
+ # getaddrinfo() returned first.
+ if AF_INET6 is not None and addrinfos and \
+ addrinfos[0][0] == AF_INET6:
+ ips = ips_v6 + ips_v4
+ else:
+ ips = ips_v4 + ips_v6
+
+ for ip in ips:
+ uris.append(syncuri.replace(
+ "//" + user_name + hostname + port + "/",
+ "//" + user_name + ip + port + "/", 1))
+
+ if not uris:
+ # With some configurations we need to use the plain hostname
+ # rather than try to resolve the ip addresses (bug #340817).
+ uris.append(syncuri)
+
+ # reverse, for use with pop()
+ uris.reverse()
+
+ effective_maxretries = maxretries
+ if effective_maxretries < 0:
+ effective_maxretries = len(uris) - 1
+
+ SERVER_OUT_OF_DATE = -1
+ EXCEEDED_MAX_RETRIES = -2
+ while True:
+ if uris:
+ dosyncuri = uris.pop()
+ else:
+ writemsg("!!! Exhausted addresses for %s\n" % \
+ hostname, noiselevel=-1)
+ return 1
+
+ if (retries==0):
+ if "--ask" in myopts:
+ if userquery("Do you want to sync your Portage tree " + \
+ "with the mirror at\n" + blue(dosyncuri) + bold("?"),
+ enter_invalid) == "No":
+ print()
+ print("Quitting.")
+ print()
+ sys.exit(0)
+ emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
+ if "--quiet" not in myopts:
+ print(">>> Starting rsync with "+dosyncuri+"...")
+ else:
+ emergelog(xterm_titles,
+ ">>> Starting retry %d of %d with %s" % \
+ (retries, effective_maxretries, dosyncuri))
+ writemsg_stdout(
+ "\n\n>>> Starting retry %d of %d with %s\n" % \
+ (retries, effective_maxretries, dosyncuri), noiselevel=-1)
+
+ if dosyncuri.startswith('ssh://'):
+ dosyncuri = dosyncuri[6:].replace('/', ':/', 1)
+
+ if mytimestamp != 0 and "--quiet" not in myopts:
+ print(">>> Checking server timestamp ...")
+
+ rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
+
+ if "--debug" in myopts:
+ print(rsynccommand)
+
+ exitcode = os.EX_OK
+ servertimestamp = 0
+ # Even if there's no timestamp available locally, fetch the
+ # timestamp anyway as an initial probe to verify that the server is
+ # responsive. This protects us from hanging indefinitely on a
+ # connection attempt to an unresponsive server which rsync's
+ # --timeout option does not prevent.
+ if True:
+ # Temporary file for remote server timestamp comparison.
+ # NOTE: If FEATURES=usersync is enabled then the tempfile
+ # needs to be in a directory that's readable by the usersync
+ # user. We assume that PORTAGE_TMPDIR will satisfy this
+ # requirement, since that's not necessarily true for the
+ # default directory used by the tempfile module.
+ if usersync_uid is not None:
+ tmpdir = settings['PORTAGE_TMPDIR']
+ else:
+ # use default dir from tempfile module
+ tmpdir = None
+ fd, tmpservertimestampfile = \
+ tempfile.mkstemp(dir=tmpdir)
+ os.close(fd)
+ if usersync_uid is not None:
+ portage.util.apply_permissions(tmpservertimestampfile,
+ uid=usersync_uid)
+ mycommand = rsynccommand[:]
+ mycommand.append(dosyncuri.rstrip("/") + \
+ "/metadata/timestamp.chk")
+ mycommand.append(tmpservertimestampfile)
+ content = None
+ mypids = []
+ try:
+ # Timeout here in case the server is unresponsive. The
+ # --timeout rsync option doesn't apply to the initial
+ # connection attempt.
+ try:
+ if rsync_initial_timeout:
+ portage.exception.AlarmSignal.register(
+ rsync_initial_timeout)
+
+ mypids.extend(portage.process.spawn(
+ mycommand, returnpid=True, **spawn_kwargs))
+ exitcode = os.waitpid(mypids[0], 0)[1]
+ if usersync_uid is not None:
+ portage.util.apply_permissions(tmpservertimestampfile,
+ uid=os.getuid())
+ content = portage.grabfile(tmpservertimestampfile)
+ finally:
+ if rsync_initial_timeout:
+ portage.exception.AlarmSignal.unregister()
+ try:
+ os.unlink(tmpservertimestampfile)
+ except OSError:
+ pass
+ except portage.exception.AlarmSignal:
+ # timed out
+ print('timed out')
+ # With waitpid and WNOHANG, only check the
+ # first element of the tuple since the second
+ # element may vary (bug #337465).
+ if mypids and os.waitpid(mypids[0], os.WNOHANG)[0] == 0:
+ os.kill(mypids[0], signal.SIGTERM)
+ os.waitpid(mypids[0], 0)
+ # This is the same code rsync uses for timeout.
+ exitcode = 30
+ else:
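+ # os.waitpid() returns a 16-bit status: the low byte holds the
+ # terminating signal (if any) and the high byte the exit code.
+ # Fold either case into a single conventional exit value.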
+ if exitcode != os.EX_OK:
+ if exitcode & 0xff:
+ exitcode = (exitcode & 0xff) << 8
+ else:
+ exitcode = exitcode >> 8
+ if mypids:
+ portage.process.spawned_pids.remove(mypids[0])
+ if content:
+ try:
+ servertimestamp = time.mktime(time.strptime(
+ content[0], "%a, %d %b %Y %H:%M:%S +0000"))
+ except (OverflowError, ValueError):
+ pass
+ del mycommand, mypids, content
+ if exitcode == os.EX_OK:
+ if (servertimestamp != 0) and (servertimestamp == mytimestamp):
+ emergelog(xterm_titles,
+ ">>> Cancelling sync -- Already current.")
+ print()
+ print(">>>")
+ print(">>> Timestamps on the server and in the local repository are the same.")
+ print(">>> Cancelling all further sync action. You are already up to date.")
+ print(">>>")
+ print(">>> In order to force sync, remove '%s'." % servertimestampfile)
+ print(">>>")
+ print()
+ sys.exit(0)
+ elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
+ emergelog(xterm_titles,
+ ">>> Server out of date: %s" % dosyncuri)
+ print()
+ print(">>>")
+ print(">>> SERVER OUT OF DATE: %s" % dosyncuri)
+ print(">>>")
+ print(">>> In order to force sync, remove '%s'." % servertimestampfile)
+ print(">>>")
+ print()
+ exitcode = SERVER_OUT_OF_DATE
+ elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
+ # actual sync
+ mycommand = rsynccommand + [dosyncuri+"/", myportdir]
+ exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
+ if exitcode in [0,1,3,4,11,14,20,21]:
+ break
+ else:
+ # Code 2 indicates protocol incompatibility, which is expected
+ # for servers with protocol < 29 that don't support
+ # --prune-empty-directories. Retry for a server that supports
+ # at least rsync protocol version 29 (>=rsync-2.6.4).
+ pass
+
+ retries=retries+1
+
+ if maxretries < 0 or retries <= maxretries:
+ print(">>> Retrying...")
+ else:
+ # over retries
+ # exit loop
+ updatecache_flg=False
+ exitcode = EXCEEDED_MAX_RETRIES
+ break
+
+ if (exitcode==0):
+ emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
+ elif exitcode == SERVER_OUT_OF_DATE:
+ sys.exit(1)
+ elif exitcode == EXCEEDED_MAX_RETRIES:
+ sys.stderr.write(
+ ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
+ sys.exit(1)
+ elif (exitcode>0):
+ msg = []
+ if exitcode==1:
+ msg.append("Rsync has reported that there is a syntax error. Please ensure")
+ msg.append("that your SYNC statement is proper.")
+ msg.append("SYNC=" + settings["SYNC"])
+ elif exitcode==11:
+ msg.append("Rsync has reported that there is a File IO error. Normally")
+ msg.append("this means your disk is full, but can be caused by corruption")
+ msg.append("on the filesystem that contains PORTDIR. Please investigate")
+ msg.append("and try again after the problem has been fixed.")
+ msg.append("PORTDIR=" + settings["PORTDIR"])
+ elif exitcode==20:
+ msg.append("Rsync was killed before it finished.")
+ else:
+ msg.append("Rsync has not successfully finished. It is recommended that you keep")
+ msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
+ msg.append("to use rsync due to firewall or other restrictions. This should be a")
+ msg.append("temporary problem unless complications exist with your network")
+ msg.append("(and possibly your system's filesystem) configuration.")
+ for line in msg:
+ out.eerror(line)
+ sys.exit(exitcode)
+ elif syncuri[:6]=="cvs://":
+ if not os.path.exists("/usr/bin/cvs"):
+ print("!!! /usr/bin/cvs does not exist, so CVS support is disabled.")
+ print("!!! Type \"emerge dev-vcs/cvs\" to enable CVS support.")
+ sys.exit(1)
+ cvsroot=syncuri[6:]
+ cvsdir=os.path.dirname(myportdir)
+ if not os.path.exists(myportdir+"/CVS"):
+ #initial checkout
+ print(">>> Starting initial cvs checkout with "+syncuri+"...")
+ if os.path.exists(cvsdir+"/gentoo-x86"):
+ print("!!! existing",cvsdir+"/gentoo-x86 directory; exiting.")
+ sys.exit(1)
+ try:
+ os.rmdir(myportdir)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ sys.stderr.write(
+ "!!! existing '%s' directory; exiting.\n" % myportdir)
+ sys.exit(1)
+ del e
+ if portage.process.spawn_bash(
+ "cd %s; exec cvs -z0 -d %s co -P gentoo-x86" % \
+ (portage._shell_quote(cvsdir), portage._shell_quote(cvsroot)),
+ **spawn_kwargs) != os.EX_OK:
+ print("!!! cvs checkout error; exiting.")
+ sys.exit(1)
+ os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
+ else:
+ #cvs update
+ print(">>> Starting cvs update with "+syncuri+"...")
+ retval = portage.process.spawn_bash(
+ "cd %s; exec cvs -z0 -q update -dP" % \
+ (portage._shell_quote(myportdir),), **spawn_kwargs)
+ if retval != os.EX_OK:
+ writemsg_level("!!! cvs update error; exiting.\n",
+ noiselevel=-1, level=logging.ERROR)
+ sys.exit(retval)
+ dosyncuri = syncuri
+ else:
+ writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
+ noiselevel=-1, level=logging.ERROR)
+ return 1
+
+ if updatecache_flg and \
+ myaction != "metadata" and \
+ "metadata-transfer" not in settings.features:
+ updatecache_flg = False
+
+ # Reload the whole config from scratch.
+ settings, trees, mtimedb = load_emerge_config(trees=trees)
+ adjust_configs(myopts, trees)
+ root_config = trees[settings["ROOT"]]["root_config"]
+ portdb = trees[settings["ROOT"]]["porttree"].dbapi
+
+ if updatecache_flg and \
+ os.path.exists(os.path.join(myportdir, 'metadata', 'cache')):
+
+ # Only update cache for myportdir since that's
+ # the only one that's been synced here.
+ action_metadata(settings, portdb, myopts, porttrees=[myportdir])
+
+ if myopts.get('--package-moves') != 'n' and \
+ _global_updates(trees, mtimedb["updates"], quiet=("--quiet" in myopts)):
+ mtimedb.commit()
+ # Reload the whole config from scratch.
+ settings, trees, mtimedb = load_emerge_config(trees=trees)
+ adjust_configs(myopts, trees)
+ portdb = trees[settings["ROOT"]]["porttree"].dbapi
+ root_config = trees[settings["ROOT"]]["root_config"]
+
+ mybestpv = portdb.xmatch("bestmatch-visible",
+ portage.const.PORTAGE_PACKAGE_ATOM)
+ mypvs = portage.best(
+ trees[settings["ROOT"]]["vartree"].dbapi.match(
+ portage.const.PORTAGE_PACKAGE_ATOM))
+
+ chk_updated_cfg_files(settings["EROOT"],
+ portage.util.shlex_split(settings.get("CONFIG_PROTECT", "")))
+
+ if myaction != "metadata":
+ postsync = os.path.join(settings["PORTAGE_CONFIGROOT"],
+ portage.USER_CONFIG_PATH, "bin", "post_sync")
+ if os.access(postsync, os.X_OK):
+ retval = portage.process.spawn(
+ [postsync, dosyncuri], env=settings.environ())
+ if retval != os.EX_OK:
+ writemsg_level(
+ " %s spawn failed of %s\n" % (bad("*"), postsync,),
+ level=logging.ERROR, noiselevel=-1)
+
+ if mybestpv != mypvs and "--quiet" not in myopts:
+ print()
+ print(red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended")
+ print(red(" * ")+"that you update portage now, before any other packages are updated.")
+ print()
+ print(red(" * ")+"To update portage, run 'emerge portage' now.")
+ print()
+
+ display_news_notification(root_config, myopts)
+ return os.EX_OK
+
+def action_uninstall(settings, trees, ldpath_mtimes,
+ opts, action, files, spinner):
+ # For backward compat, some actions do not require leading '='.
+ ignore_missing_eq = action in ('clean', 'unmerge')
+ root = settings['ROOT']
+ vardb = trees[root]['vartree'].dbapi
+ valid_atoms = []
+ lookup_owners = []
+
+ # Ensure atoms are valid before calling unmerge().
+ # For backward compat, leading '=' is not required.
+ for x in files:
+ if is_valid_package_atom(x, allow_repo=True) or \
+ (ignore_missing_eq and is_valid_package_atom('=' + x)):
+
+ try:
+ atom = dep_expand(x, mydb=vardb, settings=settings)
+ except portage.exception.AmbiguousPackageName as e:
+ msg = "The short ebuild name \"" + x + \
+ "\" is ambiguous. Please specify " + \
+ "one of the following " + \
+ "fully-qualified ebuild names instead:"
+ for line in textwrap.wrap(msg, 70):
+ writemsg_level("!!! %s\n" % (line,),
+ level=logging.ERROR, noiselevel=-1)
+ for i in e.args[0]:
+ writemsg_level(" %s\n" % colorize("INFORM", i),
+ level=logging.ERROR, noiselevel=-1)
+ writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
+ return 1
+ else:
+ if atom.use and atom.use.conditional:
+ writemsg_level(
+ ("\n\n!!! '%s' contains a conditional " + \
+ "which is not allowed.\n") % (x,),
+ level=logging.ERROR, noiselevel=-1)
+ writemsg_level(
+ "!!! Please check ebuild(5) for full details.\n",
+ level=logging.ERROR)
+ return 1
+ valid_atoms.append(atom)
+
+ elif x.startswith(os.sep):
+ if not x.startswith(root):
+ writemsg_level(("!!! '%s' does not start with" + \
+ " $ROOT.\n") % x, level=logging.ERROR, noiselevel=-1)
+ return 1
+ # Queue these up since it's most efficient to handle
+ # multiple files in a single iter_owners() call.
+ lookup_owners.append(x)
+
+ elif x.startswith(SETPREFIX) and action == "deselect":
+ valid_atoms.append(x)
+
+ elif "*" in x:
+ try:
+ ext_atom = Atom(x, allow_repo=True, allow_wildcard=True)
+ except InvalidAtom:
+ msg = []
+ msg.append("'%s' is not a valid package atom." % (x,))
+ msg.append("Please check ebuild(5) for full details.")
+ writemsg_level("".join("!!! %s\n" % line for line in msg),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ for cp in vardb.cp_all():
+ if extended_cp_match(ext_atom.cp, cp):
+ atom = cp
+ if ext_atom.slot:
+ atom += ":" + ext_atom.slot
+ if ext_atom.repo:
+ atom += "::" + ext_atom.repo
+
+ if vardb.match(atom):
+ valid_atoms.append(Atom(atom, allow_repo=True))
+
+ else:
+ msg = []
+ msg.append("'%s' is not a valid package atom." % (x,))
+ msg.append("Please check ebuild(5) for full details.")
+ writemsg_level("".join("!!! %s\n" % line for line in msg),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ if lookup_owners:
+ relative_paths = []
+ search_for_multiple = False
+ if len(lookup_owners) > 1:
+ search_for_multiple = True
+
+ for x in lookup_owners:
+ if not search_for_multiple and os.path.isdir(x):
+ search_for_multiple = True
+ relative_paths.append(x[len(root)-1:])
+
+ owners = set()
+ for pkg, relative_path in \
+ vardb._owners.iter_owners(relative_paths):
+ owners.add(pkg.mycpv)
+ if not search_for_multiple:
+ break
+
+ if owners:
+ for cpv in owners:
+ slot = vardb.aux_get(cpv, ['SLOT'])[0]
+ if not slot:
+ # portage now masks packages with missing slot, but it's
+ # possible that one was installed by an older version
+ atom = portage.cpv_getkey(cpv)
+ else:
+ atom = '%s:%s' % (portage.cpv_getkey(cpv), slot)
+ valid_atoms.append(portage.dep.Atom(atom))
+ else:
+ writemsg_level(("!!! '%s' is not claimed " + \
+ "by any package.\n") % lookup_owners[0],
+ level=logging.WARNING, noiselevel=-1)
+
+ if files and not valid_atoms:
+ return 1
+
+ if action == 'unmerge' and \
+ '--quiet' not in opts and \
+ '--quiet-unmerge-warn' not in opts:
+ msg = "This action can remove important packages! " + \
+ "In order to be safer, use " + \
+ "`emerge -pv --depclean <atom>` to check for " + \
+ "reverse dependencies before removing packages."
+ out = portage.output.EOutput()
+ for line in textwrap.wrap(msg, 72):
+ out.ewarn(line)
+
+ if action == 'deselect':
+ return action_deselect(settings, trees, opts, valid_atoms)
+
+ # Create a Scheduler for calls to unmerge(), in order to cause
+ # redirection of ebuild phase output to logs as required for
+ # options such as --quiet.
+ sched = Scheduler(settings, trees, None, opts,
+ spinner)
+ sched._background = sched._background_mode()
+ sched._status_display.quiet = True
+
+ if sched._background:
+ sched.settings.unlock()
+ sched.settings["PORTAGE_BACKGROUND"] = "1"
+ sched.settings.backup_changes("PORTAGE_BACKGROUND")
+ sched.settings.lock()
+ sched.pkgsettings[root] = portage.config(clone=sched.settings)
+
+ if action in ('clean', 'unmerge') or \
+ (action == 'prune' and "--nodeps" in opts):
+ # When given a list of atoms, unmerge them in the order given.
+ ordered = action == 'unmerge'
+ unmerge(trees[settings["ROOT"]]['root_config'], opts, action,
+ valid_atoms, ldpath_mtimes, ordered=ordered,
+ scheduler=sched._sched_iface)
+ rval = os.EX_OK
+ else:
+ rval = action_depclean(settings, trees, ldpath_mtimes,
+ opts, action, valid_atoms, spinner, scheduler=sched._sched_iface)
+
+ return rval
+
+def adjust_configs(myopts, trees):
+ for myroot in trees:
+ mysettings = trees[myroot]["vartree"].settings
+ mysettings.unlock()
+ adjust_config(myopts, mysettings)
+ mysettings.lock()
+
+def adjust_config(myopts, settings):
+	"""Make emerge-specific adjustments to the config."""
+
+ # Kill noauto as it will break merges otherwise.
+ if "noauto" in settings.features:
+ settings.features.remove('noauto')
+
+ fail_clean = myopts.get('--fail-clean')
+ if fail_clean is not None:
+ if fail_clean is True and \
+ 'fail-clean' not in settings.features:
+ settings.features.add('fail-clean')
+ elif fail_clean == 'n' and \
+ 'fail-clean' in settings.features:
+ settings.features.remove('fail-clean')
+
+ CLEAN_DELAY = 5
+ try:
+ CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
+ except ValueError as e:
+ portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
+ settings["CLEAN_DELAY"], noiselevel=-1)
+ settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
+ settings.backup_changes("CLEAN_DELAY")
+
+ EMERGE_WARNING_DELAY = 10
+ try:
+ EMERGE_WARNING_DELAY = int(settings.get(
+ "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
+ except ValueError as e:
+ portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
+ settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
+ settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
+ settings.backup_changes("EMERGE_WARNING_DELAY")
+
+ if "--quiet" in myopts or "--quiet-build" in myopts:
+		settings["PORTAGE_QUIET"] = "1"
+ settings.backup_changes("PORTAGE_QUIET")
+
+ if "--verbose" in myopts:
+ settings["PORTAGE_VERBOSE"] = "1"
+ settings.backup_changes("PORTAGE_VERBOSE")
+
+	# Set so that configs will be merged regardless of remembered status
+	if "--noconfmem" in myopts:
+		settings["NOCONFMEM"] = "1"
+		settings.backup_changes("NOCONFMEM")
+
+ # Set various debug markers... They should be merged somehow.
+ PORTAGE_DEBUG = 0
+ try:
+ PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
+ if PORTAGE_DEBUG not in (0, 1):
+ portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
+ PORTAGE_DEBUG, noiselevel=-1)
+ portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
+ noiselevel=-1)
+ PORTAGE_DEBUG = 0
+ except ValueError as e:
+ portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
+ settings["PORTAGE_DEBUG"], noiselevel=-1)
+ del e
+ if "--debug" in myopts:
+ PORTAGE_DEBUG = 1
+ settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
+ settings.backup_changes("PORTAGE_DEBUG")
+
+	if settings.get("NOCOLOR") not in ("yes", "true"):
+ portage.output.havecolor = 1
+
+	# The explicit --color < y | n > option overrides the NOCOLOR
+	# environment variable and stdout auto-detection.
+ if "--color" in myopts:
+ if "y" == myopts["--color"]:
+ portage.output.havecolor = 1
+ settings["NOCOLOR"] = "false"
+ else:
+ portage.output.havecolor = 0
+ settings["NOCOLOR"] = "true"
+ settings.backup_changes("NOCOLOR")
+ elif settings.get('TERM') == 'dumb' or \
+ not sys.stdout.isatty():
+ portage.output.havecolor = 0
+ settings["NOCOLOR"] = "true"
+ settings.backup_changes("NOCOLOR")
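+
+# For example, with myopts={"--color": "y"} adjust_config() forces color
+# output (havecolor=1, NOCOLOR="false") even when stdout is not a tty,
+# while a dumb terminal or non-tty stdout disables color unless --color
+# was given explicitly.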
+
+def display_missing_pkg_set(root_config, set_name):
+
+ msg = []
+ msg.append(("emerge: There are no sets to satisfy '%s'. " + \
+ "The following sets exist:") % \
+ colorize("INFORM", set_name))
+ msg.append("")
+
+ for s in sorted(root_config.sets):
+ msg.append(" %s" % s)
+ msg.append("")
+
+ writemsg_level("".join("%s\n" % l for l in msg),
+ level=logging.ERROR, noiselevel=-1)
+
+def relative_profile_path(portdir, abs_profile):
+ realpath = os.path.realpath(abs_profile)
+ basepath = os.path.realpath(os.path.join(portdir, "profiles"))
+ if realpath.startswith(basepath):
+ profilever = realpath[1 + len(basepath):]
+ else:
+ profilever = None
+ return profilever
+
+def getportageversion(portdir, target_root, profile, chost, vardb):
+ profilever = None
+ if profile:
+ profilever = relative_profile_path(portdir, profile)
+ if profilever is None:
+ try:
+ for parent in portage.grabfile(
+ os.path.join(profile, 'parent')):
+ profilever = relative_profile_path(portdir,
+ os.path.join(profile, parent))
+ if profilever is not None:
+ break
+ except portage.exception.PortageException:
+ pass
+
+ if profilever is None:
+ try:
+ profilever = "!" + os.readlink(profile)
+			except OSError:
+ pass
+
+ if profilever is None:
+ profilever = "unavailable"
+
+ libcver = []
+ libclist = set()
+ for atom in expand_new_virt(vardb, portage.const.LIBC_PACKAGE_ATOM):
+ if not atom.blocker:
+ libclist.update(vardb.match(atom))
+	if libclist:
+		for cpv in sorted(libclist):
+			libc_split = portage.catpkgsplit(cpv)[1:]
+			if libc_split[-1] == "r0":
+				# Strip the implicit -r0 revision suffix.
+				libc_split = libc_split[:-1]
+			libcver.append("-".join(libc_split))
+ else:
+ libcver = ["unavailable"]
+
+ gccver = getgccversion(chost)
+	unameout = platform.release() + " " + platform.machine()
+
+ return "Portage %s (%s, %s, %s, %s)" % \
+ (portage.VERSION, profilever, gccver, ",".join(libcver), unameout)
+
+def git_sync_timestamps(settings, portdir):
+ """
+	Since git doesn't preserve timestamps, synchronize timestamps between
+	metadata cache entries and the corresponding ebuilds/eclasses. Assume
+	the cache has the correct timestamp for a given file as long as the
+	file in the working tree is not modified (relative to HEAD).
+ """
+ cache_dir = os.path.join(portdir, "metadata", "cache")
+ if not os.path.isdir(cache_dir):
+ return os.EX_OK
+ writemsg_level(">>> Synchronizing timestamps...\n")
+
+ from portage.cache.cache_errors import CacheError
+ try:
+ cache_db = settings.load_best_module("portdbapi.metadbmodule")(
+ portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
+ except CacheError as e:
+ writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ ec_dir = os.path.join(portdir, "eclass")
+ try:
+ ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
+ if f.endswith(".eclass"))
+ except OSError as e:
+ writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ args = [portage.const.BASH_BINARY, "-c",
+ "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
+ portage._shell_quote(portdir)]
+ import subprocess
+ proc = subprocess.Popen(args, stdout=subprocess.PIPE)
+ modified_files = set(_unicode_decode(l).rstrip("\n") for l in proc.stdout)
+ rval = proc.wait()
+ if rval != os.EX_OK:
+ return rval
+
+ modified_eclasses = set(ec for ec in ec_names \
+ if os.path.join("eclass", ec + ".eclass") in modified_files)
+
+ updated_ec_mtimes = {}
+
+ for cpv in cache_db:
+ cpv_split = portage.catpkgsplit(cpv)
+ if cpv_split is None:
+ writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+
+ cat, pn, ver, rev = cpv_split
+ cat, pf = portage.catsplit(cpv)
+ relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
+ if relative_eb_path in modified_files:
+ continue
+
+ try:
+ cache_entry = cache_db[cpv]
+ eb_mtime = cache_entry.get("_mtime_")
+ ec_mtimes = cache_entry.get("_eclasses_")
+ except KeyError:
+ writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+ except CacheError as e:
+ writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
+ (cpv, e), level=logging.ERROR, noiselevel=-1)
+ continue
+
+ if eb_mtime is None:
+ writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+
+ try:
+ eb_mtime = long(eb_mtime)
+ except ValueError:
+ writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
+ (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
+ continue
+
+ if ec_mtimes is None:
+ writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+
+ if modified_eclasses.intersection(ec_mtimes):
+ continue
+
+ missing_eclasses = set(ec_mtimes).difference(ec_names)
+ if missing_eclasses:
+ writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
+ (cpv, sorted(missing_eclasses)), level=logging.ERROR,
+ noiselevel=-1)
+ continue
+
+ eb_path = os.path.join(portdir, relative_eb_path)
+		try:
+			current_eb_mtime = os.stat(eb_path)[stat.ST_MTIME]
+ except OSError:
+ writemsg_level("!!! Missing ebuild: %s\n" % \
+ (cpv,), level=logging.ERROR, noiselevel=-1)
+ continue
+
+ inconsistent = False
+ for ec, (ec_path, ec_mtime) in ec_mtimes.items():
+ updated_mtime = updated_ec_mtimes.get(ec)
+ if updated_mtime is not None and updated_mtime != ec_mtime:
+ writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
+ (cpv, ec), level=logging.ERROR, noiselevel=-1)
+ inconsistent = True
+ break
+
+ if inconsistent:
+ continue
+
+ if current_eb_mtime != eb_mtime:
+ os.utime(eb_path, (eb_mtime, eb_mtime))
+
+ for ec, (ec_path, ec_mtime) in ec_mtimes.items():
+ if ec in updated_ec_mtimes:
+ continue
+ ec_path = os.path.join(ec_dir, ec + ".eclass")
+ current_mtime = os.stat(ec_path)[stat.ST_MTIME]
+ if current_mtime != ec_mtime:
+ os.utime(ec_path, (ec_mtime, ec_mtime))
+ updated_ec_mtimes[ec] = ec_mtime
+
+ return os.EX_OK
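+
+# The modified-file check above shells out to git and is equivalent to
+# running, from the top of the tree:
+#
+#     git diff-index --name-only --diff-filter=M HEAD
+#
+# which lists tracked files whose working-tree content differs from HEAD.
+# Only files absent from that list have their timestamps restored from
+# the cache.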
+
+def load_emerge_config(trees=None):
+ kwargs = {}
+ for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
+ v = os.environ.get(envvar, None)
+ if v and v.strip():
+ kwargs[k] = v
+ trees = portage.create_trees(trees=trees, **kwargs)
+
+ for root, root_trees in trees.items():
+ settings = root_trees["vartree"].settings
+ settings._init_dirs()
+ setconfig = load_default_config(settings, root_trees)
+ root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
+
+ settings = trees["/"]["vartree"].settings
+
+ for myroot in trees:
+ if myroot != "/":
+ settings = trees[myroot]["vartree"].settings
+ break
+
+ mtimedbfile = os.path.join(settings['EROOT'], portage.CACHE_PATH, "mtimedb")
+ mtimedb = portage.MtimeDB(mtimedbfile)
+ portage.output._init(config_root=settings['PORTAGE_CONFIGROOT'])
+ QueryCommand._db = trees
+ return settings, trees, mtimedb
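+
+# Callers typically reload the entire configuration this way after a
+# sync or after applying package moves, for example:
+#
+#     settings, trees, mtimedb = load_emerge_config(trees=trees)
+#     adjust_configs(myopts, trees)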
+
+def chk_updated_cfg_files(eroot, config_protect):
+ target_root = eroot
+ result = list(
+ portage.util.find_updated_config_files(target_root, config_protect))
+
+ for x in result:
+ writemsg_level("\n %s " % (colorize("WARN", "* IMPORTANT:"),),
+ level=logging.INFO, noiselevel=-1)
+ if not x[1]: # it's a protected file
+ writemsg_level("config file '%s' needs updating.\n" % x[0],
+ level=logging.INFO, noiselevel=-1)
+ else: # it's a protected dir
+ if len(x[1]) == 1:
+ head, tail = os.path.split(x[1][0])
+ tail = tail[len("._cfg0000_"):]
+ fpath = os.path.join(head, tail)
+ writemsg_level("config file '%s' needs updating.\n" % fpath,
+ level=logging.INFO, noiselevel=-1)
+ else:
+ writemsg_level("%d config files in '%s' need updating.\n" % \
+ (len(x[1]), x[0]), level=logging.INFO, noiselevel=-1)
+
+ if result:
+ print(" "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")\
+ + " section of the " + bold("emerge"))
+ print(" "+yellow("*")+" man page to learn how to update config files.")
+
+def display_news_notification(root_config, myopts):
+ target_root = root_config.settings['EROOT']
+ trees = root_config.trees
+ settings = trees["vartree"].settings
+ portdb = trees["porttree"].dbapi
+ vardb = trees["vartree"].dbapi
+ NEWS_PATH = os.path.join("metadata", "news")
+ UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
+ newsReaderDisplay = False
+ update = "--pretend" not in myopts
+ if "news" not in settings.features:
+ return
+
+ for repo in portdb.getRepositories():
+ unreadItems = checkUpdatedNewsItems(
+ portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
+ if unreadItems:
+ if not newsReaderDisplay:
+ newsReaderDisplay = True
+ print()
+ print(colorize("WARN", " * IMPORTANT:"), end=' ')
+ print("%s news items need reading for repository '%s'." % (unreadItems, repo))
+
+
+ if newsReaderDisplay:
+ print(colorize("WARN", " *"), end=' ')
+ print("Use " + colorize("GOOD", "eselect news") + " to read news items.")
+ print()
+
+def getgccversion(chost):
+	"""
+	@rtype: C{str}
+	@return: the current in-use gcc version
+	"""
+
+ gcc_ver_command = 'gcc -dumpversion'
+ gcc_ver_prefix = 'gcc-'
+
+ gcc_not_found_error = red(
+ "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
+ "!!! to update the environment of this terminal and possibly\n" +
+ "!!! other terminals also.\n"
+ )
+
+ mystatus, myoutput = subprocess_getstatusoutput("gcc-config -c")
+ if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
+ return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
+
+ mystatus, myoutput = subprocess_getstatusoutput(
+ chost + "-" + gcc_ver_command)
+ if mystatus == os.EX_OK:
+ return gcc_ver_prefix + myoutput
+
+ mystatus, myoutput = subprocess_getstatusoutput(gcc_ver_command)
+ if mystatus == os.EX_OK:
+ return gcc_ver_prefix + myoutput
+
+ portage.writemsg(gcc_not_found_error, noiselevel=-1)
+ return "[unavailable]"
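+
+# The fallback order above is: `gcc-config -c` (preferred, since it
+# reports the active compiler profile), then `${CHOST}-gcc -dumpversion`,
+# and finally a bare `gcc -dumpversion`; "[unavailable]" is returned only
+# if all three fail.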
+
+def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
+ update=False):
+ """
+	Examines news items in repodir + '/' + NEWS_PATH and attempts to find
+	unread items. Returns the number of unread (but relevant) items.
+
+ @param portdb: a portage tree database
+ @type portdb: pordbapi
+ @param vardb: an installed package database
+ @type vardb: vardbapi
+ @param NEWS_PATH:
+ @type NEWS_PATH:
+ @param UNREAD_PATH:
+ @type UNREAD_PATH:
+ @param repo_id:
+ @type repo_id:
+ @rtype: Integer
+ @returns:
+ 1. The number of unread but relevant news items.
+
+ """
+ from portage.news import NewsManager
+ manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
+	return manager.getUnreadItems(repo_id, update=update)
+
diff --git a/portage_with_autodep/pym/_emerge/clear_caches.py b/portage_with_autodep/pym/_emerge/clear_caches.py
new file mode 100644
index 0000000..7b7c5ec
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/clear_caches.py
@@ -0,0 +1,19 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import gc
+from portage.util.listdir import dircache
+
+def clear_caches(trees):
+ for d in trees.values():
+ d["porttree"].dbapi.melt()
+ d["porttree"].dbapi._aux_cache.clear()
+ d["bintree"].dbapi._aux_cache.clear()
+ d["bintree"].dbapi._clear_cache()
+ if d["vartree"].dbapi._linkmap is None:
+ # preserve-libs is entirely disabled
+ pass
+ else:
+ d["vartree"].dbapi._linkmap._clear_cache()
+ dircache.clear()
+ gc.collect()
diff --git a/portage_with_autodep/pym/_emerge/countdown.py b/portage_with_autodep/pym/_emerge/countdown.py
new file mode 100644
index 0000000..5abdc8a
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/countdown.py
@@ -0,0 +1,22 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import sys
+import time
+
+from portage.output import colorize
+
+def countdown(secs=5, doing="Starting"):
+	if secs:
+		print(">>> Waiting", secs, "seconds before starting...")
+		print(">>> (Control-C to abort)...\n" + doing + " in: ", end=' ')
+		ticks = list(range(secs))
+		ticks.reverse()
+		for sec in ticks:
+			sys.stdout.write(colorize("UNMERGE_WARN", str(sec + 1) + " "))
+			sys.stdout.flush()
+			time.sleep(1)
+		print()
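+
+# For example, countdown(3, "Unmerging") prints:
+#
+#     >>> Waiting 3 seconds before starting...
+#     >>> (Control-C to abort)...
+#     Unmerging in: 3 2 1
+#
+# with a one-second pause between ticks.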
+
diff --git a/portage_with_autodep/pym/_emerge/create_depgraph_params.py b/portage_with_autodep/pym/_emerge/create_depgraph_params.py
new file mode 100644
index 0000000..44dceda
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/create_depgraph_params.py
@@ -0,0 +1,72 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+from portage.util import writemsg_level
+
+def create_depgraph_params(myopts, myaction):
+ #configure emerge engine parameters
+ #
+	# self: include _this_ package regardless of whether it is merged.
+ # selective: exclude the package if it is merged
+ # recurse: go into the dependencies
+ # deep: go into the dependencies of already merged packages
+ # empty: pretend nothing is merged
+ # complete: completely account for all known dependencies
+ # remove: build graph for use in removing packages
+ # rebuilt_binaries: replace installed packages with rebuilt binaries
+ myparams = {"recurse" : True}
+
+ bdeps = myopts.get("--with-bdeps")
+ if bdeps is not None:
+ myparams["bdeps"] = bdeps
+
+ if myaction == "remove":
+ myparams["remove"] = True
+ myparams["complete"] = True
+ myparams["selective"] = True
+ return myparams
+
+ if "--update" in myopts or \
+ "--newuse" in myopts or \
+ "--reinstall" in myopts or \
+ "--noreplace" in myopts or \
+ myopts.get("--selective", "n") != "n":
+ myparams["selective"] = True
+
+ deep = myopts.get("--deep")
+ if deep is not None and deep != 0:
+ myparams["deep"] = deep
+ if ("--complete-graph" in myopts or "--rebuild-if-new-rev" in myopts or
+ "--rebuild-if-new-ver" in myopts or "--rebuild-if-unbuilt" in myopts):
+ myparams["complete"] = True
+ if "--emptytree" in myopts:
+ myparams["empty"] = True
+ myparams["deep"] = True
+ myparams.pop("selective", None)
+
+ if "--nodeps" in myopts:
+ myparams.pop("recurse", None)
+ myparams.pop("deep", None)
+ myparams.pop("complete", None)
+
+ rebuilt_binaries = myopts.get('--rebuilt-binaries')
+	if rebuilt_binaries is True or \
+		(rebuilt_binaries != 'n' and \
+		'--usepkgonly' in myopts and \
+		myopts.get('--deep') is True and \
+		'--update' in myopts):
+ myparams['rebuilt_binaries'] = True
+
+ if myopts.get("--selective") == "n":
+ # --selective=n can be used to remove selective
+ # behavior that may have been implied by some
+ # other option like --update.
+ myparams.pop("selective", None)
+
+ if '--debug' in myopts:
+ writemsg_level('\n\nmyparams %s\n\n' % myparams,
+ noiselevel=-1, level=logging.DEBUG)
+
+ return myparams
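+
+# For example, myopts={"--update": True, "--deep": True} yields
+# {"recurse": True, "selective": True, "deep": True}, while "--nodeps"
+# removes "recurse", "deep" and "complete" so that only the arguments
+# themselves are considered.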
+
diff --git a/portage_with_autodep/pym/_emerge/create_world_atom.py b/portage_with_autodep/pym/_emerge/create_world_atom.py
new file mode 100644
index 0000000..fa7cffc
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/create_world_atom.py
@@ -0,0 +1,92 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.dep import _repo_separator
+
+def create_world_atom(pkg, args_set, root_config):
+ """Create a new atom for the world file if one does not exist. If the
+ argument atom is precise enough to identify a specific slot then a slot
+ atom will be returned. Atoms that are in the system set may also be stored
+ in world since system atoms can only match one slot while world atoms can
+ be greedy with respect to slots. Unslotted system packages will not be
+ stored in world."""
+
+ arg_atom = args_set.findAtomForPackage(pkg)
+ if not arg_atom:
+ return None
+ cp = arg_atom.cp
+ new_world_atom = cp
+ if arg_atom.repo:
+ new_world_atom += _repo_separator + arg_atom.repo
+ sets = root_config.sets
+ portdb = root_config.trees["porttree"].dbapi
+ vardb = root_config.trees["vartree"].dbapi
+ available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
+ for cpv in portdb.match(cp))
+ slotted = len(available_slots) > 1 or \
+ (len(available_slots) == 1 and "0" not in available_slots)
+ if not slotted:
+ # check the vdb in case this is multislot
+ available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
+ for cpv in vardb.match(cp))
+ slotted = len(available_slots) > 1 or \
+ (len(available_slots) == 1 and "0" not in available_slots)
+ if slotted and arg_atom.without_repo != cp:
+ # If the user gave a specific atom, store it as a
+ # slot atom in the world file.
+ slot_atom = pkg.slot_atom
+
+ # For USE=multislot, there are a couple of cases to
+ # handle here:
+ #
+ # 1) SLOT="0", but the real SLOT spontaneously changed to some
+ # unknown value, so just record an unslotted atom.
+ #
+ # 2) SLOT comes from an installed package and there is no
+ # matching SLOT in the portage tree.
+ #
+ # Make sure that the slot atom is available in either the
+ # portdb or the vardb, since otherwise the user certainly
+ # doesn't want the SLOT atom recorded in the world file
+ # (case 1 above). If it's only available in the vardb,
+ # the user may be trying to prevent a USE=multislot
+ # package from being removed by --depclean (case 2 above).
+
+ mydb = portdb
+ if not portdb.match(slot_atom):
+ # SLOT seems to come from an installed multislot package
+ mydb = vardb
+ # If there is no installed package matching the SLOT atom,
+ # it probably changed SLOT spontaneously due to USE=multislot,
+ # so just record an unslotted atom.
+ if vardb.match(slot_atom):
+ # Now verify that the argument is precise
+ # enough to identify a specific slot.
+ matches = mydb.match(arg_atom)
+ matched_slots = set()
+ for cpv in matches:
+ matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
+ if len(matched_slots) == 1:
+ new_world_atom = slot_atom
+ if arg_atom.repo:
+ new_world_atom += _repo_separator + arg_atom.repo
+
+ if new_world_atom == sets["selected"].findAtomForPackage(pkg):
+ # Both atoms would be identical, so there's nothing to add.
+ return None
+ if not slotted and not arg_atom.repo:
+ # Unlike world atoms, system atoms are not greedy for slots, so they
+ # can't be safely excluded from world if they are slotted.
+ system_atom = sets["system"].findAtomForPackage(pkg)
+ if system_atom:
+ if not system_atom.cp.startswith("virtual/"):
+ return None
+ # System virtuals aren't safe to exclude from world since they can
+ # match multiple old-style virtuals but only one of them will be
+ # pulled in by update or depclean.
+ providers = portdb.settings.getvirtuals().get(system_atom.cp)
+ if providers and len(providers) == 1 and \
+ providers[0].cp == arg_atom.cp:
+ return None
+ return new_world_atom
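+
+# For example, a plain argument of "dev-lang/python" is stored unslotted
+# even when multiple slots are installed, while an argument such as
+# ">=dev-lang/python-3" that matches exactly one available slot is
+# recorded as a slot atom like "dev-lang/python:3.2".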
+
diff --git a/portage_with_autodep/pym/_emerge/depgraph.py b/portage_with_autodep/pym/_emerge/depgraph.py
new file mode 100644
index 0000000..5b48aca
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/depgraph.py
@@ -0,0 +1,7029 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import difflib
+import errno
+import io
+import logging
+import stat
+import sys
+import textwrap
+from collections import deque
+from itertools import chain
+
+import portage
+from portage import os, OrderedDict
+from portage import _unicode_decode, _unicode_encode, _encodings
+from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH
+from portage.dbapi import dbapi
+from portage.dep import Atom, extract_affecting_use, check_required_use, human_readable_required_use, _repo_separator
+from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use
+from portage.exception import InvalidAtom, InvalidDependString, PortageException
+from portage.output import colorize, create_color_func, \
+ darkgreen, green
+bad = create_color_func("BAD")
+from portage.package.ebuild.getmaskingstatus import \
+ _getmaskingstatus, _MaskReason
+from portage._sets import SETPREFIX
+from portage._sets.base import InternalPackageSet
+from portage.util import ConfigProtect, shlex_split, new_protect_filename
+from portage.util import cmp_sort_key, writemsg, writemsg_stdout
+from portage.util import ensure_dirs
+from portage.util import writemsg_level, write_atomic
+from portage.util.digraph import digraph
+from portage.util.listdir import _ignorecvs_dirs
+from portage.versions import catpkgsplit
+
+from _emerge.AtomArg import AtomArg
+from _emerge.Blocker import Blocker
+from _emerge.BlockerCache import BlockerCache
+from _emerge.BlockerDepPriority import BlockerDepPriority
+from _emerge.countdown import countdown
+from _emerge.create_world_atom import create_world_atom
+from _emerge.Dependency import Dependency
+from _emerge.DependencyArg import DependencyArg
+from _emerge.DepPriority import DepPriority
+from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
+from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
+from _emerge.FakeVartree import FakeVartree
+from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
+from _emerge.is_valid_package_atom import insert_category_into_atom, \
+ is_valid_package_atom
+from _emerge.Package import Package
+from _emerge.PackageArg import PackageArg
+from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
+from _emerge.RootConfig import RootConfig
+from _emerge.search import search
+from _emerge.SetArg import SetArg
+from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
+from _emerge.UnmergeDepPriority import UnmergeDepPriority
+from _emerge.UseFlagDisplay import pkg_use_display
+from _emerge.userquery import userquery
+
+from _emerge.resolver.backtracking import Backtracker, BacktrackParameter
+from _emerge.resolver.slot_collision import slot_conflict_handler
+from _emerge.resolver.circular_dependency import circular_dependency_handler
+from _emerge.resolver.output import Display
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+ long = int
+
+class _scheduler_graph_config(object):
+ def __init__(self, trees, pkg_cache, graph, mergelist):
+ self.trees = trees
+ self.pkg_cache = pkg_cache
+ self.graph = graph
+ self.mergelist = mergelist
+
+def _wildcard_set(atoms):
+ pkgs = InternalPackageSet(allow_wildcard=True)
+ for x in atoms:
+ try:
+ x = Atom(x, allow_wildcard=True)
+ except portage.exception.InvalidAtom:
+ x = Atom("*/" + x, allow_wildcard=True)
+ pkgs.add(x)
+ return pkgs
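+
+# Atoms lacking a category are treated as wildcards here, so an
+# --exclude value of "openssl" becomes the wildcard atom "*/openssl".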
+
+class _frozen_depgraph_config(object):
+
+ def __init__(self, settings, trees, myopts, spinner):
+ self.settings = settings
+ self.target_root = settings["ROOT"]
+ self.myopts = myopts
+ self.edebug = 0
+ if settings.get("PORTAGE_DEBUG", "") == "1":
+ self.edebug = 1
+ self.spinner = spinner
+ self._running_root = trees["/"]["root_config"]
+ self._opts_no_restart = frozenset(["--buildpkgonly",
+ "--fetchonly", "--fetch-all-uri", "--pretend"])
+ self.pkgsettings = {}
+ self.trees = {}
+ self._trees_orig = trees
+ self.roots = {}
+ # All Package instances
+ self._pkg_cache = {}
+ self._highest_license_masked = {}
+ for myroot in trees:
+ self.trees[myroot] = {}
+ # Create a RootConfig instance that references
+ # the FakeVartree instead of the real one.
+ self.roots[myroot] = RootConfig(
+ trees[myroot]["vartree"].settings,
+ self.trees[myroot],
+ trees[myroot]["root_config"].setconfig)
+ for tree in ("porttree", "bintree"):
+ self.trees[myroot][tree] = trees[myroot][tree]
+ self.trees[myroot]["vartree"] = \
+ FakeVartree(trees[myroot]["root_config"],
+ pkg_cache=self._pkg_cache,
+ pkg_root_config=self.roots[myroot])
+ self.pkgsettings[myroot] = portage.config(
+ clone=self.trees[myroot]["vartree"].settings)
+
+ self._required_set_names = set(["world"])
+
+ atoms = ' '.join(myopts.get("--exclude", [])).split()
+ self.excluded_pkgs = _wildcard_set(atoms)
+ atoms = ' '.join(myopts.get("--reinstall-atoms", [])).split()
+ self.reinstall_atoms = _wildcard_set(atoms)
+ atoms = ' '.join(myopts.get("--usepkg-exclude", [])).split()
+ self.usepkg_exclude = _wildcard_set(atoms)
+ atoms = ' '.join(myopts.get("--useoldpkg-atoms", [])).split()
+ self.useoldpkg_atoms = _wildcard_set(atoms)
+ atoms = ' '.join(myopts.get("--rebuild-exclude", [])).split()
+ self.rebuild_exclude = _wildcard_set(atoms)
+ atoms = ' '.join(myopts.get("--rebuild-ignore", [])).split()
+ self.rebuild_ignore = _wildcard_set(atoms)
+
+ self.rebuild_if_new_rev = "--rebuild-if-new-rev" in myopts
+ self.rebuild_if_new_ver = "--rebuild-if-new-ver" in myopts
+ self.rebuild_if_unbuilt = "--rebuild-if-unbuilt" in myopts
+
+class _depgraph_sets(object):
+ def __init__(self):
+ # contains all sets added to the graph
+ self.sets = {}
+ # contains non-set atoms given as arguments
+ self.sets['__non_set_args__'] = InternalPackageSet(allow_repo=True)
+ # contains all atoms from all sets added to the graph, including
+ # atoms given as arguments
+ self.atoms = InternalPackageSet(allow_repo=True)
+ self.atom_arg_map = {}
+
+class _rebuild_config(object):
+ def __init__(self, frozen_config, backtrack_parameters):
+ self._graph = digraph()
+ self._frozen_config = frozen_config
+ self.rebuild_list = backtrack_parameters.rebuild_list.copy()
+ self.orig_rebuild_list = self.rebuild_list.copy()
+ self.reinstall_list = backtrack_parameters.reinstall_list.copy()
+ self.rebuild_if_new_rev = frozen_config.rebuild_if_new_rev
+ self.rebuild_if_new_ver = frozen_config.rebuild_if_new_ver
+ self.rebuild_if_unbuilt = frozen_config.rebuild_if_unbuilt
+ self.rebuild = (self.rebuild_if_new_rev or self.rebuild_if_new_ver or
+ self.rebuild_if_unbuilt)
+
+ def add(self, dep_pkg, dep):
+ parent = dep.collapsed_parent
+ priority = dep.collapsed_priority
+ rebuild_exclude = self._frozen_config.rebuild_exclude
+ rebuild_ignore = self._frozen_config.rebuild_ignore
+ if (self.rebuild and isinstance(parent, Package) and
+ parent.built and (priority.buildtime or priority.runtime) and
+ isinstance(dep_pkg, Package) and
+ not rebuild_exclude.findAtomForPackage(parent) and
+ not rebuild_ignore.findAtomForPackage(dep_pkg)):
+ self._graph.add(dep_pkg, parent, priority)
+
+ def _needs_rebuild(self, dep_pkg):
+ """Check whether packages that depend on dep_pkg need to be rebuilt."""
+ dep_root_slot = (dep_pkg.root, dep_pkg.slot_atom)
+ if dep_pkg.built or dep_root_slot in self.orig_rebuild_list:
+ return False
+
+ if self.rebuild_if_unbuilt:
+ # dep_pkg is being installed from source, so binary
+ # packages for parents are invalid. Force rebuild
+ return True
+
+ trees = self._frozen_config.trees
+ vardb = trees[dep_pkg.root]["vartree"].dbapi
+ if self.rebuild_if_new_rev:
+ # Parent packages are valid if a package with the same
+ # cpv is already installed.
+ return dep_pkg.cpv not in vardb.match(dep_pkg.slot_atom)
+
+ # Otherwise, parent packages are valid if a package with the same
+ # version (excluding revision) is already installed.
+ assert self.rebuild_if_new_ver
+ cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
+ for inst_cpv in vardb.match(dep_pkg.slot_atom):
+ inst_cpv_norev = catpkgsplit(inst_cpv)[:-1]
+ if inst_cpv_norev == cpv_norev:
+ return False
+
+ return True
+
+ def _trigger_rebuild(self, parent, build_deps, runtime_deps):
+ root_slot = (parent.root, parent.slot_atom)
+ if root_slot in self.rebuild_list:
+ return False
+ trees = self._frozen_config.trees
+ children = set(build_deps).intersection(runtime_deps)
+ reinstall = False
+ for slot_atom in children:
+ kids = set([build_deps[slot_atom], runtime_deps[slot_atom]])
+ for dep_pkg in kids:
+ dep_root_slot = (dep_pkg.root, slot_atom)
+ if self._needs_rebuild(dep_pkg):
+ self.rebuild_list.add(root_slot)
+ return True
+ elif ("--usepkg" in self._frozen_config.myopts and
+ (dep_root_slot in self.reinstall_list or
+ dep_root_slot in self.rebuild_list or
+ not dep_pkg.installed)):
+
+ # A direct rebuild dependency is being installed. We
+ # should update the parent as well to the latest binary,
+ # if that binary is valid.
+ #
+ # To validate the binary, we check whether all of the
+ # rebuild dependencies are present on the same binhost.
+ #
+ # 1) If parent is present on the binhost, but one of its
+ # rebuild dependencies is not, then the parent should
+ # be rebuilt from source.
+ # 2) Otherwise, the parent binary is assumed to be valid,
+ # because all of its rebuild dependencies are
+ # consistent.
+ bintree = trees[parent.root]["bintree"]
+ uri = bintree.get_pkgindex_uri(parent.cpv)
+ dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv)
+ bindb = bintree.dbapi
+ if self.rebuild_if_new_ver and uri and uri != dep_uri:
+ cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
+ for cpv in bindb.match(dep_pkg.slot_atom):
+ if cpv_norev == catpkgsplit(cpv)[:-1]:
+ dep_uri = bintree.get_pkgindex_uri(cpv)
+ if uri == dep_uri:
+ break
+ if uri and uri != dep_uri:
+ # 1) Remote binary package is invalid because it was
+ # built without dep_pkg. Force rebuild.
+ self.rebuild_list.add(root_slot)
+ return True
+ elif (parent.installed and
+ root_slot not in self.reinstall_list):
+ inst_build_time = parent.metadata.get("BUILD_TIME")
+ try:
+ bin_build_time, = bindb.aux_get(parent.cpv,
+ ["BUILD_TIME"])
+ except KeyError:
+ continue
+ if bin_build_time != inst_build_time:
+ # 2) Remote binary package is valid, and local package
+ # is not up to date. Force reinstall.
+ reinstall = True
+ if reinstall:
+ self.reinstall_list.add(root_slot)
+ return reinstall
+
+ def trigger_rebuilds(self):
+ """
+ Trigger rebuilds where necessary. If pkgA has been updated, and pkgB
+ depends on pkgA at both build-time and run-time, pkgB needs to be
+ rebuilt.
+ """
+ need_restart = False
+ graph = self._graph
+ build_deps = {}
+ runtime_deps = {}
+ leaf_nodes = deque(graph.leaf_nodes())
+
+ def ignore_non_runtime(priority):
+ return not priority.runtime
+
+ def ignore_non_buildtime(priority):
+ return not priority.buildtime
+
+ # Trigger rebuilds bottom-up (starting with the leaves) so that parents
+ # will always know which children are being rebuilt.
+ while graph:
+ if not leaf_nodes:
+ # We're interested in intersection of buildtime and runtime,
+ # so ignore edges that do not contain both.
+ leaf_nodes.extend(graph.leaf_nodes(
+ ignore_priority=ignore_non_runtime))
+ if not leaf_nodes:
+ leaf_nodes.extend(graph.leaf_nodes(
+ ignore_priority=ignore_non_buildtime))
+ if not leaf_nodes:
+ # We'll have to drop an edge that is both
+ # buildtime and runtime. This should be
+ # quite rare.
+ leaf_nodes.append(graph.order[-1])
+
+ node = leaf_nodes.popleft()
+ if node not in graph:
+ # This can be triggered by circular dependencies.
+ continue
+ slot_atom = node.slot_atom
+
+ # Remove our leaf node from the graph, keeping track of deps.
+ parents = graph.nodes[node][1].items()
+ graph.remove(node)
+ node_build_deps = build_deps.get(node, {})
+ node_runtime_deps = runtime_deps.get(node, {})
+ for parent, priorities in parents:
+ if parent == node:
+ # Ignore a direct cycle.
+ continue
+ parent_bdeps = build_deps.setdefault(parent, {})
+ parent_rdeps = runtime_deps.setdefault(parent, {})
+ for priority in priorities:
+ if priority.buildtime:
+ parent_bdeps[slot_atom] = node
+ if priority.runtime:
+ parent_rdeps[slot_atom] = node
+ if slot_atom in parent_bdeps and slot_atom in parent_rdeps:
+ parent_rdeps.update(node_runtime_deps)
+ if not graph.child_nodes(parent):
+ leaf_nodes.append(parent)
+
+ # Trigger rebuilds for our leaf node. Because all of our children
+ # have been processed, build_deps and runtime_deps will be
+ # completely filled in, and self.rebuild_list / self.reinstall_list
+ # will tell us whether any of our children need to be rebuilt or
+ # reinstalled.
+ if self._trigger_rebuild(node, node_build_deps, node_runtime_deps):
+ need_restart = True
+
+ return need_restart
+
+
+class _dynamic_depgraph_config(object):
+
+ def __init__(self, depgraph, myparams, allow_backtracking, backtrack_parameters):
+ self.myparams = myparams.copy()
+ self._vdb_loaded = False
+ self._allow_backtracking = allow_backtracking
+ # Maps slot atom to package for each Package added to the graph.
+ self._slot_pkg_map = {}
+ # Maps nodes to the reasons they were selected for reinstallation.
+ self._reinstall_nodes = {}
+ self.mydbapi = {}
+ # Contains a filtered view of preferred packages that are selected
+ # from available repositories.
+ self._filtered_trees = {}
+ # Contains installed packages and new packages that have been added
+ # to the graph.
+ self._graph_trees = {}
+ # Caches visible packages returned from _select_package, for use in
+ # depgraph._iter_atoms_for_pkg() SLOT logic.
+ self._visible_pkgs = {}
+		# contains the args created by select_files
+ self._initial_arg_list = []
+ self.digraph = portage.digraph()
+ # manages sets added to the graph
+ self.sets = {}
+ # contains all nodes pulled in by self.sets
+ self._set_nodes = set()
+ # Contains only Blocker -> Uninstall edges
+ self._blocker_uninstalls = digraph()
+ # Contains only Package -> Blocker edges
+ self._blocker_parents = digraph()
+ # Contains only irrelevant Package -> Blocker edges
+ self._irrelevant_blockers = digraph()
+ # Contains only unsolvable Package -> Blocker edges
+ self._unsolvable_blockers = digraph()
+ # Contains all Blocker -> Blocked Package edges
+ self._blocked_pkgs = digraph()
+ # Contains world packages that have been protected from
+ # uninstallation but may not have been added to the graph
+ # if the graph is not complete yet.
+ self._blocked_world_pkgs = {}
+ # Contains packages whose dependencies have been traversed.
+		# This is used to check whether we have accounted for blockers
+		# relevant to a package.
+ self._traversed_pkg_deps = set()
+ self._slot_collision_info = {}
+ # Slot collision nodes are not allowed to block other packages since
+ # blocker validation is only able to account for one package per slot.
+ self._slot_collision_nodes = set()
+ self._parent_atoms = {}
+ self._slot_conflict_parent_atoms = set()
+ self._slot_conflict_handler = None
+ self._circular_dependency_handler = None
+ self._serialized_tasks_cache = None
+ self._scheduler_graph = None
+ self._displayed_list = None
+ self._pprovided_args = []
+ self._missing_args = []
+ self._masked_installed = set()
+ self._masked_license_updates = set()
+ self._unsatisfied_deps_for_display = []
+ self._unsatisfied_blockers_for_display = None
+ self._circular_deps_for_display = None
+ self._dep_stack = []
+ self._dep_disjunctive_stack = []
+ self._unsatisfied_deps = []
+ self._initially_unsatisfied_deps = []
+ self._ignored_deps = []
+ self._highest_pkg_cache = {}
+
+ self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords
+ self._needed_p_mask_changes = backtrack_parameters.needed_p_mask_changes
+ self._needed_license_changes = backtrack_parameters.needed_license_changes
+ self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
+ self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
+ self._need_restart = False
+ # For conditions that always require user intervention, such as
+ # unsatisfied REQUIRED_USE (currently has no autounmask support).
+ self._skip_restart = False
+ self._backtrack_infos = {}
+
+ self._autounmask = depgraph._frozen_config.myopts.get('--autounmask') != 'n'
+ self._success_without_autounmask = False
+ self._traverse_ignored_deps = False
+
+ for myroot in depgraph._frozen_config.trees:
+ self.sets[myroot] = _depgraph_sets()
+ self._slot_pkg_map[myroot] = {}
+ vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
+ # This dbapi instance will model the state that the vdb will
+ # have after new packages have been installed.
+ fakedb = PackageVirtualDbapi(vardb.settings)
+
+ self.mydbapi[myroot] = fakedb
+ def graph_tree():
+ pass
+ graph_tree.dbapi = fakedb
+ self._graph_trees[myroot] = {}
+ self._filtered_trees[myroot] = {}
+			# Substitute the graph tree for the vartree in dep_check() since we
+			# want atom selections to be consistent with package selections
+			# that have already been made.
+ self._graph_trees[myroot]["porttree"] = graph_tree
+ self._graph_trees[myroot]["vartree"] = graph_tree
+ self._graph_trees[myroot]["graph_db"] = graph_tree.dbapi
+ self._graph_trees[myroot]["graph"] = self.digraph
+ def filtered_tree():
+ pass
+ filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
+ self._filtered_trees[myroot]["porttree"] = filtered_tree
+ self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)
+
+ # Passing in graph_tree as the vartree here could lead to better
+ # atom selections in some cases by causing atoms for packages that
+ # have been added to the graph to be preferred over other choices.
+ # However, it can trigger atom selections that result in
+ # unresolvable direct circular dependencies. For example, this
+ # happens with gwydion-dylan which depends on either itself or
+ # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
+			# gwydion-dylan-bin needs to be selected in order to avoid an
+			# unresolvable direct circular dependency.
+ #
+ # To solve the problem described above, pass in "graph_db" so that
+ # packages that have been added to the graph are distinguishable
+ # from other available packages and installed packages. Also, pass
+ # the parent package into self._select_atoms() calls so that
+ # unresolvable direct circular dependencies can be detected and
+ # avoided when possible.
+ self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
+ self._filtered_trees[myroot]["graph"] = self.digraph
+ self._filtered_trees[myroot]["vartree"] = \
+ depgraph._frozen_config.trees[myroot]["vartree"]
+
+ dbs = []
+ # (db, pkg_type, built, installed, db_keys)
+ if "remove" in self.myparams:
+ # For removal operations, use _dep_check_composite_db
+ # for availability and visibility checks. This provides
+ # consistency with install operations, so we don't
+ # get install/uninstall cycles like in bug #332719.
+ self._graph_trees[myroot]["porttree"] = filtered_tree
+ else:
+ if "--usepkgonly" not in depgraph._frozen_config.myopts:
+ portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
+ db_keys = list(portdb._aux_cache_keys)
+ dbs.append((portdb, "ebuild", False, False, db_keys))
+
+ if "--usepkg" in depgraph._frozen_config.myopts:
+ bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
+ db_keys = list(bindb._aux_cache_keys)
+ dbs.append((bindb, "binary", True, False, db_keys))
+
+ vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
+ db_keys = list(depgraph._frozen_config._trees_orig[myroot
+ ]["vartree"].dbapi._aux_cache_keys)
+ dbs.append((vardb, "installed", True, True, db_keys))
+ self._filtered_trees[myroot]["dbs"] = dbs
+
+class depgraph(object):
+
+ pkg_tree_map = RootConfig.pkg_tree_map
+
+ _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
+
+ def __init__(self, settings, trees, myopts, myparams, spinner,
+ frozen_config=None, backtrack_parameters=BacktrackParameter(), allow_backtracking=False):
+ if frozen_config is None:
+ frozen_config = _frozen_depgraph_config(settings, trees,
+ myopts, spinner)
+ self._frozen_config = frozen_config
+ self._dynamic_config = _dynamic_depgraph_config(self, myparams,
+ allow_backtracking, backtrack_parameters)
+ self._rebuild = _rebuild_config(frozen_config, backtrack_parameters)
+
+ self._select_atoms = self._select_atoms_highest_available
+ self._select_package = self._select_pkg_highest_available
+
+ def _load_vdb(self):
+ """
+ Load installed package metadata if appropriate. This used to be called
+ from the constructor, but that wasn't very nice since this procedure
+ is slow and it generates spinner output. So, now it's called on-demand
+ by various methods when necessary.
+ """
+
+ if self._dynamic_config._vdb_loaded:
+ return
+
+ for myroot in self._frozen_config.trees:
+
+ preload_installed_pkgs = \
+ "--nodeps" not in self._frozen_config.myopts
+
+ fake_vartree = self._frozen_config.trees[myroot]["vartree"]
+ if not fake_vartree.dbapi:
+ # This needs to be called for the first depgraph, but not for
+ # backtracking depgraphs that share the same frozen_config.
+ fake_vartree.sync()
+
+ # FakeVartree.sync() populates virtuals, and we want
+ # self.pkgsettings to have them populated too.
+ self._frozen_config.pkgsettings[myroot] = \
+ portage.config(clone=fake_vartree.settings)
+
+ if preload_installed_pkgs:
+ vardb = fake_vartree.dbapi
+ fakedb = self._dynamic_config._graph_trees[
+ myroot]["vartree"].dbapi
+
+ for pkg in vardb:
+ self._spinner_update()
+ # This triggers metadata updates via FakeVartree.
+ vardb.aux_get(pkg.cpv, [])
+ fakedb.cpv_inject(pkg)
+
+ self._dynamic_config._vdb_loaded = True
+
+ def _spinner_update(self):
+ if self._frozen_config.spinner:
+ self._frozen_config.spinner.update()
+
+ def _show_missed_update(self):
+
+ # In order to minimize noise, show only the highest
+ # missed update from each SLOT.
+ missed_updates = {}
+ for pkg, mask_reasons in \
+ self._dynamic_config._runtime_pkg_mask.items():
+ if pkg.installed:
+ # Exclude installed here since we only
+ # want to show available updates.
+ continue
+ k = (pkg.root, pkg.slot_atom)
+ if k in missed_updates:
+ other_pkg, mask_type, parent_atoms = missed_updates[k]
+ if other_pkg > pkg:
+ continue
+ for mask_type, parent_atoms in mask_reasons.items():
+ if not parent_atoms:
+ continue
+ missed_updates[k] = (pkg, mask_type, parent_atoms)
+ break
+
+ if not missed_updates:
+ return
+
+ missed_update_types = {}
+ for pkg, mask_type, parent_atoms in missed_updates.values():
+ missed_update_types.setdefault(mask_type,
+ []).append((pkg, parent_atoms))
+
+ if '--quiet' in self._frozen_config.myopts and \
+ '--debug' not in self._frozen_config.myopts:
+ missed_update_types.pop("slot conflict", None)
+ missed_update_types.pop("missing dependency", None)
+
+ self._show_missed_update_slot_conflicts(
+ missed_update_types.get("slot conflict"))
+
+ self._show_missed_update_unsatisfied_dep(
+ missed_update_types.get("missing dependency"))
+
+ def _show_missed_update_unsatisfied_dep(self, missed_updates):
+
+ if not missed_updates:
+ return
+
+ backtrack_masked = []
+
+ for pkg, parent_atoms in missed_updates:
+
+ try:
+ for parent, root, atom in parent_atoms:
+ self._show_unsatisfied_dep(root, atom, myparent=parent,
+ check_backtrack=True)
+ except self._backtrack_mask:
+ # This is displayed below in abbreviated form.
+ backtrack_masked.append((pkg, parent_atoms))
+ continue
+
+ writemsg("\n!!! The following update has been skipped " + \
+ "due to unsatisfied dependencies:\n\n", noiselevel=-1)
+
+ writemsg(str(pkg.slot_atom), noiselevel=-1)
+ if pkg.root != '/':
+ writemsg(" for %s" % (pkg.root,), noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+
+ for parent, root, atom in parent_atoms:
+ self._show_unsatisfied_dep(root, atom, myparent=parent)
+ writemsg("\n", noiselevel=-1)
+
+ if backtrack_masked:
+ # These are shown in abbreviated form, in order to avoid terminal
+ # flooding from mask messages as reported in bug #285832.
+ writemsg("\n!!! The following update(s) have been skipped " + \
+ "due to unsatisfied dependencies\n" + \
+ "!!! triggered by backtracking:\n\n", noiselevel=-1)
+ for pkg, parent_atoms in backtrack_masked:
+ writemsg(str(pkg.slot_atom), noiselevel=-1)
+ if pkg.root != '/':
+ writemsg(" for %s" % (pkg.root,), noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+
+ def _show_missed_update_slot_conflicts(self, missed_updates):
+
+ if not missed_updates:
+ return
+
+ msg = []
+ msg.append("\nWARNING: One or more updates have been " + \
+ "skipped due to a dependency conflict:\n\n")
+
+ indent = " "
+ for pkg, parent_atoms in missed_updates:
+ msg.append(str(pkg.slot_atom))
+ if pkg.root != '/':
+ msg.append(" for %s" % (pkg.root,))
+ msg.append("\n\n")
+
+ for parent, atom in parent_atoms:
+ msg.append(indent)
+ msg.append(str(pkg))
+
+ msg.append(" conflicts with\n")
+ msg.append(2*indent)
+ if isinstance(parent,
+ (PackageArg, AtomArg)):
+ # For PackageArg and AtomArg types, it's
+ # redundant to display the atom attribute.
+ msg.append(str(parent))
+ else:
+ # Display the specific atom from SetArg or
+ # Package types.
+ msg.append("%s required by %s" % (atom, parent))
+ msg.append("\n")
+ msg.append("\n")
+
+ writemsg("".join(msg), noiselevel=-1)
+
+ def _show_slot_collision_notice(self):
+		"""Show an informational message advising the user to mask one of the
+		packages. In some cases it may be possible to resolve this
+		automatically, but support for backtracking (removal of nodes that
+		have already been selected) will be required in order to handle all
+		possible cases.
+ cases.
+ """
+
+ if not self._dynamic_config._slot_collision_info:
+ return
+
+ self._show_merge_list()
+
+ self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self)
+ handler = self._dynamic_config._slot_conflict_handler
+
+ conflict = handler.get_conflict()
+ writemsg(conflict, noiselevel=-1)
+
+ explanation = handler.get_explanation()
+ if explanation:
+ writemsg(explanation, noiselevel=-1)
+ return
+
+ if "--quiet" in self._frozen_config.myopts:
+ return
+
+ msg = []
+ msg.append("It may be possible to solve this problem ")
+ msg.append("by using package.mask to prevent one of ")
+ msg.append("those packages from being selected. ")
+ msg.append("However, it is also possible that conflicting ")
+ msg.append("dependencies exist such that they are impossible to ")
+ msg.append("satisfy simultaneously. If such a conflict exists in ")
+ msg.append("the dependencies of two different packages, then those ")
+ msg.append("packages can not be installed simultaneously.")
+ backtrack_opt = self._frozen_config.myopts.get('--backtrack')
+ if not self._dynamic_config._allow_backtracking and \
+ (backtrack_opt is None or \
+ (backtrack_opt > 0 and backtrack_opt < 30)):
+ msg.append(" You may want to try a larger value of the ")
+ msg.append("--backtrack option, such as --backtrack=30, ")
+ msg.append("in order to see if that will solve this conflict ")
+ msg.append("automatically.")
+
+ for line in textwrap.wrap(''.join(msg), 70):
+ writemsg(line + '\n', noiselevel=-1)
+ writemsg('\n', noiselevel=-1)
+
+ msg = []
+ msg.append("For more information, see MASKED PACKAGES ")
+ msg.append("section in the emerge man page or refer ")
+ msg.append("to the Gentoo Handbook.")
+ for line in textwrap.wrap(''.join(msg), 70):
+ writemsg(line + '\n', noiselevel=-1)
+ writemsg('\n', noiselevel=-1)
+
+ def _process_slot_conflicts(self):
+ """
+ Process slot conflict data to identify specific atoms which
+ lead to conflict. These atoms only match a subset of the
+ packages that have been pulled into a given slot.
+ """
+ for (slot_atom, root), slot_nodes \
+ in self._dynamic_config._slot_collision_info.items():
+
+ all_parent_atoms = set()
+ for pkg in slot_nodes:
+ parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
+ if not parent_atoms:
+ continue
+ all_parent_atoms.update(parent_atoms)
+
+ for pkg in slot_nodes:
+ parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
+ if parent_atoms is None:
+ parent_atoms = set()
+ self._dynamic_config._parent_atoms[pkg] = parent_atoms
+ for parent_atom in all_parent_atoms:
+ if parent_atom in parent_atoms:
+ continue
+ # Use package set for matching since it will match via
+ # PROVIDE when necessary, while match_from_list does not.
+ parent, atom = parent_atom
+ atom_set = InternalPackageSet(
+ initial_atoms=(atom,), allow_repo=True)
+ if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
+ parent_atoms.add(parent_atom)
+ else:
+ self._dynamic_config._slot_conflict_parent_atoms.add(parent_atom)
+
+ def _reinstall_for_flags(self, forced_flags,
+ orig_use, orig_iuse, cur_use, cur_iuse):
+ """Return a set of flags that trigger reinstallation, or None if there
+ are no such flags."""
+ if "--newuse" in self._frozen_config.myopts or \
+ "--binpkg-respect-use" in self._frozen_config.myopts:
+ flags = set(orig_iuse.symmetric_difference(
+ cur_iuse).difference(forced_flags))
+ flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
+ cur_iuse.intersection(cur_use)))
+ if flags:
+ return flags
+ elif "changed-use" == self._frozen_config.myopts.get("--reinstall"):
+ flags = orig_iuse.intersection(orig_use).symmetric_difference(
+ cur_iuse.intersection(cur_use))
+ if flags:
+ return flags
+ return None
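+
+	# Example: under --newuse, a flag that was added to or dropped from
+	# IUSE (forced flags excluded), or one whose enabled state differs
+	# between the installed and candidate package, triggers
+	# reinstallation.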
+
+ def _create_graph(self, allow_unsatisfied=False):
+ dep_stack = self._dynamic_config._dep_stack
+ dep_disjunctive_stack = self._dynamic_config._dep_disjunctive_stack
+ while dep_stack or dep_disjunctive_stack:
+ self._spinner_update()
+ while dep_stack:
+ dep = dep_stack.pop()
+ if isinstance(dep, Package):
+ if not self._add_pkg_deps(dep,
+ allow_unsatisfied=allow_unsatisfied):
+ return 0
+ continue
+ if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
+ return 0
+ if dep_disjunctive_stack:
+ if not self._pop_disjunction(allow_unsatisfied):
+ return 0
+ return 1
+
+ def _expand_set_args(self, input_args, add_to_digraph=False):
+ """
+ Iterate over a list of DependencyArg instances and yield all
+ instances given in the input together with additional SetArg
+ instances that are generated from nested sets.
+ @param input_args: An iterable of DependencyArg instances
+ @type input_args: Iterable
+ @param add_to_digraph: If True then add SetArg instances
+ to the digraph, in order to record parent -> child
+ relationships from nested sets
+ @type add_to_digraph: Boolean
+ @rtype: Iterable
+ @returns: All args given in the input together with additional
+ SetArg instances that are generated from nested sets
+ """
+
+ traversed_set_args = set()
+
+ for arg in input_args:
+ if not isinstance(arg, SetArg):
+ yield arg
+ continue
+
+ root_config = arg.root_config
+ depgraph_sets = self._dynamic_config.sets[root_config.root]
+ arg_stack = [arg]
+ while arg_stack:
+ arg = arg_stack.pop()
+ if arg in traversed_set_args:
+ continue
+ traversed_set_args.add(arg)
+
+ if add_to_digraph:
+ self._dynamic_config.digraph.add(arg, None,
+ priority=BlockerDepPriority.instance)
+
+ yield arg
+
+ # Traverse nested sets and add them to the stack
+ # if they're not already in the graph. Also, graph
+ # edges between parent and nested sets.
+ for token in arg.pset.getNonAtoms():
+ if not token.startswith(SETPREFIX):
+ continue
+ s = token[len(SETPREFIX):]
+ nested_set = depgraph_sets.sets.get(s)
+ if nested_set is None:
+ nested_set = root_config.sets.get(s)
+ if nested_set is not None:
+ nested_arg = SetArg(arg=token, pset=nested_set,
+ root_config=root_config)
+ arg_stack.append(nested_arg)
+ if add_to_digraph:
+ self._dynamic_config.digraph.add(nested_arg, arg,
+ priority=BlockerDepPriority.instance)
+ depgraph_sets.sets[nested_arg.name] = nested_arg.pset
+
+ def _add_dep(self, dep, allow_unsatisfied=False):
+ debug = "--debug" in self._frozen_config.myopts
+ buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
+ nodeps = "--nodeps" in self._frozen_config.myopts
+ deep = self._dynamic_config.myparams.get("deep", 0)
+ recurse = deep is True or dep.depth <= deep
+ if dep.blocker:
+ if not buildpkgonly and \
+ not nodeps and \
+ not dep.collapsed_priority.ignored and \
+ not dep.collapsed_priority.optional and \
+ dep.parent not in self._dynamic_config._slot_collision_nodes:
+ if dep.parent.onlydeps:
+ # It's safe to ignore blockers if the
+ # parent is an --onlydeps node.
+ return 1
+ # The blocker applies to the root where
+ # the parent is or will be installed.
+ blocker = Blocker(atom=dep.atom,
+ eapi=dep.parent.metadata["EAPI"],
+ priority=dep.priority, root=dep.parent.root)
+ self._dynamic_config._blocker_parents.add(blocker, dep.parent)
+ return 1
+
+ if dep.child is None:
+ dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
+ onlydeps=dep.onlydeps)
+ else:
+ # The caller has selected a specific package
+ # via self._minimize_packages().
+ dep_pkg = dep.child
+ existing_node = self._dynamic_config._slot_pkg_map[
+ dep.root].get(dep_pkg.slot_atom)
+
+ if not dep_pkg:
+ if (dep.collapsed_priority.optional or
+ dep.collapsed_priority.ignored):
+ # This is an unnecessary build-time dep.
+ return 1
+ if allow_unsatisfied:
+ self._dynamic_config._unsatisfied_deps.append(dep)
+ return 1
+ self._dynamic_config._unsatisfied_deps_for_display.append(
+ ((dep.root, dep.atom), {"myparent":dep.parent}))
+
+ # The parent node should not already be in
+ # runtime_pkg_mask, since that would trigger an
+ # infinite backtracking loop.
+ if self._dynamic_config._allow_backtracking:
+ if dep.parent in self._dynamic_config._runtime_pkg_mask:
+ if "--debug" in self._frozen_config.myopts:
+ writemsg(
+ "!!! backtracking loop detected: %s %s\n" % \
+ (dep.parent,
+ self._dynamic_config._runtime_pkg_mask[
+ dep.parent]), noiselevel=-1)
+ elif not self.need_restart():
+ # Do not backtrack if only USE flags have to be changed in
+ # order to satisfy the dependency.
+ dep_pkg, existing_node = \
+ self._select_package(dep.root, dep.atom.without_use,
+ onlydeps=dep.onlydeps)
+ if dep_pkg is None:
+ self._dynamic_config._backtrack_infos["missing dependency"] = dep
+ self._dynamic_config._need_restart = True
+ if "--debug" in self._frozen_config.myopts:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("backtracking due to unsatisfied dep:")
+ msg.append(" parent: %s" % dep.parent)
+ msg.append(" priority: %s" % dep.priority)
+ msg.append(" root: %s" % dep.root)
+ msg.append(" atom: %s" % dep.atom)
+ msg.append("")
+ writemsg_level("".join("%s\n" % l for l in msg),
+ noiselevel=-1, level=logging.DEBUG)
+
+ return 0
+
+ self._rebuild.add(dep_pkg, dep)
+
+ ignore = dep.collapsed_priority.ignored and \
+ not self._dynamic_config._traverse_ignored_deps
+ if not ignore and not self._add_pkg(dep_pkg, dep):
+ return 0
+ return 1
+
+ def _check_slot_conflict(self, pkg, atom):
+ existing_node = self._dynamic_config._slot_pkg_map[pkg.root].get(pkg.slot_atom)
+ matches = None
+ if existing_node:
+ matches = pkg.cpv == existing_node.cpv
+ if pkg != existing_node and \
+ atom is not None:
+ # Use package set for matching since it will match via
+ # PROVIDE when necessary, while match_from_list does not.
+ matches = bool(InternalPackageSet(initial_atoms=(atom,),
+ allow_repo=True).findAtomForPackage(existing_node,
+ modified_use=self._pkg_use_enabled(existing_node)))
+
+ return (existing_node, matches)
+
+ def _add_pkg(self, pkg, dep):
+ """
+ Adds a package to the depgraph, queues dependencies, and handles
+ slot conflicts.
+ """
+ debug = "--debug" in self._frozen_config.myopts
+ myparent = None
+ priority = None
+ depth = 0
+ if dep is None:
+ dep = Dependency()
+ else:
+ myparent = dep.parent
+ priority = dep.priority
+ depth = dep.depth
+ if priority is None:
+ priority = DepPriority()
+
+ if debug:
+ writemsg_level(
+ "\n%s%s %s\n" % ("Child:".ljust(15), pkg,
+ pkg_use_display(pkg, self._frozen_config.myopts,
+ modified_use=self._pkg_use_enabled(pkg))),
+ level=logging.DEBUG, noiselevel=-1)
+ if isinstance(myparent,
+ (PackageArg, AtomArg)):
+ # For PackageArg and AtomArg types, it's
+ # redundant to display the atom attribute.
+ writemsg_level(
+ "%s%s\n" % ("Parent Dep:".ljust(15), myparent),
+ level=logging.DEBUG, noiselevel=-1)
+ else:
+ # Display the specific atom from SetArg or
+ # Package types.
+ writemsg_level(
+ "%s%s required by %s\n" %
+ ("Parent Dep:".ljust(15), dep.atom, myparent),
+ level=logging.DEBUG, noiselevel=-1)
+
+ # Ensure that the dependencies of the same package
+ # are never processed more than once.
+ previously_added = pkg in self._dynamic_config.digraph
+
+ # select the correct /var database that we'll be checking against
+ vardbapi = self._frozen_config.trees[pkg.root]["vartree"].dbapi
+ pkgsettings = self._frozen_config.pkgsettings[pkg.root]
+
+ arg_atoms = None
+ try:
+ arg_atoms = list(self._iter_atoms_for_pkg(pkg))
+ except portage.exception.InvalidDependString as e:
+ if not pkg.installed:
+ # should have been masked before it was selected
+ raise
+ del e
+
+ # NOTE: REQUIRED_USE checks are delayed until after
+ # package selection, since we want to prompt the user
+ # for USE adjustment rather than have REQUIRED_USE
+ # affect package selection and || dep choices.
+ if not pkg.built and pkg.metadata["REQUIRED_USE"] and \
+ eapi_has_required_use(pkg.metadata["EAPI"]):
+ required_use_is_sat = check_required_use(
+ pkg.metadata["REQUIRED_USE"],
+ self._pkg_use_enabled(pkg),
+ pkg.iuse.is_valid_flag)
+ if not required_use_is_sat:
+ if dep.atom is not None and dep.parent is not None:
+ self._add_parent_atom(pkg, (dep.parent, dep.atom))
+
+ if arg_atoms:
+ for parent_atom in arg_atoms:
+ parent, atom = parent_atom
+ self._add_parent_atom(pkg, parent_atom)
+
+ atom = dep.atom
+ if atom is None:
+ atom = Atom("=" + pkg.cpv)
+ self._dynamic_config._unsatisfied_deps_for_display.append(
+ ((pkg.root, atom), {"myparent":dep.parent}))
+ self._dynamic_config._skip_restart = True
+ return 0
+
+ if not pkg.onlydeps:
+
+ existing_node, existing_node_matches = \
+ self._check_slot_conflict(pkg, dep.atom)
+ slot_collision = False
+ if existing_node:
+ if existing_node_matches:
+ # The existing node can be reused.
+ if arg_atoms:
+ for parent_atom in arg_atoms:
+ parent, atom = parent_atom
+ self._dynamic_config.digraph.add(existing_node, parent,
+ priority=priority)
+ self._add_parent_atom(existing_node, parent_atom)
+ # If a direct circular dependency is not an unsatisfied
+ # buildtime dependency then drop it here since otherwise
+ # it can skew the merge order calculation in an unwanted
+ # way.
+ if existing_node != myparent or \
+ (priority.buildtime and not priority.satisfied):
+ self._dynamic_config.digraph.addnode(existing_node, myparent,
+ priority=priority)
+ if dep.atom is not None and dep.parent is not None:
+ self._add_parent_atom(existing_node,
+ (dep.parent, dep.atom))
+ return 1
+ else:
+ # A slot conflict has occurred.
+ # The existing node should not already be in
+ # runtime_pkg_mask, since that would trigger an
+ # infinite backtracking loop.
+ if self._dynamic_config._allow_backtracking and \
+ existing_node in \
+ self._dynamic_config._runtime_pkg_mask:
+ if "--debug" in self._frozen_config.myopts:
+ writemsg(
+ "!!! backtracking loop detected: %s %s\n" % \
+ (existing_node,
+ self._dynamic_config._runtime_pkg_mask[
+ existing_node]), noiselevel=-1)
+ elif self._dynamic_config._allow_backtracking and \
+ not self._accept_blocker_conflicts() and \
+ not self.need_restart():
+
+ self._add_slot_conflict(pkg)
+ if dep.atom is not None and dep.parent is not None:
+ self._add_parent_atom(pkg, (dep.parent, dep.atom))
+
+ if arg_atoms:
+ for parent_atom in arg_atoms:
+ parent, atom = parent_atom
+ self._add_parent_atom(pkg, parent_atom)
+ self._process_slot_conflicts()
+
+ backtrack_data = []
+ fallback_data = []
+ all_parents = set()
+ # The ordering of backtrack_data can make
+ # a difference here, because both mask actions may lead
+ # to valid, but different, solutions and the one with
+ # 'existing_node' masked is usually the better one. Because
+ # of that, we choose an order such that
+ # the backtracker will first explore the choice with
+ # existing_node masked. The backtracker reverses the
+ # order, so the order it uses is the reverse of the
+ # order shown here. See bug #339606.
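+ # Each tuple below is (to_be_selected, to_be_masked); appending
+ # (pkg, existing_node) last makes existing_node the first mask
+ # candidate tried, via backtrack_data[-1].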
+ for to_be_selected, to_be_masked in (existing_node, pkg), (pkg, existing_node):
+ # For missed update messages, find out which
+ # atoms matched to_be_selected that did not
+ # match to_be_masked.
+ parent_atoms = \
+ self._dynamic_config._parent_atoms.get(to_be_selected, set())
+ if parent_atoms:
+ conflict_atoms = self._dynamic_config._slot_conflict_parent_atoms.intersection(parent_atoms)
+ if conflict_atoms:
+ parent_atoms = conflict_atoms
+
+ all_parents.update(parent_atoms)
+
+ all_match = True
+ for parent, atom in parent_atoms:
+ i = InternalPackageSet(initial_atoms=(atom,),
+ allow_repo=True)
+ if not i.findAtomForPackage(to_be_masked):
+ all_match = False
+ break
+
+ if to_be_selected >= to_be_masked:
+ # We only care about the parent atoms
+ # when they trigger a downgrade.
+ parent_atoms = set()
+
+ fallback_data.append((to_be_masked, parent_atoms))
+
+ # If 'to_be_masked' does not violate any parent atom
+ # then there is no point in masking it.
+ if not all_match:
+ backtrack_data.append((to_be_masked, parent_atoms))
+
+ if not backtrack_data:
+ # This shouldn't happen, but fall back to the old
+ # behavior if this gets triggered somehow.
+ backtrack_data = fallback_data
+
+ if len(backtrack_data) > 1:
+ # NOTE: Generally, we prefer to mask the higher
+ # version since this solves common cases in which a
+ # lower version is needed so that all dependencies
+ # will be satisfied (bug #337178). However, if
+ # existing_node happens to be installed then we
+ # mask that since this is a common case that is
+ # triggered when --update is not enabled.
+ if existing_node.installed:
+ pass
+ elif pkg > existing_node:
+ backtrack_data.reverse()
+
+ to_be_masked = backtrack_data[-1][0]
+
+ self._dynamic_config._backtrack_infos["slot conflict"] = backtrack_data
+ self._dynamic_config._need_restart = True
+ if "--debug" in self._frozen_config.myopts:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("backtracking due to slot conflict:")
+ if backtrack_data is fallback_data:
+ msg.append("!!! backtrack_data fallback")
+ msg.append(" first package: %s" % existing_node)
+ msg.append(" second package: %s" % pkg)
+ msg.append(" package to mask: %s" % to_be_masked)
+ msg.append(" slot: %s" % pkg.slot_atom)
+ msg.append(" parents: %s" % ", ".join( \
+ "(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents))
+ msg.append("")
+ writemsg_level("".join("%s\n" % l for l in msg),
+ noiselevel=-1, level=logging.DEBUG)
+ return 0
+
+ # A slot collision has occurred. Sometimes this coincides
+ # with unresolvable blockers, so the slot collision will be
+ # shown later if there are no unresolvable blockers.
+ self._add_slot_conflict(pkg)
+ slot_collision = True
+
+ if debug:
+ writemsg_level(
+ "%s%s %s\n" % ("Slot Conflict:".ljust(15),
+ existing_node, pkg_use_display(existing_node,
+ self._frozen_config.myopts,
+ modified_use=self._pkg_use_enabled(existing_node))),
+ level=logging.DEBUG, noiselevel=-1)
+
+ if slot_collision:
+ # Now add this node to the graph so that self.display()
+ # can show use flags and --tree portage.output. This node is
+ # only being partially added to the graph. It must not be
+ # allowed to interfere with the other nodes that have been
+ # added. Do not overwrite data for existing nodes in
+ # self._dynamic_config.mydbapi since that data will be used for blocker
+ # validation.
+ # Even though the graph is now invalid, continue to process
+ # dependencies so that things like --fetchonly can still
+ # function despite collisions.
+ pass
+ elif not previously_added:
+ self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
+ self._dynamic_config.mydbapi[pkg.root].cpv_inject(pkg)
+ self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
+ self._dynamic_config._highest_pkg_cache.clear()
+ self._check_masks(pkg)
+
+ if not pkg.installed:
+ # Allow this package to satisfy old-style virtuals in case it
+ # doesn't already. Any pre-existing providers will be preferred
+ # over this one.
+ try:
+ pkgsettings.setinst(pkg.cpv, pkg.metadata)
+ # For consistency, also update the global virtuals.
+ settings = self._frozen_config.roots[pkg.root].settings
+ settings.unlock()
+ settings.setinst(pkg.cpv, pkg.metadata)
+ settings.lock()
+ except portage.exception.InvalidDependString as e:
+ if not pkg.installed:
+ # should have been masked before it was selected
+ raise
+
+ if arg_atoms:
+ self._dynamic_config._set_nodes.add(pkg)
+
+ # Do this even when addme is False (--onlydeps) so that the
+ # parent/child relationship is always known in case
+ # self._show_slot_collision_notice() needs to be called later.
+ self._dynamic_config.digraph.add(pkg, myparent, priority=priority)
+ if dep.atom is not None and dep.parent is not None:
+ self._add_parent_atom(pkg, (dep.parent, dep.atom))
+
+ if arg_atoms:
+ for parent_atom in arg_atoms:
+ parent, atom = parent_atom
+ self._dynamic_config.digraph.add(pkg, parent, priority=priority)
+ self._add_parent_atom(pkg, parent_atom)
+
+ """ This section determines whether we go deeper into dependencies or not.
+ We want to go deeper on a few occasions:
+ Installing package A, we need to make sure package A's deps are met.
+ emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
+ If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
+ """
+ if arg_atoms:
+ depth = 0
+ pkg.depth = depth
+ deep = self._dynamic_config.myparams.get("deep", 0)
+ recurse = deep is True or depth + 1 <= deep
+ dep_stack = self._dynamic_config._dep_stack
+ if "recurse" not in self._dynamic_config.myparams:
+ return 1
+ elif pkg.installed and not recurse:
+ dep_stack = self._dynamic_config._ignored_deps
+
+ self._spinner_update()
+
+ if not previously_added:
+ dep_stack.append(pkg)
+ return 1
+
+ def _check_masks(self, pkg):
+
+ slot_key = (pkg.root, pkg.slot_atom)
+
+ # Check for upgrades in the same slot that are
+ # masked due to a LICENSE change in a newer
+ # version that is not masked for any other reason.
+ other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
+ if other_pkg is not None and pkg < other_pkg:
+ self._dynamic_config._masked_license_updates.add(other_pkg)
+
+ def _add_parent_atom(self, pkg, parent_atom):
+ parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
+ if parent_atoms is None:
+ parent_atoms = set()
+ self._dynamic_config._parent_atoms[pkg] = parent_atoms
+ parent_atoms.add(parent_atom)
+
+ def _add_slot_conflict(self, pkg):
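+ # Record pkg as a slot-conflict participant, seeding the conflict
+ # set with the package that currently occupies the slot in
+ # _slot_pkg_map.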
+ self._dynamic_config._slot_collision_nodes.add(pkg)
+ slot_key = (pkg.slot_atom, pkg.root)
+ slot_nodes = self._dynamic_config._slot_collision_info.get(slot_key)
+ if slot_nodes is None:
+ slot_nodes = set()
+ slot_nodes.add(self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom])
+ self._dynamic_config._slot_collision_info[slot_key] = slot_nodes
+ slot_nodes.add(pkg)
+
+ def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
+
+ mytype = pkg.type_name
+ myroot = pkg.root
+ mykey = pkg.cpv
+ metadata = pkg.metadata
+ myuse = self._pkg_use_enabled(pkg)
+ jbigkey = pkg
+ depth = pkg.depth + 1
+ removal_action = "remove" in self._dynamic_config.myparams
+
+ edepend = {}
+ depkeys = ["DEPEND", "RDEPEND", "PDEPEND"]
+ for k in depkeys:
+ edepend[k] = metadata[k]
+
+ if not pkg.built and \
+ "--buildpkgonly" in self._frozen_config.myopts and \
+ "deep" not in self._dynamic_config.myparams:
+ edepend["RDEPEND"] = ""
+ edepend["PDEPEND"] = ""
+
+ ignore_build_time_deps = False
+ if pkg.built and not removal_action:
+ if self._dynamic_config.myparams.get("bdeps", "n") == "y":
+ # Pull in build time deps as requested, but mark them as
+ # "optional" since they are not strictly required. This allows
+ # more freedom in the merge order calculation for solving
+ # circular dependencies. Don't convert to PDEPEND since that
+ # could make --with-bdeps=y less effective if it is used to
+ # adjust merge order to prevent built_with_use() calls from
+ # failing.
+ pass
+ else:
+ ignore_build_time_deps = True
+
+ if removal_action and self._dynamic_config.myparams.get("bdeps", "y") == "n":
+ # Removal actions never traverse ignored buildtime
+ # dependencies, so it's safe to discard them early.
+ edepend["DEPEND"] = ""
+ ignore_build_time_deps = True
+
+ if removal_action:
+ depend_root = myroot
+ else:
+ depend_root = "/"
+ root_deps = self._frozen_config.myopts.get("--root-deps")
+ if root_deps is not None:
+ if root_deps is True:
+ depend_root = myroot
+ elif root_deps == "rdeps":
+ ignore_build_time_deps = True
+
+ # If rebuild mode is not enabled, it's safe to discard ignored
+ # build-time dependencies. If you want these deps to be traversed
+ # in "complete" mode then you need to specify --with-bdeps=y.
+ if ignore_build_time_deps and \
+ not self._rebuild.rebuild:
+ edepend["DEPEND"] = ""
+
+ deps = (
+ (depend_root, edepend["DEPEND"],
+ self._priority(buildtime=True,
+ optional=(pkg.built or ignore_build_time_deps),
+ ignored=ignore_build_time_deps)),
+ (myroot, edepend["RDEPEND"],
+ self._priority(runtime=True)),
+ (myroot, edepend["PDEPEND"],
+ self._priority(runtime_post=True))
+ )
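+ # Note that build-time deps (DEPEND) are resolved against
+ # depend_root, while run-time deps (RDEPEND/PDEPEND) are always
+ # resolved against the package's own root.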
+
+ debug = "--debug" in self._frozen_config.myopts
+ strict = mytype != "installed"
+
+ for dep_root, dep_string, dep_priority in deps:
+ if not dep_string:
+ continue
+ if debug:
+ writemsg_level("\nParent: %s\n" % (pkg,),
+ noiselevel=-1, level=logging.DEBUG)
+ writemsg_level("Depstring: %s\n" % (dep_string,),
+ noiselevel=-1, level=logging.DEBUG)
+ writemsg_level("Priority: %s\n" % (dep_priority,),
+ noiselevel=-1, level=logging.DEBUG)
+
+ try:
+ dep_string = portage.dep.use_reduce(dep_string,
+ uselist=self._pkg_use_enabled(pkg), is_valid_flag=pkg.iuse.is_valid_flag)
+ except portage.exception.InvalidDependString as e:
+ if not pkg.installed:
+ # should have been masked before it was selected
+ raise
+ del e
+
+ # Try again, but omit the is_valid_flag argument, since
+ # invalid USE conditionals are a common problem and it's
+ # practical to ignore this issue for installed packages.
+ try:
+ dep_string = portage.dep.use_reduce(dep_string,
+ uselist=self._pkg_use_enabled(pkg))
+ except portage.exception.InvalidDependString as e:
+ self._dynamic_config._masked_installed.add(pkg)
+ del e
+ continue
+
+ try:
+ dep_string = list(self._queue_disjunctive_deps(
+ pkg, dep_root, dep_priority, dep_string))
+ except portage.exception.InvalidDependString as e:
+ if pkg.installed:
+ self._dynamic_config._masked_installed.add(pkg)
+ del e
+ continue
+
+ # should have been masked before it was selected
+ raise
+
+ if not dep_string:
+ continue
+
+ dep_string = portage.dep.paren_enclose(dep_string,
+ unevaluated_atom=True)
+
+ if not self._add_pkg_dep_string(
+ pkg, dep_root, dep_priority, dep_string,
+ allow_unsatisfied):
+ return 0
+
+ self._dynamic_config._traversed_pkg_deps.add(pkg)
+ return 1
+
+ def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
+ allow_unsatisfied):
+ _autounmask_backup = self._dynamic_config._autounmask
+ if dep_priority.optional or dep_priority.ignored:
+ # Temporarily disable autounmask for deps that
+ # don't necessarily need to be satisfied.
+ self._dynamic_config._autounmask = False
+ try:
+ return self._wrapped_add_pkg_dep_string(
+ pkg, dep_root, dep_priority, dep_string,
+ allow_unsatisfied)
+ finally:
+ self._dynamic_config._autounmask = _autounmask_backup
+
+ def _wrapped_add_pkg_dep_string(self, pkg, dep_root, dep_priority,
+ dep_string, allow_unsatisfied):
+ depth = pkg.depth + 1
+ deep = self._dynamic_config.myparams.get("deep", 0)
+ recurse_satisfied = deep is True or depth <= deep
+ debug = "--debug" in self._frozen_config.myopts
+ strict = pkg.type_name != "installed"
+
+ if debug:
+ writemsg_level("\nParent: %s\n" % (pkg,),
+ noiselevel=-1, level=logging.DEBUG)
+ writemsg_level("Depstring: %s\n" % (dep_string,),
+ noiselevel=-1, level=logging.DEBUG)
+ writemsg_level("Priority: %s\n" % (dep_priority,),
+ noiselevel=-1, level=logging.DEBUG)
+
+ try:
+ selected_atoms = self._select_atoms(dep_root,
+ dep_string, myuse=self._pkg_use_enabled(pkg), parent=pkg,
+ strict=strict, priority=dep_priority)
+ except portage.exception.InvalidDependString as e:
+ if pkg.installed:
+ self._dynamic_config._masked_installed.add(pkg)
+ return 1
+
+ # should have been masked before it was selected
+ raise
+
+ if debug:
+ writemsg_level("Candidates: %s\n" % \
+ ([str(x) for x in selected_atoms[pkg]],),
+ noiselevel=-1, level=logging.DEBUG)
+
+ root_config = self._frozen_config.roots[dep_root]
+ vardb = root_config.trees["vartree"].dbapi
+ traversed_virt_pkgs = set()
+
+ reinstall_atoms = self._frozen_config.reinstall_atoms
+ for atom, child in self._minimize_children(
+ pkg, dep_priority, root_config, selected_atoms[pkg]):
+
+ # If this was a specially generated virtual atom
+ # from dep_check, map it back to the original, in
+ # order to avoid distortion in places like display
+ # or conflict resolution code.
+ is_virt = hasattr(atom, '_orig_atom')
+ atom = getattr(atom, '_orig_atom', atom)
+
+ if atom.blocker and \
+ (dep_priority.optional or dep_priority.ignored):
+ # For --with-bdeps, ignore build-time only blockers
+ # that originate from built packages.
+ continue
+
+ mypriority = dep_priority.copy()
+ if not atom.blocker:
+ inst_pkgs = [inst_pkg for inst_pkg in
+ reversed(vardb.match_pkgs(atom))
+ if not reinstall_atoms.findAtomForPackage(inst_pkg,
+ modified_use=self._pkg_use_enabled(inst_pkg))]
+ if inst_pkgs:
+ for inst_pkg in inst_pkgs:
+ if self._pkg_visibility_check(inst_pkg):
+ # highest visible
+ mypriority.satisfied = inst_pkg
+ break
+ if not mypriority.satisfied:
+ # none visible, so use highest
+ mypriority.satisfied = inst_pkgs[0]
+
+ dep = Dependency(atom=atom,
+ blocker=atom.blocker, child=child, depth=depth, parent=pkg,
+ priority=mypriority, root=dep_root)
+
+ # In some cases, dep_check will return deps that shouldn't
+ # be processed any further, so they are identified and
+ # discarded here. Try to discard as few as possible since
+ # discarded dependencies reduce the amount of information
+ # available for optimization of merge order.
+ ignored = False
+ if not atom.blocker and \
+ not recurse_satisfied and \
+ mypriority.satisfied and \
+ mypriority.satisfied.visible and \
+ dep.child is not None and \
+ not dep.child.installed and \
+ self._dynamic_config._slot_pkg_map[dep.child.root].get(
+ dep.child.slot_atom) is None:
+ myarg = None
+ if dep.root == self._frozen_config.target_root:
+ try:
+ myarg = next(self._iter_atoms_for_pkg(dep.child))
+ except StopIteration:
+ pass
+ except InvalidDependString:
+ if not dep.child.installed:
+ # This shouldn't happen since the package
+ # should have been masked.
+ raise
+
+ if myarg is None:
+ # Existing child selection may not be valid unless
+ # it's added to the graph immediately, since "complete"
+ # mode may select a different child later.
+ ignored = True
+ dep.child = None
+ self._dynamic_config._ignored_deps.append(dep)
+
+ if not ignored:
+ if dep_priority.ignored and \
+ not self._dynamic_config._traverse_ignored_deps:
+ if is_virt and dep.child is not None:
+ traversed_virt_pkgs.add(dep.child)
+ dep.child = None
+ self._dynamic_config._ignored_deps.append(dep)
+ else:
+ if not self._add_dep(dep,
+ allow_unsatisfied=allow_unsatisfied):
+ return 0
+ if is_virt and dep.child is not None:
+ traversed_virt_pkgs.add(dep.child)
+
+ selected_atoms.pop(pkg)
+
+ # Add selected indirect virtual deps to the graph. This
+ # takes advantage of circular dependency avoidance that's done
+ # by dep_zapdeps. We preserve actual parent/child relationships
+ # here in order to avoid distorting the dependency graph like
+ # <=portage-2.1.6.x did.
+ for virt_dep, atoms in selected_atoms.items():
+
+ virt_pkg = virt_dep.child
+ if virt_pkg not in traversed_virt_pkgs:
+ continue
+
+ if debug:
+ writemsg_level("\nCandidates: %s: %s\n" % \
+ (virt_pkg.cpv, [str(x) for x in atoms]),
+ noiselevel=-1, level=logging.DEBUG)
+
+ if not dep_priority.ignored or \
+ self._dynamic_config._traverse_ignored_deps:
+
+ inst_pkgs = [inst_pkg for inst_pkg in
+ reversed(vardb.match_pkgs(virt_dep.atom))
+ if not reinstall_atoms.findAtomForPackage(inst_pkg,
+ modified_use=self._pkg_use_enabled(inst_pkg))]
+ if inst_pkgs:
+ for inst_pkg in inst_pkgs:
+ if self._pkg_visibility_check(inst_pkg):
+ # highest visible
+ virt_dep.priority.satisfied = inst_pkg
+ break
+ if not virt_dep.priority.satisfied:
+ # none visible, so use highest
+ virt_dep.priority.satisfied = inst_pkgs[0]
+
+ if not self._add_pkg(virt_pkg, virt_dep):
+ return 0
+
+ for atom, child in self._minimize_children(
+ pkg, self._priority(runtime=True), root_config, atoms):
+
+ # If this was a specially generated virtual atom
+ # from dep_check, map it back to the original, in
+ # order to avoid distortion in places like display
+ # or conflict resolution code.
+ is_virt = hasattr(atom, '_orig_atom')
+ atom = getattr(atom, '_orig_atom', atom)
+
+ # This is a GLEP 37 virtual, so its deps are all runtime.
+ mypriority = self._priority(runtime=True)
+ if not atom.blocker:
+ inst_pkgs = [inst_pkg for inst_pkg in
+ reversed(vardb.match_pkgs(atom))
+ if not reinstall_atoms.findAtomForPackage(inst_pkg,
+ modified_use=self._pkg_use_enabled(inst_pkg))]
+ if inst_pkgs:
+ for inst_pkg in inst_pkgs:
+ if self._pkg_visibility_check(inst_pkg):
+ # highest visible
+ mypriority.satisfied = inst_pkg
+ break
+ if not mypriority.satisfied:
+ # none visible, so use highest
+ mypriority.satisfied = inst_pkgs[0]
+
+ # Dependencies of virtuals are considered to have the
+ # same depth as the virtual itself.
+ dep = Dependency(atom=atom,
+ blocker=atom.blocker, child=child, depth=virt_dep.depth,
+ parent=virt_pkg, priority=mypriority, root=dep_root,
+ collapsed_parent=pkg, collapsed_priority=dep_priority)
+
+ ignored = False
+ if not atom.blocker and \
+ not recurse_satisfied and \
+ mypriority.satisfied and \
+ mypriority.satisfied.visible and \
+ dep.child is not None and \
+ not dep.child.installed and \
+ self._dynamic_config._slot_pkg_map[dep.child.root].get(
+ dep.child.slot_atom) is None:
+ myarg = None
+ if dep.root == self._frozen_config.target_root:
+ try:
+ myarg = next(self._iter_atoms_for_pkg(dep.child))
+ except StopIteration:
+ pass
+ except InvalidDependString:
+ if not dep.child.installed:
+ raise
+
+ if myarg is None:
+ ignored = True
+ dep.child = None
+ self._dynamic_config._ignored_deps.append(dep)
+
+ if not ignored:
+ if dep_priority.ignored and \
+ not self._dynamic_config._traverse_ignored_deps:
+ if is_virt and dep.child is not None:
+ traversed_virt_pkgs.add(dep.child)
+ dep.child = None
+ self._dynamic_config._ignored_deps.append(dep)
+ else:
+ if not self._add_dep(dep,
+ allow_unsatisfied=allow_unsatisfied):
+ return 0
+ if is_virt and dep.child is not None:
+ traversed_virt_pkgs.add(dep.child)
+
+ if debug:
+ writemsg_level("\nExiting... %s\n" % (pkg,),
+ noiselevel=-1, level=logging.DEBUG)
+
+ return 1
+
+ def _minimize_children(self, parent, priority, root_config, atoms):
+ """
+ Selects packages to satisfy the given atoms, and minimizes the
+ number of selected packages. This serves to identify and eliminate
+ redundant package selections when multiple atoms happen to specify
+ a version range.
+ """
+
+ atom_pkg_map = {}
+
+ for atom in atoms:
+ if atom.blocker:
+ yield (atom, None)
+ continue
+ dep_pkg, existing_node = self._select_package(
+ root_config.root, atom)
+ if dep_pkg is None:
+ yield (atom, None)
+ continue
+ atom_pkg_map[atom] = dep_pkg
+
+ if len(atom_pkg_map) < 2:
+ for item in atom_pkg_map.items():
+ yield item
+ return
+
+ cp_pkg_map = {}
+ pkg_atom_map = {}
+ for atom, pkg in atom_pkg_map.items():
+ pkg_atom_map.setdefault(pkg, set()).add(atom)
+ cp_pkg_map.setdefault(pkg.cp, set()).add(pkg)
+
+ for cp, pkgs in cp_pkg_map.items():
+ if len(pkgs) < 2:
+ for pkg in pkgs:
+ for atom in pkg_atom_map[pkg]:
+ yield (atom, pkg)
+ continue
+
+ # Use a digraph to identify and eliminate any
+ # redundant package selections.
+ atom_pkg_graph = digraph()
+ cp_atoms = set()
+ for pkg1 in pkgs:
+ for atom in pkg_atom_map[pkg1]:
+ cp_atoms.add(atom)
+ atom_pkg_graph.add(pkg1, atom)
+ atom_set = InternalPackageSet(initial_atoms=(atom,),
+ allow_repo=True)
+ for pkg2 in pkgs:
+ if pkg2 is pkg1:
+ continue
+ if atom_set.findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)):
+ atom_pkg_graph.add(pkg2, atom)
+
+ for pkg in pkgs:
+ eliminate_pkg = True
+ for atom in atom_pkg_graph.parent_nodes(pkg):
+ if len(atom_pkg_graph.child_nodes(atom)) < 2:
+ eliminate_pkg = False
+ break
+ if eliminate_pkg:
+ atom_pkg_graph.remove(pkg)
+
+ # Yield ~, =*, < and <= atoms first, since those are more likely to
+ # cause slot conflicts, and we want those atoms to be displayed
+ # in the resulting slot conflict message (see bug #291142).
+ conflict_atoms = []
+ normal_atoms = []
+ for atom in cp_atoms:
+ conflict = False
+ for child_pkg in atom_pkg_graph.child_nodes(atom):
+ existing_node, matches = \
+ self._check_slot_conflict(child_pkg, atom)
+ if existing_node and not matches:
+ conflict = True
+ break
+ if conflict:
+ conflict_atoms.append(atom)
+ else:
+ normal_atoms.append(atom)
+
+ for atom in chain(conflict_atoms, normal_atoms):
+ child_pkgs = atom_pkg_graph.child_nodes(atom)
+ # if there is more than one child, sort so that the
+ # highest version is yielded
+ if len(child_pkgs) > 1:
+ child_pkgs.sort()
+ yield (atom, child_pkgs[-1])
+
+ def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
+ """
+ Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack.
+ Yields non-disjunctive deps. Raises InvalidDependString when
+ necessary.
+ """
+ i = 0
+ while i < len(dep_struct):
+ x = dep_struct[i]
+ if isinstance(x, list):
+ for y in self._queue_disjunctive_deps(
+ pkg, dep_root, dep_priority, x):
+ yield y
+ elif x == "||":
+ self._queue_disjunction(pkg, dep_root, dep_priority,
+ [x, dep_struct[i + 1]])
+ i += 1
+ else:
+ try:
+ x = portage.dep.Atom(x)
+ except portage.exception.InvalidAtom:
+ if not pkg.installed:
+ raise portage.exception.InvalidDependString(
+ "invalid atom: '%s'" % x)
+ else:
+ # Note: Eventually this will check for PROPERTIES=virtual
+ # or whatever other metadata gets implemented for this
+ # purpose.
+ if x.cp.startswith('virtual/'):
+ self._queue_disjunction(pkg, dep_root,
+ dep_priority, [str(x)])
+ else:
+ yield str(x)
+ i += 1
+
+ def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
+ self._dynamic_config._dep_disjunctive_stack.append(
+ (pkg, dep_root, dep_priority, dep_struct))
+
+ def _pop_disjunction(self, allow_unsatisfied):
+ """
+ Pop one disjunctive dep from self._dynamic_config._dep_disjunctive_stack, and use it to
+ populate self._dynamic_config._dep_stack.
+ """
+ pkg, dep_root, dep_priority, dep_struct = \
+ self._dynamic_config._dep_disjunctive_stack.pop()
+ dep_string = portage.dep.paren_enclose(dep_struct,
+ unevaluated_atom=True)
+ if not self._add_pkg_dep_string(
+ pkg, dep_root, dep_priority, dep_string, allow_unsatisfied):
+ return 0
+ return 1
+
+ def _priority(self, **kwargs):
+ if "remove" in self._dynamic_config.myparams:
+ priority_constructor = UnmergeDepPriority
+ else:
+ priority_constructor = DepPriority
+ return priority_constructor(**kwargs)
+
+ def _dep_expand(self, root_config, atom_without_category):
+ """
+ @param root_config: a root config instance
+ @type root_config: RootConfig
+ @param atom_without_category: an atom without a category component
+ @type atom_without_category: String
+ @rtype: list
+ @returns: a list of atoms containing categories (possibly empty)
+ """
+ null_cp = portage.dep_getkey(insert_category_into_atom(
+ atom_without_category, "null"))
+ cat, atom_pn = portage.catsplit(null_cp)
+
+ dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
+ categories = set()
+ for db, pkg_type, built, installed, db_keys in dbs:
+ for cat in db.categories:
+ if db.cp_list("%s/%s" % (cat, atom_pn)):
+ categories.add(cat)
+
+ deps = []
+ for cat in categories:
+ deps.append(Atom(insert_category_into_atom(
+ atom_without_category, cat), allow_repo=True))
+ return deps
+
+ def _have_new_virt(self, root, atom_cp):
+ ret = False
+ for db, pkg_type, built, installed, db_keys in \
+ self._dynamic_config._filtered_trees[root]["dbs"]:
+ if db.cp_list(atom_cp):
+ ret = True
+ break
+ return ret
+
+ def _iter_atoms_for_pkg(self, pkg):
+ depgraph_sets = self._dynamic_config.sets[pkg.root]
+ atom_arg_map = depgraph_sets.atom_arg_map
+ root_config = self._frozen_config.roots[pkg.root]
+ for atom in depgraph_sets.atoms.iterAtomsForPackage(pkg):
+ if atom.cp != pkg.cp and \
+ self._have_new_virt(pkg.root, atom.cp):
+ continue
+ visible_pkgs = \
+ self._dynamic_config._visible_pkgs[pkg.root].match_pkgs(atom)
+ visible_pkgs.reverse() # descending order
+ higher_slot = None
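+ # Skip this atom if a visible, higher version in a different slot
+ # could satisfy it, since that higher slot is preferred over pkg.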
+ for visible_pkg in visible_pkgs:
+ if visible_pkg.cp != atom.cp:
+ continue
+ if pkg >= visible_pkg:
+ # This is descending order, and we're not
+ # interested in any versions <= pkg given.
+ break
+ if pkg.slot_atom != visible_pkg.slot_atom:
+ higher_slot = visible_pkg
+ break
+ if higher_slot is not None:
+ continue
+ for arg in atom_arg_map[(atom, pkg.root)]:
+ if isinstance(arg, PackageArg) and \
+ arg.package != pkg:
+ continue
+ yield arg, atom
+
+ def select_files(self, myfiles):
+ """Given a list of .tbz2s, .ebuilds sets, and deps, populate
+ self._dynamic_config._initial_arg_list and call self._resolve to create the
+ appropriate depgraph and return a favorite list."""
+ self._load_vdb()
+ debug = "--debug" in self._frozen_config.myopts
+ root_config = self._frozen_config.roots[self._frozen_config.target_root]
+ sets = root_config.sets
+ depgraph_sets = self._dynamic_config.sets[root_config.root]
+ myfavorites = []
+ myroot = self._frozen_config.target_root
+ dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
+ vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
+ real_vardb = self._frozen_config._trees_orig[myroot]["vartree"].dbapi
+ portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
+ bindb = self._frozen_config.trees[myroot]["bintree"].dbapi
+ pkgsettings = self._frozen_config.pkgsettings[myroot]
+ args = []
+ onlydeps = "--onlydeps" in self._frozen_config.myopts
+ lookup_owners = []
+ for x in myfiles:
+ ext = os.path.splitext(x)[1]
+ if ext == ".tbz2":
+ if not os.path.exists(x):
+ if os.path.exists(
+ os.path.join(pkgsettings["PKGDIR"], "All", x)):
+ x = os.path.join(pkgsettings["PKGDIR"], "All", x)
+ elif os.path.exists(
+ os.path.join(pkgsettings["PKGDIR"], x)):
+ x = os.path.join(pkgsettings["PKGDIR"], x)
+ else:
+ writemsg("\n\n!!! Binary package '"+str(x)+"' does not exist.\n", noiselevel=-1)
+ writemsg("!!! Please ensure the tbz2 exists as specified.\n\n", noiselevel=-1)
+ return 0, myfavorites
+ mytbz2 = portage.xpak.tbz2(x)
+ mykey = mytbz2.getelements("CATEGORY")[0] + "/" + os.path.splitext(os.path.basename(x))[0]
+ if os.path.realpath(x) != \
+ os.path.realpath(self._frozen_config.trees[myroot]["bintree"].getname(mykey)):
+ writemsg(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n\n"), noiselevel=-1)
+ self._dynamic_config._skip_restart = True
+ return 0, myfavorites
+
+ pkg = self._pkg(mykey, "binary", root_config,
+ onlydeps=onlydeps)
+ args.append(PackageArg(arg=x, package=pkg,
+ root_config=root_config))
+ elif ext == ".ebuild":
+ ebuild_path = portage.util.normalize_path(os.path.abspath(x))
+ pkgdir = os.path.dirname(ebuild_path)
+ tree_root = os.path.dirname(os.path.dirname(pkgdir))
+ cp = pkgdir[len(tree_root)+1:]
+ e = portage.exception.PackageNotFound(
+ ("%s is not in a valid portage tree " + \
+ "hierarchy or does not exist") % x)
+ if not portage.isvalidatom(cp):
+ raise e
+ cat = portage.catsplit(cp)[0]
+ mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
+ if not portage.isvalidatom("="+mykey):
+ raise e
+ ebuild_path = portdb.findname(mykey)
+ if ebuild_path:
+ if ebuild_path != os.path.join(os.path.realpath(tree_root),
+ cp, os.path.basename(ebuild_path)):
+ writemsg(colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n\n"), noiselevel=-1)
+ self._dynamic_config._skip_restart = True
+ return 0, myfavorites
+ if mykey not in portdb.xmatch(
+ "match-visible", portage.cpv_getkey(mykey)):
+ writemsg(colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use\n"), noiselevel=-1)
+ writemsg(colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man\n"), noiselevel=-1)
+ writemsg(colorize("BAD", "*** page for details.\n"), noiselevel=-1)
+ countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]),
+ "Continuing...")
+ else:
+ raise portage.exception.PackageNotFound(
+ "%s is not in a valid portage tree hierarchy or does not exist" % x)
+ pkg = self._pkg(mykey, "ebuild", root_config,
+ onlydeps=onlydeps, myrepo=portdb.getRepositoryName(
+ os.path.dirname(os.path.dirname(os.path.dirname(ebuild_path)))))
+ args.append(PackageArg(arg=x, package=pkg,
+ root_config=root_config))
+ elif x.startswith(os.path.sep):
+ if not x.startswith(myroot):
+ portage.writemsg(("\n\n!!! '%s' does not start with" + \
+ " $ROOT.\n") % x, noiselevel=-1)
+ self._dynamic_config._skip_restart = True
+ return 0, []
+ # Queue these up since it's most efficient to handle
+ # multiple files in a single iter_owners() call.
+ lookup_owners.append(x)
+ elif x.startswith("." + os.sep) or \
+ x.startswith(".." + os.sep):
+ f = os.path.abspath(x)
+ if not f.startswith(myroot):
+ portage.writemsg(("\n\n!!! '%s' (resolved from '%s') does not start with" + \
+ " $ROOT.\n") % (f, x), noiselevel=-1)
+ self._dynamic_config._skip_restart = True
+ return 0, []
+ lookup_owners.append(f)
+ else:
+ if x in ("system", "world"):
+ x = SETPREFIX + x
+ if x.startswith(SETPREFIX):
+ s = x[len(SETPREFIX):]
+ if s not in sets:
+ raise portage.exception.PackageSetNotFound(s)
+ if s in depgraph_sets.sets:
+ continue
+ pset = sets[s]
+ depgraph_sets.sets[s] = pset
+ args.append(SetArg(arg=x, pset=pset,
+ root_config=root_config))
+ continue
+ if not is_valid_package_atom(x, allow_repo=True):
+ portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
+ noiselevel=-1)
+ portage.writemsg("!!! Please check ebuild(5) for full details.\n")
+ portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
+ self._dynamic_config._skip_restart = True
+ return (0, [])
+ # Don't expand categories or old-style virtuals here unless
+ # necessary. Expansion of old-style virtuals here causes at
+ # least the following problems:
+ # 1) It's more difficult to determine which set(s) an atom
+ # came from, if any.
+ # 2) It takes away freedom from the resolver to choose other
+ # possible expansions when necessary.
+ if "/" in x:
+ args.append(AtomArg(arg=x, atom=Atom(x, allow_repo=True),
+ root_config=root_config))
+ continue
+ expanded_atoms = self._dep_expand(root_config, x)
+ installed_cp_set = set()
+ for atom in expanded_atoms:
+ if vardb.cp_list(atom.cp):
+ installed_cp_set.add(atom.cp)
+
+ if len(installed_cp_set) > 1:
+ non_virtual_cps = set()
+ for atom_cp in installed_cp_set:
+ if not atom_cp.startswith("virtual/"):
+ non_virtual_cps.add(atom_cp)
+ if len(non_virtual_cps) == 1:
+ installed_cp_set = non_virtual_cps
+
+ if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
+ installed_cp = next(iter(installed_cp_set))
+ for atom in expanded_atoms:
+ if atom.cp == installed_cp:
+ available = False
+ for pkg in self._iter_match_pkgs_any(
+ root_config, atom.without_use,
+ onlydeps=onlydeps):
+ if not pkg.installed:
+ available = True
+ break
+ if available:
+ expanded_atoms = [atom]
+ break
+
+ # If a non-virtual package and one or more virtual packages
+ # are in expanded_atoms, use the non-virtual package.
+ if len(expanded_atoms) > 1:
+ number_of_virtuals = 0
+ for expanded_atom in expanded_atoms:
+ if expanded_atom.cp.startswith("virtual/"):
+ number_of_virtuals += 1
+ else:
+ candidate = expanded_atom
+ if len(expanded_atoms) - number_of_virtuals == 1:
+ expanded_atoms = [candidate]
+
+ if len(expanded_atoms) > 1:
+ writemsg("\n\n", noiselevel=-1)
+ ambiguous_package_name(x, expanded_atoms, root_config,
+ self._frozen_config.spinner, self._frozen_config.myopts)
+ self._dynamic_config._skip_restart = True
+ return False, myfavorites
+ if expanded_atoms:
+ atom = expanded_atoms[0]
+ else:
+ null_atom = Atom(insert_category_into_atom(x, "null"),
+ allow_repo=True)
+ cat, atom_pn = portage.catsplit(null_atom.cp)
+ virts_p = root_config.settings.get_virts_p().get(atom_pn)
+ if virts_p:
+ # Allow the depgraph to choose which virtual.
+ atom = Atom(null_atom.replace('null/', 'virtual/', 1),
+ allow_repo=True)
+ else:
+ atom = null_atom
+
+ if atom.use and atom.use.conditional:
+ writemsg(
+ ("\n\n!!! '%s' contains a conditional " + \
+ "which is not allowed.\n") % (x,), noiselevel=-1)
+ writemsg("!!! Please check ebuild(5) for full details.\n")
+ self._dynamic_config._skip_restart = True
+ return (0, [])
+
+ args.append(AtomArg(arg=x, atom=atom,
+ root_config=root_config))
+
+ if lookup_owners:
+ relative_paths = []
+ search_for_multiple = False
+ if len(lookup_owners) > 1:
+ search_for_multiple = True
+
+ for x in lookup_owners:
+ if not search_for_multiple and os.path.isdir(x):
+ search_for_multiple = True
+ relative_paths.append(x[len(myroot)-1:])
+
+ owners = set()
+ for pkg, relative_path in \
+ real_vardb._owners.iter_owners(relative_paths):
+ owners.add(pkg.mycpv)
+ if not search_for_multiple:
+ break
+
+ if not owners:
+ portage.writemsg(("\n\n!!! '%s' is not claimed " + \
+ "by any package.\n") % lookup_owners[0], noiselevel=-1)
+ self._dynamic_config._skip_restart = True
+ return 0, []
+
+ for cpv in owners:
+ slot = vardb.aux_get(cpv, ["SLOT"])[0]
+ if not slot:
+ # portage now masks packages with missing slot, but it's
+ # possible that one was installed by an older version
+ atom = Atom(portage.cpv_getkey(cpv))
+ else:
+ atom = Atom("%s:%s" % (portage.cpv_getkey(cpv), slot))
+ args.append(AtomArg(arg=atom, atom=atom,
+ root_config=root_config))
+
+ if "--update" in self._frozen_config.myopts:
+ # In some cases, the greedy slots behavior can pull in a slot that
+ # the user would want to uninstall due to it being blocked by a
+ # newer version in a different slot. Therefore, it's necessary to
+ # detect and discard any that should be uninstalled. Each time
+ # that arguments are updated, package selections are repeated in
+ # order to ensure consistency with the current arguments:
+ #
+ # 1) Initialize args
+ # 2) Select packages and generate initial greedy atoms
+ # 3) Update args with greedy atoms
+ # 4) Select packages and generate greedy atoms again, while
+ # accounting for any blockers between selected packages
+ # 5) Update args with revised greedy atoms
+
+ self._set_args(args)
+ greedy_args = []
+ for arg in args:
+ greedy_args.append(arg)
+ if not isinstance(arg, AtomArg):
+ continue
+ for atom in self._greedy_slots(arg.root_config, arg.atom):
+ greedy_args.append(
+ AtomArg(arg=arg.arg, atom=atom,
+ root_config=arg.root_config))
+
+ self._set_args(greedy_args)
+ del greedy_args
+
+ # Revise greedy atoms, accounting for any blockers
+ # between selected packages.
+ revised_greedy_args = []
+ for arg in args:
+ revised_greedy_args.append(arg)
+ if not isinstance(arg, AtomArg):
+ continue
+ for atom in self._greedy_slots(arg.root_config, arg.atom,
+ blocker_lookahead=True):
+ revised_greedy_args.append(
+ AtomArg(arg=arg.arg, atom=atom,
+ root_config=arg.root_config))
+ args = revised_greedy_args
+ del revised_greedy_args
+
+ self._set_args(args)
+
+ myfavorites = set(myfavorites)
+ for arg in args:
+ if isinstance(arg, (AtomArg, PackageArg)):
+ myfavorites.add(arg.atom)
+ elif isinstance(arg, SetArg):
+ myfavorites.add(arg.arg)
+ myfavorites = list(myfavorites)
+
+ if debug:
+ portage.writemsg("\n", noiselevel=-1)
+ # Order needs to be preserved since a feature of --nodeps
+ # is to allow the user to force a specific merge order.
+ self._dynamic_config._initial_arg_list = args[:]
+
+ return self._resolve(myfavorites)
+
+ def _resolve(self, myfavorites):
+ """Given self._dynamic_config._initial_arg_list, pull in the root nodes,
+ call self._create_graph to process their deps and return
+ a favorite list."""
+ debug = "--debug" in self._frozen_config.myopts
+ onlydeps = "--onlydeps" in self._frozen_config.myopts
+ myroot = self._frozen_config.target_root
+ pkgsettings = self._frozen_config.pkgsettings[myroot]
+ pprovideddict = pkgsettings.pprovideddict
+ virtuals = pkgsettings.getvirtuals()
+ args = self._dynamic_config._initial_arg_list[:]
+ for root, atom in chain(self._rebuild.rebuild_list,
+ self._rebuild.reinstall_list):
+ args.append(AtomArg(arg=atom, atom=atom,
+ root_config=self._frozen_config.roots[root]))
+ for arg in self._expand_set_args(args, add_to_digraph=True):
+ for atom in arg.pset.getAtoms():
+ self._spinner_update()
+ dep = Dependency(atom=atom, onlydeps=onlydeps,
+ root=myroot, parent=arg)
+ try:
+ pprovided = pprovideddict.get(atom.cp)
+ if pprovided and portage.match_from_list(atom, pprovided):
+ # A provided package has been specified on the command line.
+ self._dynamic_config._pprovided_args.append((arg, atom))
+ continue
+ if isinstance(arg, PackageArg):
+ if not self._add_pkg(arg.package, dep) or \
+ not self._create_graph():
+ if not self.need_restart():
+ sys.stderr.write(("\n\n!!! Problem " + \
+ "resolving dependencies for %s\n") % \
+ arg.arg)
+ return 0, myfavorites
+ continue
+ if debug:
+ writemsg_level("\n Arg: %s\n Atom: %s\n" %
+ (arg, atom), noiselevel=-1, level=logging.DEBUG)
+ pkg, existing_node = self._select_package(
+ myroot, atom, onlydeps=onlydeps)
+ if not pkg:
+ pprovided_match = False
+ for virt_choice in virtuals.get(atom.cp, []):
+ expanded_atom = portage.dep.Atom(
+ atom.replace(atom.cp, virt_choice.cp, 1))
+ pprovided = pprovideddict.get(expanded_atom.cp)
+ if pprovided and \
+ portage.match_from_list(expanded_atom, pprovided):
+ # A provided package has been
+ # specified on the command line.
+ self._dynamic_config._pprovided_args.append((arg, atom))
+ pprovided_match = True
+ break
+ if pprovided_match:
+ continue
+
+ if not (isinstance(arg, SetArg) and \
+ arg.name in ("selected", "system", "world")):
+ self._dynamic_config._unsatisfied_deps_for_display.append(
+ ((myroot, atom), {"myparent" : arg}))
+ return 0, myfavorites
+
+ self._dynamic_config._missing_args.append((arg, atom))
+ continue
+ if atom.cp != pkg.cp:
+ # For old-style virtuals, we need to repeat the
+ # package.provided check against the selected package.
+ expanded_atom = atom.replace(atom.cp, pkg.cp)
+ pprovided = pprovideddict.get(pkg.cp)
+ if pprovided and \
+ portage.match_from_list(expanded_atom, pprovided):
+ # A provided package has been
+ # specified on the command line.
+ self._dynamic_config._pprovided_args.append((arg, atom))
+ continue
+ if pkg.installed and \
+ "selective" not in self._dynamic_config.myparams and \
+ not self._frozen_config.excluded_pkgs.findAtomForPackage(
+ pkg, modified_use=self._pkg_use_enabled(pkg)):
+ self._dynamic_config._unsatisfied_deps_for_display.append(
+ ((myroot, atom), {"myparent" : arg}))
+ # Previous behavior was to bail out in this case, but
+ # since the dep is satisfied by the installed package,
+ # it's more friendly to continue building the graph
+ # and just show a warning message. Therefore, only bail
+ # out here if the atom is not from either the system or
+ # world set.
+ if not (isinstance(arg, SetArg) and \
+ arg.name in ("selected", "system", "world")):
+ return 0, myfavorites
+
+ # Add the selected package to the graph as soon as possible
+ # so that later dep_check() calls can use it as feedback
+ # for making more consistent atom selections.
+ if not self._add_pkg(pkg, dep):
+ if self.need_restart():
+ pass
+ elif isinstance(arg, SetArg):
+ writemsg(("\n\n!!! Problem resolving " + \
+ "dependencies for %s from %s\n") % \
+ (atom, arg.arg), noiselevel=-1)
+ else:
+ writemsg(("\n\n!!! Problem resolving " + \
+ "dependencies for %s\n") % \
+ (atom,), noiselevel=-1)
+ return 0, myfavorites
+
+ except SystemExit:
+ raise # Needed else can't exit
+ except Exception as e:
+ writemsg("\n\n!!! Problem in '%s' dependencies.\n" % atom, noiselevel=-1)
+ writemsg("!!! %s %s\n" % (str(e), str(getattr(e, "__module__", None))))
+ raise
+
+ # Now that the root packages have been added to the graph,
+ # process the dependencies.
+ if not self._create_graph():
+ return 0, myfavorites
+
+ try:
+ self.altlist()
+ except self._unknown_internal_error:
+ return False, myfavorites
+
+ digraph_set = frozenset(self._dynamic_config.digraph)
+
+ if digraph_set.intersection(
+ self._dynamic_config._needed_unstable_keywords) or \
+ digraph_set.intersection(
+ self._dynamic_config._needed_p_mask_changes) or \
+ digraph_set.intersection(
+ self._dynamic_config._needed_use_config_changes) or \
+ digraph_set.intersection(
+ self._dynamic_config._needed_license_changes):
+ # We failed if the user needs to change the configuration
+ self._dynamic_config._success_without_autounmask = True
+ return False, myfavorites
+
+ digraph_set = None
+
+ if self._rebuild.trigger_rebuilds():
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ config = backtrack_infos.setdefault("config", {})
+ config["rebuild_list"] = self._rebuild.rebuild_list
+ config["reinstall_list"] = self._rebuild.reinstall_list
+ self._dynamic_config._need_restart = True
+ return False, myfavorites
+
+ # We return True here unless we are missing binaries.
+ return (True, myfavorites)
+
+ def _set_args(self, args):
+ """
+ Create the "__non_set_args__" package set from atoms and packages given as
+ arguments. This method can be called multiple times if necessary.
+ The package selection cache is automatically invalidated, since
+ arguments influence package selections.
+ """
+
+ set_atoms = {}
+ non_set_atoms = {}
+ for root in self._dynamic_config.sets:
+ depgraph_sets = self._dynamic_config.sets[root]
+ depgraph_sets.sets.setdefault('__non_set_args__',
+ InternalPackageSet(allow_repo=True)).clear()
+ depgraph_sets.atoms.clear()
+ depgraph_sets.atom_arg_map.clear()
+ set_atoms[root] = []
+ non_set_atoms[root] = []
+
+ # We don't add set args to the digraph here since that
+ # happens at a later stage and we don't want to make
+ # any state changes here that aren't reversed by
+ # another call to this method.
+ for arg in self._expand_set_args(args, add_to_digraph=False):
+ atom_arg_map = self._dynamic_config.sets[
+ arg.root_config.root].atom_arg_map
+ if isinstance(arg, SetArg):
+ atom_group = set_atoms[arg.root_config.root]
+ else:
+ atom_group = non_set_atoms[arg.root_config.root]
+
+ for atom in arg.pset.getAtoms():
+ atom_group.append(atom)
+ atom_key = (atom, arg.root_config.root)
+ refs = atom_arg_map.get(atom_key)
+ if refs is None:
+ refs = []
+ atom_arg_map[atom_key] = refs
+ if arg not in refs:
+ refs.append(arg)
+
+ for root in self._dynamic_config.sets:
+ depgraph_sets = self._dynamic_config.sets[root]
+ depgraph_sets.atoms.update(chain(set_atoms.get(root, []),
+ non_set_atoms.get(root, [])))
+ depgraph_sets.sets['__non_set_args__'].update(
+ non_set_atoms.get(root, []))
+
+ # Invalidate the package selection cache, since
+ # arguments influence package selections.
+ self._dynamic_config._highest_pkg_cache.clear()
+ for trees in self._dynamic_config._filtered_trees.values():
+ trees["porttree"].dbapi._clear_cache()
+
+ def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
+ """
+ Return a list of slot atoms corresponding to installed slots that
+ differ from the slot of the highest visible match. When
+ blocker_lookahead is True, slot atoms that would trigger a blocker
+ conflict are automatically discarded, potentially allowing automatic
+ uninstallation of older slots when appropriate.
+ """
+ highest_pkg, in_graph = self._select_package(root_config.root, atom)
+ if highest_pkg is None:
+ return []
+ vardb = root_config.trees["vartree"].dbapi
+ slots = set()
+ for cpv in vardb.match(atom):
+ # don't mix new virtuals with old virtuals
+ if portage.cpv_getkey(cpv) == highest_pkg.cp:
+ slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
+
+ slots.add(highest_pkg.metadata["SLOT"])
+ if len(slots) == 1:
+ return []
+ greedy_pkgs = []
+ slots.remove(highest_pkg.metadata["SLOT"])
+ while slots:
+ slot = slots.pop()
+ slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
+ pkg, in_graph = self._select_package(root_config.root, slot_atom)
+ if pkg is not None and \
+ pkg.cp == highest_pkg.cp and pkg < highest_pkg:
+ greedy_pkgs.append(pkg)
+ if not greedy_pkgs:
+ return []
+ if not blocker_lookahead:
+ return [pkg.slot_atom for pkg in greedy_pkgs]
+
+ blockers = {}
+ blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
+ for pkg in greedy_pkgs + [highest_pkg]:
+ dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
+ try:
+ selected_atoms = self._select_atoms(
+ pkg.root, dep_str, self._pkg_use_enabled(pkg),
+ parent=pkg, strict=True)
+ except portage.exception.InvalidDependString:
+ continue
+ blocker_atoms = []
+ for atoms in selected_atoms.values():
+ blocker_atoms.extend(x for x in atoms if x.blocker)
+ blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
+
+ if highest_pkg not in blockers:
+ return []
+
+ # filter packages with invalid deps
+ greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
+
+ # filter packages that conflict with highest_pkg
+ greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
+ (blockers[highest_pkg].findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)) or \
+ blockers[pkg].findAtomForPackage(highest_pkg, modified_use=self._pkg_use_enabled(highest_pkg)))]
+
+ if not greedy_pkgs:
+ return []
+
+ # If two packages conflict, discard the lower version.
+ discard_pkgs = set()
+ greedy_pkgs.sort(reverse=True)
+ for i in range(len(greedy_pkgs) - 1):
+ pkg1 = greedy_pkgs[i]
+ if pkg1 in discard_pkgs:
+ continue
+ for j in range(i + 1, len(greedy_pkgs)):
+ pkg2 = greedy_pkgs[j]
+ if pkg2 in discard_pkgs:
+ continue
+ if blockers[pkg1].findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)) or \
+ blockers[pkg2].findAtomForPackage(pkg1, modified_use=self._pkg_use_enabled(pkg1)):
+ # pkg1 > pkg2
+ discard_pkgs.add(pkg2)
+
+ return [pkg.slot_atom for pkg in greedy_pkgs \
+ if pkg not in discard_pkgs]
+
+ def _select_atoms_from_graph(self, *pargs, **kwargs):
+ """
+ Prefer atoms matching packages that have already been
+ added to the graph or those that are installed and have
+ not been scheduled for replacement.
+ """
+ kwargs["trees"] = self._dynamic_config._graph_trees
+ return self._select_atoms_highest_available(*pargs, **kwargs)
+
+ def _select_atoms_highest_available(self, root, depstring,
+ myuse=None, parent=None, strict=True, trees=None, priority=None):
+ """This will raise InvalidDependString if necessary. If trees is
+ None then self._dynamic_config._filtered_trees is used."""
+
+ pkgsettings = self._frozen_config.pkgsettings[root]
+ if trees is None:
+ trees = self._dynamic_config._filtered_trees
+ mytrees = trees[root]
+ atom_graph = digraph()
+ # Temporarily disable autounmask so that || preferences
+ # account for masking and USE settings.
+ _autounmask_backup = self._dynamic_config._autounmask
+ self._dynamic_config._autounmask = False
+ mytrees["pkg_use_enabled"] = self._pkg_use_enabled
+ try:
+ if parent is not None:
+ trees[root]["parent"] = parent
+ trees[root]["atom_graph"] = atom_graph
+ if priority is not None:
+ trees[root]["priority"] = priority
+ mycheck = portage.dep_check(depstring, None,
+ pkgsettings, myuse=myuse,
+ myroot=root, trees=trees)
+ finally:
+ self._dynamic_config._autounmask = _autounmask_backup
+ del mytrees["pkg_use_enabled"]
+ if parent is not None:
+ trees[root].pop("parent")
+ trees[root].pop("atom_graph")
+ if priority is not None:
+ trees[root].pop("priority")
+ if not mycheck[0]:
+ raise portage.exception.InvalidDependString(mycheck[1])
+ if parent is None:
+ selected_atoms = mycheck[1]
+ elif parent not in atom_graph:
+ selected_atoms = {parent : mycheck[1]}
+ else:
+ # Recursively traversed virtual dependencies, and their
+ # direct dependencies, are considered to have the same
+ # depth as direct dependencies.
+ if parent.depth is None:
+ virt_depth = None
+ else:
+ virt_depth = parent.depth + 1
+ chosen_atom_ids = frozenset(id(atom) for atom in mycheck[1])
+ selected_atoms = OrderedDict()
+ node_stack = [(parent, None, None)]
+ traversed_nodes = set()
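+ # Iterative DFS over atom_graph: each stack entry is
+ # (package_node, its_parent_package, atom_that_pulled_it_in), and
+ # only atoms actually chosen by dep_check (chosen_atom_ids) are kept.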
+ while node_stack:
+ node, node_parent, parent_atom = node_stack.pop()
+ traversed_nodes.add(node)
+ if node is parent:
+ k = parent
+ else:
+ if node_parent is parent:
+ if priority is None:
+ node_priority = None
+ else:
+ node_priority = priority.copy()
+ else:
+ # virtuals only have runtime deps
+ node_priority = self._priority(runtime=True)
+
+ k = Dependency(atom=parent_atom,
+ blocker=parent_atom.blocker, child=node,
+ depth=virt_depth, parent=node_parent,
+ priority=node_priority, root=node.root)
+
+ child_atoms = []
+ selected_atoms[k] = child_atoms
+ for atom_node in atom_graph.child_nodes(node):
+ child_atom = atom_node[0]
+ if id(child_atom) not in chosen_atom_ids:
+ continue
+ child_atoms.append(child_atom)
+ for child_node in atom_graph.child_nodes(atom_node):
+ if child_node in traversed_nodes:
+ continue
+ if not portage.match_from_list(
+ child_atom, [child_node]):
+ # Typically this means that the atom
+ # specifies USE deps that are unsatisfied
+ # by the selected package. The caller will
+ # record this as an unsatisfied dependency
+ # when necessary.
+ continue
+ node_stack.append((child_node, node, child_atom))
+
+ return selected_atoms
+
+ def _expand_virt_from_graph(self, root, atom):
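+ # Expand a new-style virtual into the real atoms selected for it in
+ # the graph: if the best graph match for 'atom' is a virtual/*
+ # package, yield the non-virtual atoms of its RDEPEND instead of the
+ # virtual atom itself. A rough, hypothetical example:
+ #   _expand_virt_from_graph("/", "virtual/libc") -> a sys-libs/glibc atom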
+ if not isinstance(atom, Atom):
+ atom = Atom(atom)
+ graphdb = self._dynamic_config.mydbapi[root]
+ match = graphdb.match_pkgs(atom)
+ if not match:
+ yield atom
+ return
+ pkg = match[-1]
+ if not pkg.cpv.startswith("virtual/"):
+ yield atom
+ return
+ try:
+ rdepend = self._select_atoms_from_graph(
+ pkg.root, pkg.metadata.get("RDEPEND", ""),
+ myuse=self._pkg_use_enabled(pkg),
+ parent=pkg, strict=False)
+ except InvalidDependString as e:
+ writemsg_level("!!! Invalid RDEPEND in " + \
+ "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
+ (pkg.root, pkg.cpv, e),
+ noiselevel=-1, level=logging.ERROR)
+ yield atom
+ return
+
+ for atoms in rdepend.values():
+ for atom in atoms:
+ if hasattr(atom, "_orig_atom"):
+ # Ignore virtual atoms since we're only
+ # interested in expanding the real atoms.
+ continue
+ yield atom
+
+ def _get_dep_chain(self, start_node, target_atom=None,
+ unsatisfied_dependency=False):
+ """
+ Returns a list of (atom, node_type) pairs that represent a dep chain.
+ If target_atom is None, the first package shown is start_node's parent.
+ If target_atom is not None, the first package shown is start_node itself.
+ If unsatisfied_dependency is True, the first parent selected is one whose
+ dependency is not satisfied by start_node. This is needed for USE changes.
+ (Does not support target_atom.)
+ """
+ traversed_nodes = set()
+ dep_chain = []
+ node = start_node
+ child = None
+ all_parents = self._dynamic_config._parent_atoms
+
+ if target_atom is not None and isinstance(node, Package):
+ affecting_use = set()
+ for dep_str in "DEPEND", "RDEPEND", "PDEPEND":
+ try:
+ affecting_use.update(extract_affecting_use(
+ node.metadata[dep_str], target_atom,
+ eapi=node.metadata["EAPI"]))
+ except InvalidDependString:
+ if not node.installed:
+ raise
+ affecting_use.difference_update(node.use.mask, node.use.force)
+ pkg_name = _unicode_decode("%s") % (node.cpv,)
+ if affecting_use:
+ usedep = []
+ for flag in affecting_use:
+ if flag in self._pkg_use_enabled(node):
+ usedep.append(flag)
+ else:
+ usedep.append("-"+flag)
+ pkg_name += "[%s]" % ",".join(usedep)
+
+ dep_chain.append((pkg_name, node.type_name))
+
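+ # Walk up the digraph from start_node toward a root argument,
+ # recording "category/pkg-version[USE]" entries along the way;
+ # parent selection below prefers unsatisfied parents, then parents
+ # in the merge list, then argument nodes.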
+ while node is not None:
+ traversed_nodes.add(node)
+
+ if isinstance(node, DependencyArg):
+ if self._dynamic_config.digraph.parent_nodes(node):
+ node_type = "set"
+ else:
+ node_type = "argument"
+ dep_chain.append((_unicode_decode("%s") % (node,), node_type))
+
+ elif node is not start_node:
+ for ppkg, patom in all_parents[child]:
+ if ppkg == node:
+ atom = patom.unevaluated_atom
+ break
+
+ dep_strings = set()
+ for priority in self._dynamic_config.digraph.nodes[node][0][child]:
+ if priority.buildtime:
+ dep_strings.add(node.metadata["DEPEND"])
+ if priority.runtime:
+ dep_strings.add(node.metadata["RDEPEND"])
+ if priority.runtime_post:
+ dep_strings.add(node.metadata["PDEPEND"])
+
+ affecting_use = set()
+ for dep_str in dep_strings:
+ try:
+ affecting_use.update(extract_affecting_use(
+ dep_str, atom, eapi=node.metadata["EAPI"]))
+ except InvalidDependString:
+ if not node.installed:
+ raise
+
+ # Don't show flags as 'affecting' if the user can't change them.
+ affecting_use.difference_update(node.use.mask, \
+ node.use.force)
+
+ pkg_name = _unicode_decode("%s") % (node.cpv,)
+ if affecting_use:
+ usedep = []
+ for flag in affecting_use:
+ if flag in self._pkg_use_enabled(node):
+ usedep.append(flag)
+ else:
+ usedep.append("-"+flag)
+ pkg_name += "[%s]" % ",".join(usedep)
+
+ dep_chain.append((pkg_name, node.type_name))
+
+ if node not in self._dynamic_config.digraph:
+ # The parent is not in the graph due to backtracking.
+ break
+
+ # When traversing to parents, prefer arguments over packages
+ # since arguments are root nodes. Never traverse the same
+ # package twice, in order to prevent an infinite loop.
+ child = node
+ selected_parent = None
+ parent_arg = None
+ parent_merge = None
+ parent_unsatisfied = None
+
+ for parent in self._dynamic_config.digraph.parent_nodes(node):
+ if parent in traversed_nodes:
+ continue
+ if isinstance(parent, DependencyArg):
+ parent_arg = parent
+ else:
+ if isinstance(parent, Package) and \
+ parent.operation == "merge":
+ parent_merge = parent
+ if unsatisfied_dependency and node is start_node:
+ # Make sure that pkg doesn't satisfy parent's dependency.
+ # This ensures that we select the correct parent for use
+ # flag changes.
+ for ppkg, atom in all_parents[start_node]:
+ if parent is ppkg:
+ atom_set = InternalPackageSet(initial_atoms=(atom,))
+ if not atom_set.findAtomForPackage(start_node):
+ parent_unsatisfied = parent
+ break
+ else:
+ selected_parent = parent
+
+ if parent_unsatisfied is not None:
+ selected_parent = parent_unsatisfied
+ elif parent_merge is not None:
+ # Prefer parent in the merge list (bug #354747).
+ selected_parent = parent_merge
+ elif parent_arg is not None:
+ if self._dynamic_config.digraph.parent_nodes(parent_arg):
+ selected_parent = parent_arg
+ else:
+ dep_chain.append(
+ (_unicode_decode("%s") % (parent_arg,), "argument"))
+ selected_parent = None
+
+ node = selected_parent
+ return dep_chain
+
+ def _get_dep_chain_as_comment(self, pkg, unsatisfied_dependency=False):
+ dep_chain = self._get_dep_chain(pkg, unsatisfied_dependency=unsatisfied_dependency)
+ display_list = []
+ for node, node_type in dep_chain:
+ if node_type == "argument":
+ display_list.append("required by %s (argument)" % node)
+ else:
+ display_list.append("required by %s" % node)
+
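+ # Hypothetical example of the resulting comment line:
+ #   "#required by x11-libs/cairo-1.10.2, required by @world (argument)"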
+ msg = "#" + ", ".join(display_list) + "\n"
+ return msg
+
+
+ def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None,
+ check_backtrack=False, check_autounmask_breakage=False):
+ """
+ When check_backtrack=True, no output is produced and
+ the method either returns or raises _backtrack_mask if
+ a matching package has been masked by backtracking.
+ Likewise, when check_autounmask_breakage=True, no output is
+ produced and _autounmask_breakage is raised if an autounmask
+ change broke a USE dependency.
+ """
+ backtrack_mask = False
+ autounmask_broke_use_dep = False
+ atom_set = InternalPackageSet(initial_atoms=(atom.without_use,),
+ allow_repo=True)
+ atom_set_with_use = InternalPackageSet(initial_atoms=(atom,),
+ allow_repo=True)
+ xinfo = '"%s"' % atom.unevaluated_atom
+ if arg:
+ xinfo='"%s"' % arg
+ if isinstance(myparent, AtomArg):
+ xinfo = _unicode_decode('"%s"') % (myparent,)
+ # Discard null/ from failed cpv_expand category expansion.
+ xinfo = xinfo.replace("null/", "")
+ if root != "/":
+ xinfo = "%s for %s" % (xinfo, root)
+ masked_packages = []
+ missing_use = []
+ missing_use_adjustable = set()
+ required_use_unsatisfied = []
+ masked_pkg_instances = set()
+ missing_licenses = []
+ have_eapi_mask = False
+ pkgsettings = self._frozen_config.pkgsettings[root]
+ root_config = self._frozen_config.roots[root]
+ portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
+ vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
+ bindb = self._frozen_config.roots[root].trees["bintree"].dbapi
+ dbs = self._dynamic_config._filtered_trees[root]["dbs"]
+ for db, pkg_type, built, installed, db_keys in dbs:
+ if installed:
+ continue
+ match = db.match
+ if hasattr(db, "xmatch"):
+ cpv_list = db.xmatch("match-all-cpv-only", atom.without_use)
+ else:
+ cpv_list = db.match(atom.without_use)
+
+ if atom.repo is None and hasattr(db, "getRepositories"):
+ repo_list = db.getRepositories()
+ else:
+ repo_list = [atom.repo]
+
+ # descending order
+ cpv_list.reverse()
+ for cpv in cpv_list:
+ for repo in repo_list:
+ if not db.cpv_exists(cpv, myrepo=repo):
+ continue
+
+ metadata, mreasons = get_mask_info(root_config, cpv, pkgsettings, db, pkg_type, \
+ built, installed, db_keys, myrepo=repo, _pkg_use_enabled=self._pkg_use_enabled)
+ if metadata is not None and \
+ portage.eapi_is_supported(metadata["EAPI"]):
+ if not repo:
+ repo = metadata.get('repository')
+ pkg = self._pkg(cpv, pkg_type, root_config,
+ installed=installed, myrepo=repo)
+ if not atom_set.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg)):
+ continue
+ # pkg.metadata contains calculated USE for ebuilds,
+ # required later for getMissingLicenses.
+ metadata = pkg.metadata
+ if pkg in self._dynamic_config._runtime_pkg_mask:
+ backtrack_reasons = \
+ self._dynamic_config._runtime_pkg_mask[pkg]
+ mreasons.append('backtracking: %s' % \
+ ', '.join(sorted(backtrack_reasons)))
+ backtrack_mask = True
+ if not mreasons and self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
+ modified_use=self._pkg_use_enabled(pkg)):
+ mreasons = ["exclude option"]
+ if mreasons:
+ masked_pkg_instances.add(pkg)
+ if atom.unevaluated_atom.use:
+ try:
+ if not pkg.iuse.is_valid_flag(atom.unevaluated_atom.use.required) \
+ or atom.violated_conditionals(self._pkg_use_enabled(pkg), pkg.iuse.is_valid_flag).use:
+ missing_use.append(pkg)
+ if atom_set_with_use.findAtomForPackage(pkg):
+ autounmask_broke_use_dep = True
+ if not mreasons:
+ continue
+ except InvalidAtom:
+ writemsg("violated_conditionals raised " + \
+ "InvalidAtom: '%s' parent: %s" % \
+ (atom, myparent), noiselevel=-1)
+ raise
+ if not mreasons and \
+ not pkg.built and \
+ pkg.metadata["REQUIRED_USE"] and \
+ eapi_has_required_use(pkg.metadata["EAPI"]):
+ if not check_required_use(
+ pkg.metadata["REQUIRED_USE"],
+ self._pkg_use_enabled(pkg),
+ pkg.iuse.is_valid_flag):
+ required_use_unsatisfied.append(pkg)
+ continue
+ root_slot = (pkg.root, pkg.slot_atom)
+ if pkg.built and root_slot in self._rebuild.rebuild_list:
+ mreasons = ["need to rebuild from source"]
+ elif pkg.installed and root_slot in self._rebuild.reinstall_list:
+ mreasons = ["need to rebuild from source"]
+ elif pkg.built and not mreasons:
+ mreasons = ["use flag configuration mismatch"]
+ masked_packages.append(
+ (root_config, pkgsettings, cpv, repo, metadata, mreasons))
+
+ if check_backtrack:
+ if backtrack_mask:
+ raise self._backtrack_mask()
+ else:
+ return
+
+ if check_autounmask_breakage:
+ if autounmask_broke_use_dep:
+ raise self._autounmask_breakage()
+ else:
+ return
+
+ missing_use_reasons = []
+ missing_iuse_reasons = []
+ for pkg in missing_use:
+ use = self._pkg_use_enabled(pkg)
+ # Use the unevaluated atom here, because some flags might have been
+ # lost during evaluation.
+ required_flags = atom.unevaluated_atom.use.required
+ missing_iuse = pkg.iuse.get_missing_iuse(required_flags)
+
+ mreasons = []
+ if missing_iuse:
+ mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
+ missing_iuse_reasons.append((pkg, mreasons))
+ else:
+ need_enable = sorted(atom.use.enabled.difference(use).intersection(pkg.iuse.all))
+ need_disable = sorted(atom.use.disabled.intersection(use).intersection(pkg.iuse.all))
+
+ untouchable_flags = \
+ frozenset(chain(pkg.use.mask, pkg.use.force))
+ if untouchable_flags.intersection(
+ chain(need_enable, need_disable)):
+ continue
+
+ missing_use_adjustable.add(pkg)
+ required_use = pkg.metadata["REQUIRED_USE"]
+ required_use_warning = ""
+ if required_use:
+ old_use = self._pkg_use_enabled(pkg)
+ new_use = set(self._pkg_use_enabled(pkg))
+ for flag in need_enable:
+ new_use.add(flag)
+ for flag in need_disable:
+ new_use.discard(flag)
+ if check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
+ not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
+ required_use_warning = ", this change violates use flag constraints " + \
+ "defined by %s: '%s'" % (pkg.cpv, human_readable_required_use(required_use))
+
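+ # E.g. (hypothetical): with REQUIRED_USE="^^ ( gtk qt4 )" and qt4
+ # already enabled, suggesting "+gtk" alone would violate the
+ # constraint, so the warning above is appended to the USE hint.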
+ if need_enable or need_disable:
+ changes = []
+ changes.extend(colorize("red", "+" + x) \
+ for x in need_enable)
+ changes.extend(colorize("blue", "-" + x) \
+ for x in need_disable)
+ mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
+ missing_use_reasons.append((pkg, mreasons))
+
+ if not missing_iuse and myparent and atom.unevaluated_atom.use.conditional:
+ # Let's see if the violated use deps are conditional.
+ # If so, suggest to change them on the parent.
+
+ # If the child package is masked then a change to
+ # parent USE is not a valid solution (a normal mask
+ # message should be displayed instead).
+ if pkg in masked_pkg_instances:
+ continue
+
+ mreasons = []
+ violated_atom = atom.unevaluated_atom.violated_conditionals(self._pkg_use_enabled(pkg), \
+ pkg.iuse.is_valid_flag, self._pkg_use_enabled(myparent))
+ if not (violated_atom.use.enabled or violated_atom.use.disabled):
+ #all violated use deps are conditional
+ changes = []
+ conditional = violated_atom.use.conditional
+ involved_flags = set(chain(conditional.equal, conditional.not_equal, \
+ conditional.enabled, conditional.disabled))
+
+ untouchable_flags = \
+ frozenset(chain(myparent.use.mask, myparent.use.force))
+ if untouchable_flags.intersection(involved_flags):
+ continue
+
+ required_use = myparent.metadata["REQUIRED_USE"]
+ required_use_warning = ""
+ if required_use:
+ old_use = self._pkg_use_enabled(myparent)
+ new_use = set(self._pkg_use_enabled(myparent))
+ for flag in involved_flags:
+ if flag in old_use:
+ new_use.discard(flag)
+ else:
+ new_use.add(flag)
+ if check_required_use(required_use, old_use, myparent.iuse.is_valid_flag) and \
+ not check_required_use(required_use, new_use, myparent.iuse.is_valid_flag):
+ required_use_warning = ", this change violates use flag constraints " + \
+ "defined by %s: '%s'" % (myparent.cpv, \
+ human_readable_required_use(required_use))
+
+ for flag in involved_flags:
+ if flag in self._pkg_use_enabled(myparent):
+ changes.append(colorize("blue", "-" + flag))
+ else:
+ changes.append(colorize("red", "+" + flag))
+ mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
+ if (myparent, mreasons) not in missing_use_reasons:
+ missing_use_reasons.append((myparent, mreasons))
+
+ unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
+ in missing_use_reasons if pkg not in masked_pkg_instances]
+
+ unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
+ in missing_iuse_reasons if pkg not in masked_pkg_instances]
+
+ show_missing_use = False
+ if unmasked_use_reasons:
+ # Only show the latest version.
+ show_missing_use = []
+ pkg_reason = None
+ parent_reason = None
+ for pkg, mreasons in unmasked_use_reasons:
+ if pkg is myparent:
+ if parent_reason is None:
+ #This happens if a use change on the parent
+ #leads to a satisfied conditional use dep.
+ parent_reason = (pkg, mreasons)
+ elif pkg_reason is None:
+ # Don't rely on the first pkg in unmasked_use_reasons
+ # being the highest version of the dependency.
+ pkg_reason = (pkg, mreasons)
+ if pkg_reason:
+ show_missing_use.append(pkg_reason)
+ if parent_reason:
+ show_missing_use.append(parent_reason)
+
+ elif unmasked_iuse_reasons:
+ masked_with_iuse = False
+ for pkg in masked_pkg_instances:
+ # Use atom.unevaluated_atom here, because some flags might have been
+ # lost during evaluation.
+ if not pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
+ # Package(s) with required IUSE are masked,
+ # so display a normal masking message.
+ masked_with_iuse = True
+ break
+ if not masked_with_iuse:
+ show_missing_use = unmasked_iuse_reasons
+
+ if required_use_unsatisfied:
+ # If there's a higher unmasked version in missing_use_adjustable
+ # then we want to show that instead.
+ for pkg in missing_use_adjustable:
+ if pkg not in masked_pkg_instances and \
+ pkg > required_use_unsatisfied[0]:
+ required_use_unsatisfied = False
+ break
+
+ mask_docs = False
+
+ if required_use_unsatisfied:
+ # We have an unmasked package that only requires USE adjustment
+ # in order to satisfy REQUIRED_USE, and nothing more. We assume
+ # that the user wants the latest version, so only the first
+ # instance is displayed.
+ pkg = required_use_unsatisfied[0]
+ output_cpv = pkg.cpv + _repo_separator + pkg.repo
+ writemsg_stdout("\n!!! " + \
+ colorize("BAD", "The ebuild selected to satisfy ") + \
+ colorize("INFORM", xinfo) + \
+ colorize("BAD", " has unmet requirements.") + "\n",
+ noiselevel=-1)
+ use_display = pkg_use_display(pkg, self._frozen_config.myopts)
+ writemsg_stdout("- %s %s\n" % (output_cpv, use_display),
+ noiselevel=-1)
+ writemsg_stdout("\n The following REQUIRED_USE flag constraints " + \
+ "are unsatisfied:\n", noiselevel=-1)
+ reduced_noise = check_required_use(
+ pkg.metadata["REQUIRED_USE"],
+ self._pkg_use_enabled(pkg),
+ pkg.iuse.is_valid_flag).tounicode()
+ writemsg_stdout(" %s\n" % \
+ human_readable_required_use(reduced_noise),
+ noiselevel=-1)
+ normalized_required_use = \
+ " ".join(pkg.metadata["REQUIRED_USE"].split())
+ if reduced_noise != normalized_required_use:
+ writemsg_stdout("\n The above constraints " + \
+ "are a subset of the following complete expression:\n",
+ noiselevel=-1)
+ writemsg_stdout(" %s\n" % \
+ human_readable_required_use(normalized_required_use),
+ noiselevel=-1)
+ writemsg_stdout("\n", noiselevel=-1)
+
+ elif show_missing_use:
+ writemsg_stdout("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
+ writemsg_stdout("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
+ for pkg, mreasons in show_missing_use:
+ writemsg_stdout("- "+pkg.cpv+_repo_separator+pkg.repo+" ("+", ".join(mreasons)+")\n", noiselevel=-1)
+
+ elif masked_packages:
+ writemsg_stdout("\n!!! " + \
+ colorize("BAD", "All ebuilds that could satisfy ") + \
+ colorize("INFORM", xinfo) + \
+ colorize("BAD", " have been masked.") + "\n", noiselevel=-1)
+ writemsg_stdout("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
+ have_eapi_mask = show_masked_packages(masked_packages)
+ if have_eapi_mask:
+ writemsg_stdout("\n", noiselevel=-1)
+ msg = ("The current version of portage supports " + \
+ "EAPI '%s'. You must upgrade to a newer version" + \
+ " of portage before EAPI masked packages can" + \
+ " be installed.") % portage.const.EAPI
+ writemsg_stdout("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
+ writemsg_stdout("\n", noiselevel=-1)
+ mask_docs = True
+ else:
+ cp_exists = False
+ if not atom.cp.startswith("null/"):
+ for pkg in self._iter_match_pkgs_any(
+ root_config, Atom(atom.cp)):
+ cp_exists = True
+ break
+
+ writemsg_stdout("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".\n", noiselevel=-1)
+ if isinstance(myparent, AtomArg) and \
+ not cp_exists and \
+ self._frozen_config.myopts.get(
+ "--misspell-suggestions", "y") != "n":
+ cp = myparent.atom.cp.lower()
+ cat, pkg = portage.catsplit(cp)
+ if cat == "null":
+ cat = None
+
+ writemsg_stdout("\nemerge: searching for similar names..."
+ , noiselevel=-1)
+
+ all_cp = set()
+ all_cp.update(vardb.cp_all())
+ all_cp.update(portdb.cp_all())
+ if "--usepkg" in self._frozen_config.myopts:
+ all_cp.update(bindb.cp_all())
+ # discard dir containing no ebuilds
+ all_cp.discard(cp)
+
+ orig_cp_map = {}
+ for cp_orig in all_cp:
+ orig_cp_map.setdefault(cp_orig.lower(), []).append(cp_orig)
+ all_cp = set(orig_cp_map)
+
+ if cat:
+ matches = difflib.get_close_matches(cp, all_cp)
+ else:
+ pkg_to_cp = {}
+ for other_cp in list(all_cp):
+ other_pkg = portage.catsplit(other_cp)[1]
+ if other_pkg == pkg:
+ # discard dir containing no ebuilds
+ all_cp.discard(other_cp)
+ continue
+ pkg_to_cp.setdefault(other_pkg, set()).add(other_cp)
+ pkg_matches = difflib.get_close_matches(pkg, pkg_to_cp)
+ matches = []
+ for pkg_match in pkg_matches:
+ matches.extend(pkg_to_cp[pkg_match])
+
+ matches_orig_case = []
+ for cp in matches:
+ matches_orig_case.extend(orig_cp_map[cp])
+ matches = matches_orig_case
+
+ if len(matches) == 1:
+ writemsg_stdout("\nemerge: Maybe you meant " + matches[0] + "?\n"
+ , noiselevel=-1)
+ elif len(matches) > 1:
+ writemsg_stdout(
+ "\nemerge: Maybe you meant any of these: %s?\n" % \
+ (", ".join(matches),), noiselevel=-1)
+ else:
+ # Generally, this would only happen if
+ # all dbapis are empty.
+ writemsg_stdout(" nothing similar found.\n"
+ , noiselevel=-1)
+ msg = []
+ if not isinstance(myparent, AtomArg):
+ # It's redundant to show parent for AtomArg since
+ # it's the same as 'xinfo' displayed above.
+ dep_chain = self._get_dep_chain(myparent, atom)
+ for node, node_type in dep_chain:
+ msg.append('(dependency required by "%s" [%s])' % \
+ (colorize('INFORM', _unicode_decode("%s") % \
+ (node)), node_type))
+
+ if msg:
+ writemsg_stdout("\n".join(msg), noiselevel=-1)
+ writemsg_stdout("\n", noiselevel=-1)
+
+ if mask_docs:
+ show_mask_docs()
+ writemsg_stdout("\n", noiselevel=-1)
+
+ def _iter_match_pkgs_any(self, root_config, atom, onlydeps=False):
+ for db, pkg_type, built, installed, db_keys in \
+ self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
+ for pkg in self._iter_match_pkgs(root_config,
+ pkg_type, atom, onlydeps=onlydeps):
+ yield pkg
+
+ def _iter_match_pkgs(self, root_config, pkg_type, atom, onlydeps=False):
+ """
+ Iterate over Package instances of pkg_type matching the given atom.
+ This does not check visibility and it also does not match USE for
+ unbuilt ebuilds since USE is lazily calculated after visibility
+ checks (to avoid the expense when possible).
+ """
+
+ db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
+
+ if hasattr(db, "xmatch"):
+ # For portdbapi we match only against the cpv, in order
+ # to bypass unnecessary cache access for things like IUSE
+ # and SLOT. Later, we cache the metadata in a Package
+ # instance, and use that for further matching. This
+ # optimization is especially relevant since
+ # portdbapi.aux_get() does not cache calls that have
+ # myrepo or mytree arguments.
+ cpv_list = db.xmatch("match-all-cpv-only", atom)
+ else:
+ cpv_list = db.match(atom)
+
+ # USE=multislot can make an installed package appear as if
+ # it doesn't satisfy a slot dependency. Rebuilding the ebuild
+ # won't do any good as long as USE=multislot is enabled since
+ # the newly built package still won't have the expected slot.
+ # Therefore, assume that such SLOT dependencies are already
+ # satisfied rather than forcing a rebuild.
+ installed = pkg_type == 'installed'
+ if installed and not cpv_list and atom.slot:
+ for cpv in db.match(atom.cp):
+ slot_available = False
+ for other_db, other_type, other_built, \
+ other_installed, other_keys in \
+ self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
+ try:
+ if atom.slot == \
+ other_db.aux_get(cpv, ["SLOT"])[0]:
+ slot_available = True
+ break
+ except KeyError:
+ pass
+ if not slot_available:
+ continue
+ inst_pkg = self._pkg(cpv, "installed",
+ root_config, installed=installed, myrepo = atom.repo)
+ # Remove the slot from the atom and verify that
+ # the package matches the resulting atom.
+ if portage.match_from_list(
+ atom.without_slot, [inst_pkg]):
+ yield inst_pkg
+ return
+
+ if cpv_list:
+ atom_set = InternalPackageSet(initial_atoms=(atom,),
+ allow_repo=True)
+ if atom.repo is None and hasattr(db, "getRepositories"):
+ repo_list = db.getRepositories()
+ else:
+ repo_list = [atom.repo]
+
+ # descending order
+ cpv_list.reverse()
+ for cpv in cpv_list:
+ for repo in repo_list:
+
+ try:
+ pkg = self._pkg(cpv, pkg_type, root_config,
+ installed=installed, onlydeps=onlydeps, myrepo=repo)
+ except portage.exception.PackageNotFound:
+ pass
+ else:
+ # A cpv can be returned from dbapi.match() as an
+ # old-style virtual match even in cases when the
+ # package does not actually PROVIDE the virtual.
+ # Filter out any such false matches here.
+
+ # Make sure that cpv from the current repo satisfies the atom.
+ # This might not be the case if there are several repos with
+ # the same cpv, but different metadata keys, like SLOT.
+ # Also, for portdbapi, parts of the match that require
+ # metadata access are deferred until we have cached the
+ # metadata in a Package instance.
+ if not atom_set.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg)):
+ continue
+ yield pkg
+
+ def _select_pkg_highest_available(self, root, atom, onlydeps=False):
+ cache_key = (root, atom, onlydeps)
+ ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
+ if ret is not None:
+ pkg, existing = ret
+ if pkg and not existing:
+ existing = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
+ if existing and existing == pkg:
+ # Update the cache to reflect that the
+ # package has been added to the graph.
+ ret = pkg, pkg
+ self._dynamic_config._highest_pkg_cache[cache_key] = ret
+ return ret
+ ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
+ self._dynamic_config._highest_pkg_cache[cache_key] = ret
+ pkg, existing = ret
+ if pkg is not None:
+ if self._pkg_visibility_check(pkg) and \
+ not (pkg.installed and pkg.masks):
+ self._dynamic_config._visible_pkgs[pkg.root].cpv_inject(pkg)
+ return ret
+
+ def _want_installed_pkg(self, pkg):
+ """
+ Given an installed package returned from select_pkg, return
+ True if the user has not explicitly requested for this package
+ to be replaced (typically via an atom on the command line).
+ """
+ if "selective" not in self._dynamic_config.myparams and \
+ pkg.root == self._frozen_config.target_root:
+ if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg)):
+ return True
+ try:
+ next(self._iter_atoms_for_pkg(pkg))
+ except StopIteration:
+ pass
+ except portage.exception.InvalidDependString:
+ pass
+ else:
+ return False
+ return True
+
+ def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
+ pkg, existing = self._wrapped_select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
+
+ default_selection = (pkg, existing)
+
+ if self._dynamic_config._autounmask is True:
+ if pkg is not None and \
+ pkg.installed and \
+ not self._want_installed_pkg(pkg):
+ pkg = None
+
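+ # Autounmask escalation: first retry allowing only USE changes;
+ # if that fails, also allow unstable keywords and license changes;
+ # finally allow package.mask/missing-keyword unmasks as well.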
+ for only_use_changes in True, False:
+ if pkg is not None:
+ break
+
+ for allow_unmasks in (False, True):
+ if only_use_changes and allow_unmasks:
+ continue
+
+ if pkg is not None:
+ break
+
+ pkg, existing = \
+ self._wrapped_select_pkg_highest_available_imp(
+ root, atom, onlydeps=onlydeps,
+ allow_use_changes=True,
+ allow_unstable_keywords=(not only_use_changes),
+ allow_license_changes=(not only_use_changes),
+ allow_unmasks=allow_unmasks)
+
+ if pkg is not None and \
+ pkg.installed and \
+ not self._want_installed_pkg(pkg):
+ pkg = None
+
+ if self._dynamic_config._need_restart:
+ return None, None
+
+ if pkg is None:
+ # This ensures that we can fall back to an installed package
+ # that may have been rejected in the autounmask path above.
+ return default_selection
+
+ return pkg, existing
+
+ def _pkg_visibility_check(self, pkg, allow_unstable_keywords=False, allow_license_changes=False, allow_unmasks=False):
+
+ if pkg.visible:
+ return True
+
+ if pkg in self._dynamic_config.digraph:
+ # Sometimes we need to temporarily disable
+ # dynamic_config._autounmask, but for overall
+ # consistency in dependency resolution, in any
+ # case we want to respect autounmask visibility
+ # for packages that have already been added to
+ # the dependency graph.
+ return True
+
+ if not self._dynamic_config._autounmask:
+ return False
+
+ pkgsettings = self._frozen_config.pkgsettings[pkg.root]
+ root_config = self._frozen_config.roots[pkg.root]
+ mreasons = _get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
+
+ masked_by_unstable_keywords = False
+ masked_by_missing_keywords = False
+ missing_licenses = None
+ masked_by_something_else = False
+ masked_by_p_mask = False
+
+ for reason in mreasons:
+ hint = reason.unmask_hint
+
+ if hint is None:
+ masked_by_something_else = True
+ elif hint.key == "unstable keyword":
+ masked_by_unstable_keywords = True
+ if hint.value == "**":
+ masked_by_missing_keywords = True
+ elif hint.key == "p_mask":
+ masked_by_p_mask = True
+ elif hint.key == "license":
+ missing_licenses = hint.value
+ else:
+ masked_by_something_else = True
+
+ if masked_by_something_else:
+ return False
+
+ if pkg in self._dynamic_config._needed_unstable_keywords:
+ # If a keyword change is already scheduled for this package, drop the mask.
+ masked_by_unstable_keywords = False
+ masked_by_missing_keywords = False
+
+ if pkg in self._dynamic_config._needed_p_mask_changes:
+ # If a package.mask change is already scheduled for this package, drop the mask.
+ masked_by_p_mask = False
+
+ if missing_licenses:
+ #If the needed licenses are already unmasked, remove the mask.
+ missing_licenses.difference_update(self._dynamic_config._needed_license_changes.get(pkg, set()))
+
+ if not (masked_by_unstable_keywords or masked_by_p_mask or missing_licenses):
+ #Package has already been unmasked.
+ return True
+
+ #We treat missing keywords in the same way as masks.
+ if (masked_by_unstable_keywords and not allow_unstable_keywords) or \
+ (masked_by_missing_keywords and not allow_unmasks) or \
+ (masked_by_p_mask and not allow_unmasks) or \
+ (missing_licenses and not allow_license_changes):
+ #We are not allowed to do the needed changes.
+ return False
+
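+ # Record each accepted change both in _dynamic_config (for this run)
+ # and in _backtrack_infos["config"], so a depgraph restart can replay
+ # it; roughly, backtrack_infos["config"][kind] is a set of packages
+ # (or (pkg, frozenset(licenses)) pairs for license changes).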
+ if masked_by_unstable_keywords:
+ self._dynamic_config._needed_unstable_keywords.add(pkg)
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ backtrack_infos.setdefault("config", {})
+ backtrack_infos["config"].setdefault("needed_unstable_keywords", set())
+ backtrack_infos["config"]["needed_unstable_keywords"].add(pkg)
+
+ if masked_by_p_mask:
+ self._dynamic_config._needed_p_mask_changes.add(pkg)
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ backtrack_infos.setdefault("config", {})
+ backtrack_infos["config"].setdefault("needed_p_mask_changes", set())
+ backtrack_infos["config"]["needed_p_mask_changes"].add(pkg)
+
+ if missing_licenses:
+ self._dynamic_config._needed_license_changes.setdefault(pkg, set()).update(missing_licenses)
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ backtrack_infos.setdefault("config", {})
+ backtrack_infos["config"].setdefault("needed_license_changes", set())
+ backtrack_infos["config"]["needed_license_changes"].add((pkg, frozenset(missing_licenses)))
+
+ return True
+
+ def _pkg_use_enabled(self, pkg, target_use=None):
+ """
+ If target_use is None, returns pkg.use.enabled + changes in _needed_use_config_changes.
+ If target_use is given, the needed changes are computed to make the package usable.
+ Example: target_use = { "foo": True, "bar": False }
+ The flags in target_use must be in the pkg's IUSE.
+ """
+ if pkg.built:
+ return pkg.use.enabled
+ needed_use_config_change = self._dynamic_config._needed_use_config_changes.get(pkg)
+
+ if target_use is None:
+ if needed_use_config_change is None:
+ return pkg.use.enabled
+ else:
+ return needed_use_config_change[0]
+
+ if needed_use_config_change is not None:
+ old_use = needed_use_config_change[0]
+ new_use = set()
+ old_changes = needed_use_config_change[1]
+ new_changes = old_changes.copy()
+ else:
+ old_use = pkg.use.enabled
+ new_use = set()
+ old_changes = {}
+ new_changes = {}
+
+ for flag, state in target_use.items():
+ if state:
+ if flag not in old_use:
+ if new_changes.get(flag) == False:
+ return old_use
+ new_changes[flag] = True
+ new_use.add(flag)
+ else:
+ if flag in old_use:
+ if new_changes.get(flag) == True:
+ return old_use
+ new_changes[flag] = False
+ new_use.update(old_use.difference(target_use))
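+ # Worked (hypothetical) example: old_use={"bar","baz"} with
+ # target_use={"foo": True, "bar": False} gives
+ # new_use={"foo","baz"} and new_changes={"foo": True, "bar": False}.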
+
+ def want_restart_for_use_change(pkg, new_use):
+ if pkg not in self._dynamic_config.digraph.nodes:
+ return False
+
+ for key in "DEPEND", "RDEPEND", "PDEPEND", "LICENSE":
+ dep = pkg.metadata[key]
+ old_val = set(portage.dep.use_reduce(dep, pkg.use.enabled, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
+ new_val = set(portage.dep.use_reduce(dep, new_use, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
+
+ if old_val != new_val:
+ return True
+
+ parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
+ if not parent_atoms:
+ return False
+
+ new_use, changes = self._dynamic_config._needed_use_config_changes.get(pkg)
+ for ppkg, atom in parent_atoms:
+ if not atom.use or \
+ not atom.use.required.intersection(changes):
+ continue
+ else:
+ return True
+
+ return False
+
+ if new_changes != old_changes:
+ #Don't do the change if it violates REQUIRED_USE.
+ required_use = pkg.metadata["REQUIRED_USE"]
+ if required_use and check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
+ not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
+ return old_use
+
+ if pkg.use.mask.intersection(new_changes) or \
+ pkg.use.force.intersection(new_changes):
+ return old_use
+
+ self._dynamic_config._needed_use_config_changes[pkg] = (new_use, new_changes)
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ backtrack_infos.setdefault("config", {})
+ backtrack_infos["config"].setdefault("needed_use_config_changes", [])
+ backtrack_infos["config"]["needed_use_config_changes"].append((pkg, (new_use, new_changes)))
+ if want_restart_for_use_change(pkg, new_use):
+ self._dynamic_config._need_restart = True
+ return new_use
+
+ def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, \
+ allow_use_changes=False, allow_unstable_keywords=False, allow_license_changes=False, allow_unmasks=False):
+ root_config = self._frozen_config.roots[root]
+ pkgsettings = self._frozen_config.pkgsettings[root]
+ dbs = self._dynamic_config._filtered_trees[root]["dbs"]
+ vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
+ portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
+ # List of acceptable packages, ordered by type preference.
+ matched_packages = []
+ matched_pkgs_ignore_use = []
+ highest_version = None
+ if not isinstance(atom, portage.dep.Atom):
+ atom = portage.dep.Atom(atom)
+ atom_cp = atom.cp
+ atom_set = InternalPackageSet(initial_atoms=(atom,), allow_repo=True)
+ existing_node = None
+ myeb = None
+ rebuilt_binaries = 'rebuilt_binaries' in self._dynamic_config.myparams
+ usepkg = "--usepkg" in self._frozen_config.myopts
+ usepkgonly = "--usepkgonly" in self._frozen_config.myopts
+ empty = "empty" in self._dynamic_config.myparams
+ selective = "selective" in self._dynamic_config.myparams
+ reinstall = False
+ avoid_update = "--update" not in self._frozen_config.myopts
+ dont_miss_updates = "--update" in self._frozen_config.myopts
+ use_ebuild_visibility = self._frozen_config.myopts.get(
+ '--use-ebuild-visibility', 'n') != 'n'
+ reinstall_atoms = self._frozen_config.reinstall_atoms
+ usepkg_exclude = self._frozen_config.usepkg_exclude
+ useoldpkg_atoms = self._frozen_config.useoldpkg_atoms
+ matched_oldpkg = []
+ # Behavior of the "selective" parameter depends on
+ # whether or not a package matches an argument atom.
+ # If an installed package provides an old-style
+ # virtual that is no longer provided by an available
+ # package, the installed package may match an argument
+ # atom even though none of the available packages do.
+ # Therefore, "selective" logic does not consider
+ # whether or not an installed package matches an
+ # argument atom. It only considers whether or not
+ # available packages match argument atoms, which is
+ # represented by the found_available_arg flag.
+ found_available_arg = False
+ packages_with_invalid_use_config = []
+ for find_existing_node in True, False:
+ if existing_node:
+ break
+ for db, pkg_type, built, installed, db_keys in dbs:
+ if existing_node:
+ break
+ if installed and not find_existing_node:
+ want_reinstall = reinstall or empty or \
+ (found_available_arg and not selective)
+ if want_reinstall and matched_packages:
+ continue
+
+ # Ignore USE deps for the initial match since we want to
+ # ensure that updates aren't missed solely due to the user's
+ # USE configuration.
+ for pkg in self._iter_match_pkgs(root_config, pkg_type, atom.without_use,
+ onlydeps=onlydeps):
+ if pkg in self._dynamic_config._runtime_pkg_mask:
+ # The package has been masked by the backtracking logic
+ continue
+ root_slot = (pkg.root, pkg.slot_atom)
+ if pkg.built and root_slot in self._rebuild.rebuild_list:
+ continue
+ if (pkg.installed and
+ root_slot in self._rebuild.reinstall_list):
+ continue
+
+ if not pkg.installed and \
+ self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
+ modified_use=self._pkg_use_enabled(pkg)):
+ continue
+
+ if built and not installed and usepkg_exclude.findAtomForPackage(pkg, \
+ modified_use=self._pkg_use_enabled(pkg)):
+ break
+
+ useoldpkg = useoldpkg_atoms.findAtomForPackage(pkg, \
+ modified_use=self._pkg_use_enabled(pkg))
+
+ if packages_with_invalid_use_config and (not built or not useoldpkg) and \
+ (not pkg.installed or dont_miss_updates):
+ # Check if a higher version was rejected due to user
+ # USE configuration. The packages_with_invalid_use_config
+ # list only contains unbuilt ebuilds since USE can't
+ # be changed for built packages.
+ higher_version_rejected = False
+ repo_priority = pkg.repo_priority
+ for rejected in packages_with_invalid_use_config:
+ if rejected.cp != pkg.cp:
+ continue
+ if rejected > pkg:
+ higher_version_rejected = True
+ break
+ if portage.dep.cpvequal(rejected.cpv, pkg.cpv):
+ # If version is identical then compare
+ # repo priority (see bug #350254).
+ rej_repo_priority = rejected.repo_priority
+ if rej_repo_priority is not None and \
+ (repo_priority is None or
+ rej_repo_priority > repo_priority):
+ higher_version_rejected = True
+ break
+ if higher_version_rejected:
+ continue
+
+ cpv = pkg.cpv
+ reinstall_for_flags = None
+
+ if not pkg.installed or \
+ (matched_packages and not avoid_update):
+ # Only enforce visibility on installed packages
+ # if there is at least one other visible package
+ # available. By filtering installed masked packages
+ # here, packages that have been masked since they
+ # were installed can be automatically downgraded
+ # to an unmasked version. NOTE: This code needs to
+ # be consistent with masking behavior inside
+ # _dep_check_composite_db, in order to prevent
+ # incorrect choices in || deps like bug #351828.
+
+ if not self._pkg_visibility_check(pkg, \
+ allow_unstable_keywords=allow_unstable_keywords,
+ allow_license_changes=allow_license_changes,
+ allow_unmasks=allow_unmasks):
+ continue
+
+ # Enable upgrade or downgrade to a version
+ # with visible KEYWORDS when the installed
+ # version is masked by KEYWORDS, but never
+ # reinstall the same exact version only due
+ # to a KEYWORDS mask. See bug #252167.
+
+ if pkg.type_name != "ebuild" and matched_packages:
+ # Don't re-install a binary package that is
+ # identical to the currently installed package
+ # (see bug #354441).
+ identical_binary = False
+ if usepkg and pkg.installed:
+ for selected_pkg in matched_packages:
+ if selected_pkg.type_name == "binary" and \
+ selected_pkg.cpv == pkg.cpv and \
+ selected_pkg.metadata.get('BUILD_TIME') == \
+ pkg.metadata.get('BUILD_TIME'):
+ identical_binary = True
+ break
+
+ if not identical_binary:
+ # If the ebuild no longer exists or its
+ # keywords have been dropped, reject built
+ # instances (installed or binary).
+ # If --usepkgonly is enabled, assume that
+ # the ebuild status should be ignored.
+ if not use_ebuild_visibility and (usepkgonly or useoldpkg):
+ if pkg.installed and pkg.masks:
+ continue
+ else:
+ try:
+ pkg_eb = self._pkg(
+ pkg.cpv, "ebuild", root_config, myrepo=pkg.repo)
+ except portage.exception.PackageNotFound:
+ pkg_eb_visible = False
+ for pkg_eb in self._iter_match_pkgs(pkg.root_config,
+ "ebuild", Atom("=%s" % (pkg.cpv,))):
+ if self._pkg_visibility_check(pkg_eb, \
+ allow_unstable_keywords=allow_unstable_keywords,
+ allow_license_changes=allow_license_changes,
+ allow_unmasks=allow_unmasks):
+ pkg_eb_visible = True
+ break
+ if not pkg_eb_visible:
+ continue
+ else:
+ if not self._pkg_visibility_check(pkg_eb, \
+ allow_unstable_keywords=allow_unstable_keywords,
+ allow_license_changes=allow_license_changes,
+ allow_unmasks=allow_unmasks):
+ continue
+
+ # Calculation of USE for unbuilt ebuilds is relatively
+ # expensive, so it is only performed lazily, after the
+ # above visibility checks are complete.
+
+ myarg = None
+ if root == self._frozen_config.target_root:
+ try:
+ myarg = next(self._iter_atoms_for_pkg(pkg))
+ except StopIteration:
+ pass
+ except portage.exception.InvalidDependString:
+ if not installed:
+ # masked by corruption
+ continue
+ if not installed and myarg:
+ found_available_arg = True
+
+ if atom.unevaluated_atom.use:
+ #Make sure we don't miss a 'missing IUSE'.
+ if pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
+ # Don't add this to packages_with_invalid_use_config
+ # since IUSE cannot be adjusted by the user.
+ continue
+
+ if atom.use:
+
+ matched_pkgs_ignore_use.append(pkg)
+ if allow_use_changes and not pkg.built:
+ target_use = {}
+ for flag in atom.use.enabled:
+ target_use[flag] = True
+ for flag in atom.use.disabled:
+ target_use[flag] = False
+ use = self._pkg_use_enabled(pkg, target_use)
+ else:
+ use = self._pkg_use_enabled(pkg)
+
+ use_match = True
+ can_adjust_use = not pkg.built
+ missing_enabled = atom.use.missing_enabled.difference(pkg.iuse.all)
+ missing_disabled = atom.use.missing_disabled.difference(pkg.iuse.all)
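+ # missing_enabled/missing_disabled come from EAPI-4 USE-dep defaults,
+ # e.g. "dep[foo(+)]" assumes foo enabled when absent from IUSE; here
+ # they are narrowed to flags actually missing from pkg.iuse.all.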
+
+ if atom.use.enabled:
+ if atom.use.enabled.intersection(missing_disabled):
+ use_match = False
+ can_adjust_use = False
+ need_enabled = atom.use.enabled.difference(use)
+ if need_enabled:
+ need_enabled = need_enabled.difference(missing_enabled)
+ if need_enabled:
+ use_match = False
+ if can_adjust_use:
+ if pkg.use.mask.intersection(need_enabled):
+ can_adjust_use = False
+
+ if atom.use.disabled:
+ if atom.use.disabled.intersection(missing_enabled):
+ use_match = False
+ can_adjust_use = False
+ need_disabled = atom.use.disabled.intersection(use)
+ if need_disabled:
+ need_disabled = need_disabled.difference(missing_disabled)
+ if need_disabled:
+ use_match = False
+ if can_adjust_use:
+ if pkg.use.force.difference(
+ pkg.use.mask).intersection(need_disabled):
+ can_adjust_use = False
+
+ if not use_match:
+ if can_adjust_use:
+ # Above we must ensure that this package has
+ # absolutely no use.force, use.mask, or IUSE
+ # issues that the user typically can't make
+ # adjustments to solve (see bug #345979).
+ # FIXME: Conditional USE deps complicate
+ # issues. This code currently excludes cases
+ # in which the user can adjust the parent
+ # package's USE in order to satisfy the dep.
+ packages_with_invalid_use_config.append(pkg)
+ continue
+
+ if pkg.cp == atom_cp:
+ if highest_version is None:
+ highest_version = pkg
+ elif pkg > highest_version:
+ highest_version = pkg
+ # At this point, we've found the highest visible
+ # match from the current repo. Any lower versions
+ # from this repo are ignored, so the loop
+ # will always end with a break statement below
+ # this point.
+ if find_existing_node:
+ e_pkg = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
+ if not e_pkg:
+ break
+ # Use PackageSet.findAtomForPackage()
+ # for PROVIDE support.
+ if atom_set.findAtomForPackage(e_pkg, modified_use=self._pkg_use_enabled(e_pkg)):
+ if highest_version and \
+ e_pkg.cp == atom_cp and \
+ e_pkg < highest_version and \
+ e_pkg.slot_atom != highest_version.slot_atom:
+ # There is a higher version available in a
+ # different slot, so this existing node is
+ # irrelevant.
+ pass
+ else:
+ matched_packages.append(e_pkg)
+ existing_node = e_pkg
+ break
+ # Compare built package to current config and
+ # reject the built package if necessary.
+ if built and not useoldpkg and (not installed or matched_pkgs_ignore_use) and \
+ ("--newuse" in self._frozen_config.myopts or \
+ "--reinstall" in self._frozen_config.myopts or \
+ "--binpkg-respect-use" in self._frozen_config.myopts):
+ iuses = pkg.iuse.all
+ old_use = self._pkg_use_enabled(pkg)
+ if myeb:
+ pkgsettings.setcpv(myeb)
+ else:
+ pkgsettings.setcpv(pkg)
+ now_use = pkgsettings["PORTAGE_USE"].split()
+ forced_flags = set()
+ forced_flags.update(pkgsettings.useforce)
+ forced_flags.update(pkgsettings.usemask)
+ cur_iuse = iuses
+ if myeb and not usepkgonly and not useoldpkg:
+ cur_iuse = myeb.iuse.all
+ if self._reinstall_for_flags(forced_flags,
+ old_use, iuses,
+ now_use, cur_iuse):
+ break
+ # Compare current config to installed package
+ # and do not reinstall if possible.
+ if not installed and not useoldpkg and \
+ ("--newuse" in self._frozen_config.myopts or \
+ "--reinstall" in self._frozen_config.myopts) and \
+ cpv in vardb.match(atom):
+ forced_flags = set()
+ forced_flags.update(pkg.use.force)
+ forced_flags.update(pkg.use.mask)
+ inst_pkg = vardb.match_pkgs('=' + pkg.cpv)[0]
+ old_use = inst_pkg.use.enabled
+ old_iuse = inst_pkg.iuse.all
+ cur_use = self._pkg_use_enabled(pkg)
+ cur_iuse = pkg.iuse.all
+ reinstall_for_flags = \
+ self._reinstall_for_flags(
+ forced_flags, old_use, old_iuse,
+ cur_use, cur_iuse)
+ if reinstall_for_flags:
+ reinstall = True
+ if reinstall_atoms.findAtomForPackage(pkg, \
+ modified_use=self._pkg_use_enabled(pkg)):
+ reinstall = True
+ if not built:
+ myeb = pkg
+ elif useoldpkg:
+ matched_oldpkg.append(pkg)
+ matched_packages.append(pkg)
+ if reinstall_for_flags:
+ self._dynamic_config._reinstall_nodes[pkg] = \
+ reinstall_for_flags
+ break
+
+ if not matched_packages:
+ return None, None
+
+ if "--debug" in self._frozen_config.myopts:
+ for pkg in matched_packages:
+ portage.writemsg("%s %s%s%s\n" % \
+ ((pkg.type_name + ":").rjust(10),
+ pkg.cpv, _repo_separator, pkg.repo), noiselevel=-1)
+
+ # Filter out any old-style virtual matches if they are
+ # mixed with new-style virtual matches.
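+ # (Hypothetical example: an old-style PROVIDE=virtual/jdk match from
+ # dev-java/sun-jdk mixed with the new-style virtual/jdk package.)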
+ cp = atom.cp
+ if len(matched_packages) > 1 and \
+ "virtual" == portage.catsplit(cp)[0]:
+ for pkg in matched_packages:
+ if pkg.cp != cp:
+ continue
+ # Got a new-style virtual, so filter
+ # out any old-style virtuals.
+ matched_packages = [pkg for pkg in matched_packages \
+ if pkg.cp == cp]
+ break
+
+ if existing_node is not None and \
+ existing_node in matched_packages:
+ return existing_node, existing_node
+
+ if len(matched_packages) > 1:
+ if rebuilt_binaries:
+ inst_pkg = None
+ built_pkg = None
+ unbuilt_pkg = None
+ for pkg in matched_packages:
+ if pkg.installed:
+ inst_pkg = pkg
+ elif pkg.built:
+ built_pkg = pkg
+ else:
+ if unbuilt_pkg is None or pkg > unbuilt_pkg:
+ unbuilt_pkg = pkg
+ if built_pkg is not None and inst_pkg is not None:
+ # Only reinstall if binary package BUILD_TIME is
+ # non-empty, in order to avoid cases like
+ # bug #306659 where BUILD_TIME fields are missing
+ # in local and/or remote Packages files.
+ try:
+ built_timestamp = int(built_pkg.metadata['BUILD_TIME'])
+ except (KeyError, ValueError):
+ built_timestamp = 0
+
+ try:
+ installed_timestamp = int(inst_pkg.metadata['BUILD_TIME'])
+ except (KeyError, ValueError):
+ installed_timestamp = 0
+
+ if unbuilt_pkg is not None and unbuilt_pkg > built_pkg:
+ pass
+ elif "--rebuilt-binaries-timestamp" in self._frozen_config.myopts:
+ minimal_timestamp = self._frozen_config.myopts["--rebuilt-binaries-timestamp"]
+ if built_timestamp and \
+ built_timestamp > installed_timestamp and \
+ built_timestamp >= minimal_timestamp:
+ return built_pkg, existing_node
+ else:
+ #Don't care if the binary has an older BUILD_TIME than the installed
+ #package. This is for closely tracking a binhost.
+ #Use --rebuilt-binaries-timestamp 0 if you want only newer binaries
+ #pulled in here.
+ if built_timestamp and \
+ built_timestamp != installed_timestamp:
+ return built_pkg, existing_node
+
+ for pkg in matched_packages:
+ if pkg.installed and pkg.invalid:
+ matched_packages = [x for x in \
+ matched_packages if x is not pkg]
+
+ if avoid_update:
+ for pkg in matched_packages:
+ if pkg.installed and self._pkg_visibility_check(pkg, \
+ allow_unstable_keywords=allow_unstable_keywords,
+ allow_license_changes=allow_license_changes,
+ allow_unmasks=allow_unmasks):
+ return pkg, existing_node
+
+ visible_matches = []
+ if matched_oldpkg:
+ visible_matches = [pkg.cpv for pkg in matched_oldpkg \
+ if self._pkg_visibility_check(pkg, allow_unstable_keywords=allow_unstable_keywords,
+ allow_license_changes=allow_license_changes, allow_unmasks=allow_unmasks)]
+ if not visible_matches:
+ visible_matches = [pkg.cpv for pkg in matched_packages \
+ if self._pkg_visibility_check(pkg, allow_unstable_keywords=allow_unstable_keywords,
+ allow_license_changes=allow_license_changes, allow_unmasks=allow_unmasks)]
+ if visible_matches:
+ bestmatch = portage.best(visible_matches)
+ else:
+ # all are masked, so ignore visibility
+ bestmatch = portage.best([pkg.cpv for pkg in matched_packages])
+ matched_packages = [pkg for pkg in matched_packages \
+ if portage.dep.cpvequal(pkg.cpv, bestmatch)]
+
+ # ordered by type preference ("ebuild" type is the last resort)
+ return matched_packages[-1], existing_node
+
+ def _select_pkg_from_graph(self, root, atom, onlydeps=False):
+ """
+ Select packages that have already been added to the graph or
+ those that are installed and have not been scheduled for
+ replacement.
+ """
+ graph_db = self._dynamic_config._graph_trees[root]["porttree"].dbapi
+ matches = graph_db.match_pkgs(atom)
+ if not matches:
+ return None, None
+ pkg = matches[-1] # highest match
+ in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
+ return pkg, in_graph
+
+ def _select_pkg_from_installed(self, root, atom, onlydeps=False):
+ """
+ Select packages that are installed.
+ """
+ vardb = self._dynamic_config._graph_trees[root]["vartree"].dbapi
+ matches = vardb.match_pkgs(atom)
+ if not matches:
+ return None, None
+ if len(matches) > 1:
+ unmasked = [pkg for pkg in matches if \
+ self._pkg_visibility_check(pkg)]
+ if unmasked:
+ if len(unmasked) == 1:
+ matches = unmasked
+ else:
+ # Account for packages with masks (like KEYWORDS masks)
+ # that are usually ignored in visibility checks for
+ # installed packages, in order to handle cases like
+ # bug #350285.
+ unmasked = [pkg for pkg in matches if not pkg.masks]
+ if unmasked:
+ matches = unmasked
+ pkg = matches[-1] # highest match
+ in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
+ return pkg, in_graph
+
+ def _complete_graph(self, required_sets=None):
+ """
+ Add any deep dependencies of required sets (args, system, world) that
+ have not been pulled into the graph yet. This ensures that the graph
+ is consistent such that initially satisfied deep dependencies are not
+ broken in the new graph. Initially unsatisfied dependencies are
+ irrelevant since we only want to avoid breaking dependencies that are
+ initially satisfied.
+
+ Since this method can consume enough time to disturb users, it is
+ currently only enabled by the --complete-graph option.
+
+ @param required_sets: contains required sets (currently only used
+ for depclean and prune removal operations)
+ @type required_sets: dict
+ """
+ if "--buildpkgonly" in self._frozen_config.myopts or \
+ "recurse" not in self._dynamic_config.myparams:
+ return 1
+
+ if "complete" not in self._dynamic_config.myparams:
+ # Automatically enable complete mode if there are any
+ # downgrades, since they often break dependencies
+ # (like in bug #353613).
+ have_downgrade = False
+ for node in self._dynamic_config.digraph:
+ if not isinstance(node, Package) or \
+ node.operation != "merge":
+ continue
+ vardb = self._frozen_config.roots[
+ node.root].trees["vartree"].dbapi
+ inst_pkg = vardb.match_pkgs(node.slot_atom)
+ if inst_pkg and inst_pkg[0] > node:
+ have_downgrade = True
+ break
+
+ if have_downgrade:
+ self._dynamic_config.myparams["complete"] = True
+ else:
+ # Skip complete graph mode, in order to avoid consuming
+ # enough time to disturb users.
+ return 1
+
+ self._load_vdb()
+
+ # Put the depgraph into a mode that causes it to only
+ # select packages that have already been added to the
+ # graph or those that are installed and have not been
+ # scheduled for replacement. Also, toggle the "deep"
+ # parameter so that all dependencies are traversed and
+ # accounted for.
+ self._select_atoms = self._select_atoms_from_graph
+ if "remove" in self._dynamic_config.myparams:
+ self._select_package = self._select_pkg_from_installed
+ else:
+ self._select_package = self._select_pkg_from_graph
+ self._dynamic_config._traverse_ignored_deps = True
+ already_deep = self._dynamic_config.myparams.get("deep") is True
+ if not already_deep:
+ self._dynamic_config.myparams["deep"] = True
+
+ # Invalidate the package selection cache, since
+ # _select_package has just changed implementations.
+ for trees in self._dynamic_config._filtered_trees.values():
+ trees["porttree"].dbapi._clear_cache()
+
+ args = self._dynamic_config._initial_arg_list[:]
+ for root in self._frozen_config.roots:
+ if root != self._frozen_config.target_root and \
+ "remove" in self._dynamic_config.myparams:
+ # Only pull in deps for the relevant root.
+ continue
+ depgraph_sets = self._dynamic_config.sets[root]
+ required_set_names = self._frozen_config._required_set_names.copy()
+ remaining_args = required_set_names.copy()
+ if required_sets is None or root not in required_sets:
+ pass
+ else:
+ # Removal actions may override sets with temporary
+ # replacements that have had atoms removed in order
+ # to implement --deselect behavior.
+ required_set_names = set(required_sets[root])
+ depgraph_sets.sets.clear()
+ depgraph_sets.sets.update(required_sets[root])
+ if "remove" not in self._dynamic_config.myparams and \
+ root == self._frozen_config.target_root and \
+ already_deep:
+ remaining_args.difference_update(depgraph_sets.sets)
+ if not remaining_args and \
+ not self._dynamic_config._ignored_deps and \
+ not self._dynamic_config._dep_stack:
+ continue
+ root_config = self._frozen_config.roots[root]
+ for s in required_set_names:
+ pset = depgraph_sets.sets.get(s)
+ if pset is None:
+ pset = root_config.sets[s]
+ atom = SETPREFIX + s
+ args.append(SetArg(arg=atom, pset=pset,
+ root_config=root_config))
+
+ self._set_args(args)
+ for arg in self._expand_set_args(args, add_to_digraph=True):
+ for atom in arg.pset.getAtoms():
+ self._dynamic_config._dep_stack.append(
+ Dependency(atom=atom, root=arg.root_config.root,
+ parent=arg))
+
+ if True:
+ if self._dynamic_config._ignored_deps:
+ self._dynamic_config._dep_stack.extend(self._dynamic_config._ignored_deps)
+ self._dynamic_config._ignored_deps = []
+ if not self._create_graph(allow_unsatisfied=True):
+ return 0
+ # Check the unsatisfied deps to see if any initially satisfied deps
+ # will become unsatisfied due to an upgrade. Initially unsatisfied
+ # deps are irrelevant since we only want to avoid breaking deps
+ # that are initially satisfied.
+ while self._dynamic_config._unsatisfied_deps:
+ dep = self._dynamic_config._unsatisfied_deps.pop()
+ vardb = self._frozen_config.roots[
+ dep.root].trees["vartree"].dbapi
+ matches = vardb.match_pkgs(dep.atom)
+ if not matches:
+ self._dynamic_config._initially_unsatisfied_deps.append(dep)
+ continue
+ # A scheduled installation broke a deep dependency.
+ # Add the installed package to the graph so that it
+ # will be appropriately reported as a slot collision
+ # (possibly solvable via backtracking).
+ pkg = matches[-1] # highest match
+ if not self._add_pkg(pkg, dep):
+ return 0
+ if not self._create_graph(allow_unsatisfied=True):
+ return 0
+ return 1
+
+ def _pkg(self, cpv, type_name, root_config, installed=False,
+ onlydeps=False, myrepo = None):
+ """
+ Get a package instance from the cache, or create a new
+		one if necessary. Raises PackageNotFound from aux_get if it
+		fails for some reason (package does not exist or is
+ corrupt).
+ """
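+		# A minimal sketch (illustrative only, hypothetical cpv) of the
+		# cache lookup this method performs:
+		#
+		#   key = Package._gen_hash_key(cpv="sys-apps/foo-1.0",
+		#       type_name="ebuild", repo_name=None,
+		#       root_config=root_config, installed=False, onlydeps=False)
+		#   pkg = self._frozen_config._pkg_cache.get(key)
+		#   if pkg is None:
+		#       pkg = Package(...)  # built from db.aux_get() metadata
+		#       self._frozen_config._pkg_cache[pkg] = pkg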
+
+ # Ensure that we use the specially optimized RootConfig instance
+ # that refers to FakeVartree instead of the real vartree.
+ root_config = self._frozen_config.roots[root_config.root]
+ pkg = self._frozen_config._pkg_cache.get(
+ Package._gen_hash_key(cpv=cpv, type_name=type_name,
+ repo_name=myrepo, root_config=root_config,
+ installed=installed, onlydeps=onlydeps))
+ if pkg is None and onlydeps and not installed:
+ # Maybe it already got pulled in as a "merge" node.
+ pkg = self._dynamic_config.mydbapi[root_config.root].get(
+ Package._gen_hash_key(cpv=cpv, type_name=type_name,
+ repo_name=myrepo, root_config=root_config,
+ installed=installed, onlydeps=False))
+
+ if pkg is None:
+ tree_type = self.pkg_tree_map[type_name]
+ db = root_config.trees[tree_type].dbapi
+ db_keys = list(self._frozen_config._trees_orig[root_config.root][
+ tree_type].dbapi._aux_cache_keys)
+
+ try:
+ metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
+ except KeyError:
+ raise portage.exception.PackageNotFound(cpv)
+
+ pkg = Package(built=(type_name != "ebuild"), cpv=cpv,
+ installed=installed, metadata=metadata, onlydeps=onlydeps,
+ root_config=root_config, type_name=type_name)
+
+ self._frozen_config._pkg_cache[pkg] = pkg
+
+ if not self._pkg_visibility_check(pkg) and \
+ 'LICENSE' in pkg.masks and len(pkg.masks) == 1:
+ slot_key = (pkg.root, pkg.slot_atom)
+ other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
+ if other_pkg is None or pkg > other_pkg:
+ self._frozen_config._highest_license_masked[slot_key] = pkg
+
+ return pkg
+
+ def _validate_blockers(self):
+ """Remove any blockers from the digraph that do not match any of the
+ packages within the graph. If necessary, create hard deps to ensure
+ correct merge order such that mutually blocking packages are never
+ installed simultaneously. Also add runtime blockers from all installed
+ packages if any of them haven't been added already (bug 128809)."""
+
+ if "--buildpkgonly" in self._frozen_config.myopts or \
+ "--nodeps" in self._frozen_config.myopts:
+ return True
+
+ complete = "complete" in self._dynamic_config.myparams
+ deep = "deep" in self._dynamic_config.myparams
+
+ if True:
+ # Pull in blockers from all installed packages that haven't already
+ # been pulled into the depgraph, in order to ensure that they are
+ # respected (bug 128809). Due to the performance penalty that is
+ # incurred by all the additional dep_check calls that are required,
+ # blockers returned from dep_check are cached on disk by the
+ # BlockerCache class.
+
+ # For installed packages, always ignore blockers from DEPEND since
+ # only runtime dependencies should be relevant for packages that
+ # are already built.
+ dep_keys = ["RDEPEND", "PDEPEND"]
+ for myroot in self._frozen_config.trees:
+ vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
+ portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
+ pkgsettings = self._frozen_config.pkgsettings[myroot]
+ root_config = self._frozen_config.roots[myroot]
+ dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
+ final_db = self._dynamic_config.mydbapi[myroot]
+
+ blocker_cache = BlockerCache(myroot, vardb)
+ stale_cache = set(blocker_cache)
+ for pkg in vardb:
+ cpv = pkg.cpv
+ stale_cache.discard(cpv)
+ pkg_in_graph = self._dynamic_config.digraph.contains(pkg)
+ pkg_deps_added = \
+ pkg in self._dynamic_config._traversed_pkg_deps
+
+ # Check for masked installed packages. Only warn about
+ # packages that are in the graph in order to avoid warning
+ # about those that will be automatically uninstalled during
+ # the merge process or by --depclean. Always warn about
+ # packages masked by license, since the user likely wants
+ # to adjust ACCEPT_LICENSE.
+ if pkg in final_db:
+ if not self._pkg_visibility_check(pkg) and \
+ (pkg_in_graph or 'LICENSE' in pkg.masks):
+ self._dynamic_config._masked_installed.add(pkg)
+ else:
+ self._check_masks(pkg)
+
+ blocker_atoms = None
+ blockers = None
+ if pkg_deps_added:
+ blockers = []
+ try:
+ blockers.extend(
+ self._dynamic_config._blocker_parents.child_nodes(pkg))
+ except KeyError:
+ pass
+ try:
+ blockers.extend(
+ self._dynamic_config._irrelevant_blockers.child_nodes(pkg))
+ except KeyError:
+ pass
+ if blockers:
+ # Select just the runtime blockers.
+ blockers = [blocker for blocker in blockers \
+ if blocker.priority.runtime or \
+ blocker.priority.runtime_post]
+ if blockers is not None:
+ blockers = set(blocker.atom for blocker in blockers)
+
+ # If this node has any blockers, create a "nomerge"
+ # node for it so that they can be enforced.
+ self._spinner_update()
+ blocker_data = blocker_cache.get(cpv)
+ if blocker_data is not None and \
+ blocker_data.counter != long(pkg.metadata["COUNTER"]):
+ blocker_data = None
+
+ # If blocker data from the graph is available, use
+ # it to validate the cache and update the cache if
+ # it seems invalid.
+ if blocker_data is not None and \
+ blockers is not None:
+ if not blockers.symmetric_difference(
+ blocker_data.atoms):
+ continue
+ blocker_data = None
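+					# For example (hypothetical atom): if the graph yields
+					# blockers == set(["!app-misc/foo"]) and the cached entry
+					# stores the same atoms, the symmetric difference is empty
+					# and the cached entry is accepted as valid.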
+
+ if blocker_data is None and \
+ blockers is not None:
+ # Re-use the blockers from the graph.
+ blocker_atoms = sorted(blockers)
+ counter = long(pkg.metadata["COUNTER"])
+ blocker_data = \
+ blocker_cache.BlockerData(counter, blocker_atoms)
+ blocker_cache[pkg.cpv] = blocker_data
+ continue
+
+ if blocker_data:
+ blocker_atoms = [Atom(atom) for atom in blocker_data.atoms]
+ else:
+ # Use aux_get() to trigger FakeVartree global
+ # updates on *DEPEND when appropriate.
+ depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
+ # It is crucial to pass in final_db here in order to
+ # optimize dep_check calls by eliminating atoms via
+ # dep_wordreduce and dep_eval calls.
+ try:
+ success, atoms = portage.dep_check(depstr,
+ final_db, pkgsettings, myuse=self._pkg_use_enabled(pkg),
+ trees=self._dynamic_config._graph_trees, myroot=myroot)
+ except SystemExit:
+ raise
+ except Exception as e:
+ # This is helpful, for example, if a ValueError
+ # is thrown from cpv_expand due to multiple
+ # matches (this can happen if an atom lacks a
+ # category).
+ show_invalid_depstring_notice(
+ pkg, depstr, str(e))
+ del e
+ raise
+ if not success:
+ replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
+ if replacement_pkg and \
+ replacement_pkg[0].operation == "merge":
+ # This package is being replaced anyway, so
+ # ignore invalid dependencies so as not to
+ # annoy the user too much (otherwise they'd be
+ # forced to manually unmerge it first).
+ continue
+ show_invalid_depstring_notice(pkg, depstr, atoms)
+ return False
+ blocker_atoms = [myatom for myatom in atoms \
+ if myatom.blocker]
+ blocker_atoms.sort()
+ counter = long(pkg.metadata["COUNTER"])
+ blocker_cache[cpv] = \
+ blocker_cache.BlockerData(counter, blocker_atoms)
+ if blocker_atoms:
+ try:
+ for atom in blocker_atoms:
+ blocker = Blocker(atom=atom,
+ eapi=pkg.metadata["EAPI"],
+ priority=self._priority(runtime=True),
+ root=myroot)
+ self._dynamic_config._blocker_parents.add(blocker, pkg)
+ except portage.exception.InvalidAtom as e:
+ depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
+ show_invalid_depstring_notice(
+ pkg, depstr, "Invalid Atom: %s" % (e,))
+ return False
+ for cpv in stale_cache:
+ del blocker_cache[cpv]
+ blocker_cache.flush()
+ del blocker_cache
+
+ # Discard any "uninstall" tasks scheduled by previous calls
+ # to this method, since those tasks may not make sense given
+ # the current graph state.
+ previous_uninstall_tasks = self._dynamic_config._blocker_uninstalls.leaf_nodes()
+ if previous_uninstall_tasks:
+ self._dynamic_config._blocker_uninstalls = digraph()
+ self._dynamic_config.digraph.difference_update(previous_uninstall_tasks)
+
+ for blocker in self._dynamic_config._blocker_parents.leaf_nodes():
+ self._spinner_update()
+ root_config = self._frozen_config.roots[blocker.root]
+ virtuals = root_config.settings.getvirtuals()
+ myroot = blocker.root
+ initial_db = self._frozen_config.trees[myroot]["vartree"].dbapi
+ final_db = self._dynamic_config.mydbapi[myroot]
+
+ provider_virtual = False
+ if blocker.cp in virtuals and \
+ not self._have_new_virt(blocker.root, blocker.cp):
+ provider_virtual = True
+
+ # Use this to check PROVIDE for each matched package
+ # when necessary.
+ atom_set = InternalPackageSet(
+ initial_atoms=[blocker.atom])
+
+ if provider_virtual:
+ atoms = []
+ for provider_entry in virtuals[blocker.cp]:
+ atoms.append(Atom(blocker.atom.replace(
+ blocker.cp, provider_entry.cp, 1)))
+ else:
+ atoms = [blocker.atom]
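+				# E.g. (hypothetical virtual): a blocker !virtual/jre would be
+				# expanded here into one atom per provider, such as
+				# !dev-java/icedtea, before matching installed packages.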
+
+ blocked_initial = set()
+ for atom in atoms:
+ for pkg in initial_db.match_pkgs(atom):
+ if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
+ blocked_initial.add(pkg)
+
+ blocked_final = set()
+ for atom in atoms:
+ for pkg in final_db.match_pkgs(atom):
+ if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
+ blocked_final.add(pkg)
+
+ if not blocked_initial and not blocked_final:
+ parent_pkgs = self._dynamic_config._blocker_parents.parent_nodes(blocker)
+ self._dynamic_config._blocker_parents.remove(blocker)
+ # Discard any parents that don't have any more blockers.
+ for pkg in parent_pkgs:
+ self._dynamic_config._irrelevant_blockers.add(blocker, pkg)
+ if not self._dynamic_config._blocker_parents.child_nodes(pkg):
+ self._dynamic_config._blocker_parents.remove(pkg)
+ continue
+ for parent in self._dynamic_config._blocker_parents.parent_nodes(blocker):
+ unresolved_blocks = False
+ depends_on_order = set()
+ for pkg in blocked_initial:
+ if pkg.slot_atom == parent.slot_atom and \
+ not blocker.atom.blocker.overlap.forbid:
+ # New !!atom blockers do not allow temporary
+						# simultaneous installation, so unlike !atom
+ # blockers, !!atom blockers aren't ignored
+ # when they match other packages occupying
+ # the same slot.
+ continue
+ if parent.installed:
+ # Two currently installed packages conflict with
+						# each other. Ignore this case since the damage
+ # is already done and this would be likely to
+ # confuse users if displayed like a normal blocker.
+ continue
+
+ self._dynamic_config._blocked_pkgs.add(pkg, blocker)
+
+ if parent.operation == "merge":
+ # Maybe the blocked package can be replaced or simply
+ # unmerged to resolve this block.
+ depends_on_order.add((pkg, parent))
+ continue
+					# None of the above blocker resolution techniques apply,
+ # so apparently this one is unresolvable.
+ unresolved_blocks = True
+ for pkg in blocked_final:
+ if pkg.slot_atom == parent.slot_atom and \
+ not blocker.atom.blocker.overlap.forbid:
+ # New !!atom blockers do not allow temporary
+						# simultaneous installation, so unlike !atom
+ # blockers, !!atom blockers aren't ignored
+ # when they match other packages occupying
+ # the same slot.
+ continue
+ if parent.operation == "nomerge" and \
+ pkg.operation == "nomerge":
+ # This blocker will be handled the next time that a
+ # merge of either package is triggered.
+ continue
+
+ self._dynamic_config._blocked_pkgs.add(pkg, blocker)
+
+ # Maybe the blocking package can be
+ # unmerged to resolve this block.
+ if parent.operation == "merge" and pkg.installed:
+ depends_on_order.add((pkg, parent))
+ continue
+ elif parent.operation == "nomerge":
+ depends_on_order.add((parent, pkg))
+ continue
+					# None of the above blocker resolution techniques apply,
+ # so apparently this one is unresolvable.
+ unresolved_blocks = True
+
+				# Make sure we don't unmerge any packages that have been pulled
+ # into the graph.
+ if not unresolved_blocks and depends_on_order:
+ for inst_pkg, inst_task in depends_on_order:
+ if self._dynamic_config.digraph.contains(inst_pkg) and \
+ self._dynamic_config.digraph.parent_nodes(inst_pkg):
+ unresolved_blocks = True
+ break
+
+ if not unresolved_blocks and depends_on_order:
+ for inst_pkg, inst_task in depends_on_order:
+ uninst_task = Package(built=inst_pkg.built,
+ cpv=inst_pkg.cpv, installed=inst_pkg.installed,
+ metadata=inst_pkg.metadata,
+ operation="uninstall",
+ root_config=inst_pkg.root_config,
+ type_name=inst_pkg.type_name)
+ # Enforce correct merge order with a hard dep.
+ self._dynamic_config.digraph.addnode(uninst_task, inst_task,
+ priority=BlockerDepPriority.instance)
+ # Count references to this blocker so that it can be
+ # invalidated after nodes referencing it have been
+ # merged.
+ self._dynamic_config._blocker_uninstalls.addnode(uninst_task, blocker)
+ if not unresolved_blocks and not depends_on_order:
+ self._dynamic_config._irrelevant_blockers.add(blocker, parent)
+ self._dynamic_config._blocker_parents.remove_edge(blocker, parent)
+ if not self._dynamic_config._blocker_parents.parent_nodes(blocker):
+ self._dynamic_config._blocker_parents.remove(blocker)
+ if not self._dynamic_config._blocker_parents.child_nodes(parent):
+ self._dynamic_config._blocker_parents.remove(parent)
+ if unresolved_blocks:
+ self._dynamic_config._unsolvable_blockers.add(blocker, parent)
+
+ return True
+
+ def _accept_blocker_conflicts(self):
+ acceptable = False
+ for x in ("--buildpkgonly", "--fetchonly",
+ "--fetch-all-uri", "--nodeps"):
+ if x in self._frozen_config.myopts:
+ acceptable = True
+ break
+ return acceptable
+
+ def _merge_order_bias(self, mygraph):
+ """
+ For optimal leaf node selection, promote deep system runtime deps and
+ order nodes from highest to lowest overall reference count.
+ """
+
+ node_info = {}
+ for node in mygraph.order:
+ node_info[node] = len(mygraph.parent_nodes(node))
+ deep_system_deps = _find_deep_system_runtime_deps(mygraph)
+
+ def cmp_merge_preference(node1, node2):
+
+ if node1.operation == 'uninstall':
+ if node2.operation == 'uninstall':
+ return 0
+ return 1
+
+ if node2.operation == 'uninstall':
+ if node1.operation == 'uninstall':
+ return 0
+ return -1
+
+ node1_sys = node1 in deep_system_deps
+ node2_sys = node2 in deep_system_deps
+ if node1_sys != node2_sys:
+ if node1_sys:
+ return -1
+ return 1
+
+ return node_info[node2] - node_info[node1]
+
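+		# Illustrative result (hypothetical nodes): uninstall tasks sort
+		# last, deep system runtime deps sort first, and the remainder
+		# orders from highest to lowest reference count, e.g.:
+		#
+		#   [sys_dep(refs=7), pkgA(refs=5), pkgB(refs=1), uninstall_task]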
+ mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
+
+ def altlist(self, reversed=False):
+
+ while self._dynamic_config._serialized_tasks_cache is None:
+ self._resolve_conflicts()
+ try:
+ self._dynamic_config._serialized_tasks_cache, self._dynamic_config._scheduler_graph = \
+ self._serialize_tasks()
+ except self._serialize_tasks_retry:
+ pass
+
+ retlist = self._dynamic_config._serialized_tasks_cache[:]
+ if reversed:
+ retlist.reverse()
+ return retlist
+
+ def _implicit_libc_deps(self, mergelist, graph):
+ """
+ Create implicit dependencies on libc, in order to ensure that libc
+ is installed as early as possible (see bug #303567).
+ """
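+		# Sketch (hypothetical merge list): given
+		#   [sys-libs/glibc-2.15, app-misc/foo-1.0]
+		# glibc is recorded as an earlier libc package, and a buildtime
+		# dependency edge is then added from app-misc/foo-1.0 onto it,
+		# so the scheduler cannot reorder foo ahead of glibc.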
+ libc_pkgs = {}
+ implicit_libc_roots = (self._frozen_config._running_root.root,)
+ for root in implicit_libc_roots:
+ graphdb = self._dynamic_config.mydbapi[root]
+ vardb = self._frozen_config.trees[root]["vartree"].dbapi
+ for atom in self._expand_virt_from_graph(root,
+ portage.const.LIBC_PACKAGE_ATOM):
+ if atom.blocker:
+ continue
+ match = graphdb.match_pkgs(atom)
+ if not match:
+ continue
+ pkg = match[-1]
+ if pkg.operation == "merge" and \
+ not vardb.cpv_exists(pkg.cpv):
+ libc_pkgs.setdefault(pkg.root, set()).add(pkg)
+
+ if not libc_pkgs:
+ return
+
+ earlier_libc_pkgs = set()
+
+ for pkg in mergelist:
+ if not isinstance(pkg, Package):
+ # a satisfied blocker
+ continue
+ root_libc_pkgs = libc_pkgs.get(pkg.root)
+ if root_libc_pkgs is not None and \
+ pkg.operation == "merge":
+ if pkg in root_libc_pkgs:
+ earlier_libc_pkgs.add(pkg)
+ else:
+ for libc_pkg in root_libc_pkgs:
+ if libc_pkg in earlier_libc_pkgs:
+ graph.add(libc_pkg, pkg,
+ priority=DepPriority(buildtime=True))
+
+ def schedulerGraph(self):
+ """
+ The scheduler graph is identical to the normal one except that
+ uninstall edges are reversed in specific cases that require
+ conflicting packages to be temporarily installed simultaneously.
+		This is intended for use by the Scheduler in its parallelization
+ logic. It ensures that temporary simultaneous installation of
+ conflicting packages is avoided when appropriate (especially for
+ !!atom blockers), but allowed in specific cases that require it.
+
+ Note that this method calls break_refs() which alters the state of
+ internal Package instances such that this depgraph instance should
+ not be used to perform any more calculations.
+ """
+
+ # NOTE: altlist initializes self._dynamic_config._scheduler_graph
+ mergelist = self.altlist()
+ self._implicit_libc_deps(mergelist,
+ self._dynamic_config._scheduler_graph)
+
+		# Break DepPriority.satisfied attributes which reference
+		# installed Package instances by replacing them with a
+		# plain boolean True.
+ for parents, children, node in \
+ self._dynamic_config._scheduler_graph.nodes.values():
+ for priorities in chain(parents.values(), children.values()):
+ for priority in priorities:
+ if priority.satisfied:
+ priority.satisfied = True
+
+ pkg_cache = self._frozen_config._pkg_cache
+ graph = self._dynamic_config._scheduler_graph
+ trees = self._frozen_config.trees
+ pruned_pkg_cache = {}
+ for key, pkg in pkg_cache.items():
+ if pkg in graph or \
+ (pkg.installed and pkg in trees[pkg.root]['vartree'].dbapi):
+ pruned_pkg_cache[key] = pkg
+
+ for root in trees:
+ trees[root]['vartree']._pkg_cache = pruned_pkg_cache
+
+ self.break_refs()
+ sched_config = \
+ _scheduler_graph_config(trees, pruned_pkg_cache, graph, mergelist)
+
+ return sched_config
+
+ def break_refs(self):
+ """
+ Break any references in Package instances that lead back to the depgraph.
+ This is useful if you want to hold references to packages without also
+		holding the depgraph on the heap. It should only be called once the
+		depgraph and _frozen_config are no longer needed for any calculations.
+ """
+ for root_config in self._frozen_config.roots.values():
+ root_config.update(self._frozen_config._trees_orig[
+ root_config.root]["root_config"])
+ # Both instances are now identical, so discard the
+ # original which should have no other references.
+ self._frozen_config._trees_orig[
+ root_config.root]["root_config"] = root_config
+
+ def _resolve_conflicts(self):
+ if not self._complete_graph():
+ raise self._unknown_internal_error()
+
+ if not self._validate_blockers():
+ self._dynamic_config._skip_restart = True
+ raise self._unknown_internal_error()
+
+ if self._dynamic_config._slot_collision_info:
+ self._process_slot_conflicts()
+
+ def _serialize_tasks(self):
+
+ debug = "--debug" in self._frozen_config.myopts
+
+ if debug:
+ writemsg("\ndigraph:\n\n", noiselevel=-1)
+ self._dynamic_config.digraph.debug_print()
+ writemsg("\n", noiselevel=-1)
+
+ scheduler_graph = self._dynamic_config.digraph.copy()
+
+ if '--nodeps' in self._frozen_config.myopts:
+ # Preserve the package order given on the command line.
+ return ([node for node in scheduler_graph \
+ if isinstance(node, Package) \
+ and node.operation == 'merge'], scheduler_graph)
+
+ mygraph=self._dynamic_config.digraph.copy()
+
+ removed_nodes = set()
+
+ # Prune off all DependencyArg instances since they aren't
+ # needed, and because of nested sets this is faster than doing
+ # it with multiple digraph.root_nodes() calls below. This also
+ # takes care of nested sets that have circular references,
+ # which wouldn't be matched by digraph.root_nodes().
+ for node in mygraph:
+ if isinstance(node, DependencyArg):
+ removed_nodes.add(node)
+ if removed_nodes:
+ mygraph.difference_update(removed_nodes)
+ removed_nodes.clear()
+
+ # Prune "nomerge" root nodes if nothing depends on them, since
+ # otherwise they slow down merge order calculation. Don't remove
+ # non-root nodes since they help optimize merge order in some cases
+ # such as revdep-rebuild.
+
+ while True:
+ for node in mygraph.root_nodes():
+ if not isinstance(node, Package) or \
+ node.installed or node.onlydeps:
+ removed_nodes.add(node)
+ if removed_nodes:
+ self._spinner_update()
+ mygraph.difference_update(removed_nodes)
+ if not removed_nodes:
+ break
+ removed_nodes.clear()
+ self._merge_order_bias(mygraph)
+ def cmp_circular_bias(n1, n2):
+ """
+ RDEPEND is stronger than PDEPEND and this function
+ measures such a strength bias within a circular
+ dependency relationship.
+ """
+ n1_n2_medium = n2 in mygraph.child_nodes(n1,
+ ignore_priority=priority_range.ignore_medium_soft)
+ n2_n1_medium = n1 in mygraph.child_nodes(n2,
+ ignore_priority=priority_range.ignore_medium_soft)
+ if n1_n2_medium == n2_n1_medium:
+ return 0
+ elif n1_n2_medium:
+ return 1
+ return -1
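+		# Example (schematic): if n1 reaches n2 through an edge that
+		# survives ignore_medium_soft (e.g. RDEPEND) while n2 reaches n1
+		# only through weaker edges (e.g. PDEPEND), then n1 sorts after
+		# n2 and the RDEPEND child n2 is merged first.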
+ myblocker_uninstalls = self._dynamic_config._blocker_uninstalls.copy()
+ retlist=[]
+ # Contains uninstall tasks that have been scheduled to
+ # occur after overlapping blockers have been installed.
+ scheduled_uninstalls = set()
+ # Contains any Uninstall tasks that have been ignored
+ # in order to avoid the circular deps code path. These
+ # correspond to blocker conflicts that could not be
+ # resolved.
+ ignored_uninstall_tasks = set()
+ have_uninstall_task = False
+ complete = "complete" in self._dynamic_config.myparams
+ asap_nodes = []
+
+ def get_nodes(**kwargs):
+ """
+ Returns leaf nodes excluding Uninstall instances
+ since those should be executed as late as possible.
+ """
+ return [node for node in mygraph.leaf_nodes(**kwargs) \
+ if isinstance(node, Package) and \
+ (node.operation != "uninstall" or \
+ node in scheduled_uninstalls)]
+
+ # sys-apps/portage needs special treatment if ROOT="/"
+ running_root = self._frozen_config._running_root.root
+ runtime_deps = InternalPackageSet(
+ initial_atoms=[PORTAGE_PACKAGE_ATOM])
+ running_portage = self._frozen_config.trees[running_root]["vartree"].dbapi.match_pkgs(
+ PORTAGE_PACKAGE_ATOM)
+ replacement_portage = self._dynamic_config.mydbapi[running_root].match_pkgs(
+ PORTAGE_PACKAGE_ATOM)
+
+ if running_portage:
+ running_portage = running_portage[0]
+ else:
+ running_portage = None
+
+ if replacement_portage:
+ replacement_portage = replacement_portage[0]
+ else:
+ replacement_portage = None
+
+ if replacement_portage == running_portage:
+ replacement_portage = None
+
+ if replacement_portage is not None and \
+ (running_portage is None or \
+ running_portage.cpv != replacement_portage.cpv or \
+ '9999' in replacement_portage.cpv or \
+ 'git' in replacement_portage.inherited or \
+ 'git-2' in replacement_portage.inherited):
+ # update from running_portage to replacement_portage asap
+ asap_nodes.append(replacement_portage)
+
+ if running_portage is not None:
+ try:
+ portage_rdepend = self._select_atoms_highest_available(
+ running_root, running_portage.metadata["RDEPEND"],
+ myuse=self._pkg_use_enabled(running_portage),
+ parent=running_portage, strict=False)
+ except portage.exception.InvalidDependString as e:
+ portage.writemsg("!!! Invalid RDEPEND in " + \
+ "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
+ (running_root, running_portage.cpv, e), noiselevel=-1)
+ del e
+ portage_rdepend = {running_portage : []}
+ for atoms in portage_rdepend.values():
+ runtime_deps.update(atom for atom in atoms \
+ if not atom.blocker)
+
+ # Merge libc asap, in order to account for implicit
+ # dependencies. See bug #303567.
+ implicit_libc_roots = (running_root,)
+ for root in implicit_libc_roots:
+ libc_pkgs = set()
+ vardb = self._frozen_config.trees[root]["vartree"].dbapi
+ graphdb = self._dynamic_config.mydbapi[root]
+ for atom in self._expand_virt_from_graph(root,
+ portage.const.LIBC_PACKAGE_ATOM):
+ if atom.blocker:
+ continue
+ match = graphdb.match_pkgs(atom)
+ if not match:
+ continue
+ pkg = match[-1]
+ if pkg.operation == "merge" and \
+ not vardb.cpv_exists(pkg.cpv):
+ libc_pkgs.add(pkg)
+
+ if libc_pkgs:
+ # If there's also an os-headers upgrade, we need to
+ # pull that in first. See bug #328317.
+ for atom in self._expand_virt_from_graph(root,
+ portage.const.OS_HEADERS_PACKAGE_ATOM):
+ if atom.blocker:
+ continue
+ match = graphdb.match_pkgs(atom)
+ if not match:
+ continue
+ pkg = match[-1]
+ if pkg.operation == "merge" and \
+ not vardb.cpv_exists(pkg.cpv):
+ asap_nodes.append(pkg)
+
+ asap_nodes.extend(libc_pkgs)
+
+ def gather_deps(ignore_priority, mergeable_nodes,
+ selected_nodes, node):
+ """
+ Recursively gather a group of nodes that RDEPEND on
+			each other. This ensures that they are merged as a group
+ and get their RDEPENDs satisfied as soon as possible.
+ """
+ if node in selected_nodes:
+ return True
+ if node not in mergeable_nodes:
+ return False
+ if node == replacement_portage and \
+ mygraph.child_nodes(node,
+ ignore_priority=priority_range.ignore_medium_soft):
+				# Make sure that portage always has all of its
+ # RDEPENDs installed first.
+ return False
+ selected_nodes.add(node)
+ for child in mygraph.child_nodes(node,
+ ignore_priority=ignore_priority):
+ if not gather_deps(ignore_priority,
+ mergeable_nodes, selected_nodes, child):
+ return False
+ return True
+
+ def ignore_uninst_or_med(priority):
+ if priority is BlockerDepPriority.instance:
+ return True
+ return priority_range.ignore_medium(priority)
+
+ def ignore_uninst_or_med_soft(priority):
+ if priority is BlockerDepPriority.instance:
+ return True
+ return priority_range.ignore_medium_soft(priority)
+
+ tree_mode = "--tree" in self._frozen_config.myopts
+ # Tracks whether or not the current iteration should prefer asap_nodes
+ # if available. This is set to False when the previous iteration
+ # failed to select any nodes. It is reset whenever nodes are
+ # successfully selected.
+ prefer_asap = True
+
+ # Controls whether or not the current iteration should drop edges that
+ # are "satisfied" by installed packages, in order to solve circular
+ # dependencies. The deep runtime dependencies of installed packages are
+ # not checked in this case (bug #199856), so it must be avoided
+ # whenever possible.
+ drop_satisfied = False
+
+ # State of variables for successive iterations that loosen the
+ # criteria for node selection.
+ #
+ # iteration prefer_asap drop_satisfied
+ # 1 True False
+ # 2 False False
+ # 3 False True
+ #
+ # If no nodes are selected on the last iteration, it is due to
+ # unresolved blockers or circular dependencies.
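+		# Schematic of this fallback (hypothetical helper names, not the
+		# literal control flow):
+		#
+		#   prefer_asap, drop_satisfied = True, False
+		#   while mygraph:
+		#       selected = try_select(prefer_asap, drop_satisfied)
+		#       if selected:
+		#           prefer_asap, drop_satisfied = True, False
+		#       elif prefer_asap:
+		#           prefer_asap = False           # iteration 2
+		#       elif not drop_satisfied:
+		#           drop_satisfied = True         # iteration 3
+		#       else:
+		#           fail_with_circular_deps()     # no progress possible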
+
+ while mygraph:
+ self._spinner_update()
+ selected_nodes = None
+ ignore_priority = None
+ if drop_satisfied or (prefer_asap and asap_nodes):
+ priority_range = DepPrioritySatisfiedRange
+ else:
+ priority_range = DepPriorityNormalRange
+ if prefer_asap and asap_nodes:
+ # ASAP nodes are merged before their soft deps. Go ahead and
+ # select root nodes here if necessary, since it's typical for
+ # the parent to have been removed from the graph already.
+ asap_nodes = [node for node in asap_nodes \
+ if mygraph.contains(node)]
+ for i in range(priority_range.SOFT,
+ priority_range.MEDIUM_SOFT + 1):
+ ignore_priority = priority_range.ignore_priority[i]
+ for node in asap_nodes:
+ if not mygraph.child_nodes(node,
+ ignore_priority=ignore_priority):
+ selected_nodes = [node]
+ asap_nodes.remove(node)
+ break
+ if selected_nodes:
+ break
+
+ if not selected_nodes and \
+ not (prefer_asap and asap_nodes):
+ for i in range(priority_range.NONE,
+ priority_range.MEDIUM_SOFT + 1):
+ ignore_priority = priority_range.ignore_priority[i]
+ nodes = get_nodes(ignore_priority=ignore_priority)
+ if nodes:
+ # If there is a mixture of merges and uninstalls,
+ # do the uninstalls first.
+ good_uninstalls = None
+ if len(nodes) > 1:
+ good_uninstalls = []
+ for node in nodes:
+ if node.operation == "uninstall":
+ good_uninstalls.append(node)
+
+ if good_uninstalls:
+ nodes = good_uninstalls
+
+ if good_uninstalls or len(nodes) == 1 or \
+ (ignore_priority is None and \
+ not asap_nodes and not tree_mode):
+ # Greedily pop all of these nodes since no
+ # relationship has been ignored. This optimization
+ # destroys --tree output, so it's disabled in tree
+ # mode.
+ selected_nodes = nodes
+ else:
+ # For optimal merge order:
+ # * Only pop one node.
+ # * Removing a root node (node without a parent)
+ # will not produce a leaf node, so avoid it.
+ # * It's normal for a selected uninstall to be a
+ # root node, so don't check them for parents.
+ if asap_nodes:
+ prefer_asap_parents = (True, False)
+ else:
+ prefer_asap_parents = (False,)
+ for check_asap_parent in prefer_asap_parents:
+ if check_asap_parent:
+ for node in nodes:
+ parents = mygraph.parent_nodes(node,
+ ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
+ if parents and set(parents).intersection(asap_nodes):
+ selected_nodes = [node]
+ break
+ else:
+ for node in nodes:
+ if mygraph.parent_nodes(node):
+ selected_nodes = [node]
+ break
+ if selected_nodes:
+ break
+ if selected_nodes:
+ break
+
+ if not selected_nodes:
+ nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
+ if nodes:
+ mergeable_nodes = set(nodes)
+ if prefer_asap and asap_nodes:
+ nodes = asap_nodes
+ # When gathering the nodes belonging to a runtime cycle,
+ # we want to minimize the number of nodes gathered, since
+ # this tends to produce a more optimal merge order.
+ # Ignoring all medium_soft deps serves this purpose.
+ # In the case of multiple runtime cycles, where some cycles
+ # may depend on smaller independent cycles, it's optimal
+ # to merge smaller independent cycles before other cycles
+ # that depend on them. Therefore, we search for the
+ # smallest cycle in order to try and identify and prefer
+ # these smaller independent cycles.
+ ignore_priority = priority_range.ignore_medium_soft
+ smallest_cycle = None
+ for node in nodes:
+ if not mygraph.parent_nodes(node):
+ continue
+ selected_nodes = set()
+ if gather_deps(ignore_priority,
+ mergeable_nodes, selected_nodes, node):
+ # When selecting asap_nodes, we need to ensure
+ # that we haven't selected a large runtime cycle
+ # that is obviously sub-optimal. This will be
+ # obvious if any of the non-asap selected_nodes
+ # is a leaf node when medium_soft deps are
+ # ignored.
+ if prefer_asap and asap_nodes and \
+ len(selected_nodes) > 1:
+ for node in selected_nodes.difference(
+ asap_nodes):
+ if not mygraph.child_nodes(node,
+ ignore_priority =
+ DepPriorityNormalRange.ignore_medium_soft):
+ selected_nodes = None
+ break
+ if selected_nodes:
+ if smallest_cycle is None or \
+ len(selected_nodes) < len(smallest_cycle):
+ smallest_cycle = selected_nodes
+
+ selected_nodes = smallest_cycle
+
+ if selected_nodes and debug:
+ writemsg("\nruntime cycle digraph (%s nodes):\n\n" %
+ (len(selected_nodes),), noiselevel=-1)
+ cycle_digraph = mygraph.copy()
+ cycle_digraph.difference_update([x for x in
+ cycle_digraph if x not in selected_nodes])
+ cycle_digraph.debug_print()
+ writemsg("\n", noiselevel=-1)
+
+ if prefer_asap and asap_nodes and not selected_nodes:
+ # We failed to find any asap nodes to merge, so ignore
+ # them for the next iteration.
+ prefer_asap = False
+ continue
+
+ if selected_nodes and ignore_priority is not None:
+ # Try to merge ignored medium_soft deps as soon as possible
+ # if they're not satisfied by installed packages.
+ for node in selected_nodes:
+ children = set(mygraph.child_nodes(node))
+ soft = children.difference(
+ mygraph.child_nodes(node,
+ ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
+ medium_soft = children.difference(
+ mygraph.child_nodes(node,
+ ignore_priority = \
+ DepPrioritySatisfiedRange.ignore_medium_soft))
+ medium_soft.difference_update(soft)
+ for child in medium_soft:
+ if child in selected_nodes:
+ continue
+ if child in asap_nodes:
+ continue
+ # Merge PDEPEND asap for bug #180045.
+ asap_nodes.append(child)
+
+ if selected_nodes and len(selected_nodes) > 1:
+ if not isinstance(selected_nodes, list):
+ selected_nodes = list(selected_nodes)
+ selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
+
+ if not selected_nodes and myblocker_uninstalls:
+ # An Uninstall task needs to be executed in order to
+				# avoid a conflict, if possible.
+
+ if drop_satisfied:
+ priority_range = DepPrioritySatisfiedRange
+ else:
+ priority_range = DepPriorityNormalRange
+
+ mergeable_nodes = get_nodes(
+ ignore_priority=ignore_uninst_or_med)
+
+ min_parent_deps = None
+ uninst_task = None
+
+ for task in myblocker_uninstalls.leaf_nodes():
+ # Do some sanity checks so that system or world packages
+ # don't get uninstalled inappropriately here (only really
+ # necessary when --complete-graph has not been enabled).
+
+ if task in ignored_uninstall_tasks:
+ continue
+
+ if task in scheduled_uninstalls:
+ # It's been scheduled but it hasn't
+ # been executed yet due to dependence
+ # on installation of blocking packages.
+ continue
+
+ root_config = self._frozen_config.roots[task.root]
+ inst_pkg = self._pkg(task.cpv, "installed", root_config,
+ installed=True)
+
+ if self._dynamic_config.digraph.contains(inst_pkg):
+ continue
+
+ forbid_overlap = False
+ heuristic_overlap = False
+ for blocker in myblocker_uninstalls.parent_nodes(task):
+ if not eapi_has_strong_blocks(blocker.eapi):
+ heuristic_overlap = True
+ elif blocker.atom.blocker.overlap.forbid:
+ forbid_overlap = True
+ break
+ if forbid_overlap and running_root == task.root:
+ continue
+
+ if heuristic_overlap and running_root == task.root:
+						# Never uninstall sys-apps/portage or its essential
+ # dependencies, except through replacement.
+ try:
+ runtime_dep_atoms = \
+ list(runtime_deps.iterAtomsForPackage(task))
+ except portage.exception.InvalidDependString as e:
+ portage.writemsg("!!! Invalid PROVIDE in " + \
+ "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
+ (task.root, task.cpv, e), noiselevel=-1)
+ del e
+ continue
+
+ # Don't uninstall a runtime dep if it appears
+ # to be the only suitable one installed.
+ skip = False
+ vardb = root_config.trees["vartree"].dbapi
+ for atom in runtime_dep_atoms:
+ other_version = None
+ for pkg in vardb.match_pkgs(atom):
+ if pkg.cpv == task.cpv and \
+ pkg.metadata["COUNTER"] == \
+ task.metadata["COUNTER"]:
+ continue
+ other_version = pkg
+ break
+ if other_version is None:
+ skip = True
+ break
+ if skip:
+ continue
+
+ # For packages in the system set, don't take
+ # any chances. If the conflict can't be resolved
+ # by a normal replacement operation then abort.
+ skip = False
+ try:
+ for atom in root_config.sets[
+ "system"].iterAtomsForPackage(task):
+ skip = True
+ break
+ except portage.exception.InvalidDependString as e:
+ portage.writemsg("!!! Invalid PROVIDE in " + \
+ "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
+ (task.root, task.cpv, e), noiselevel=-1)
+ del e
+ skip = True
+ if skip:
+ continue
+
+ # Note that the world check isn't always
+ # necessary since self._complete_graph() will
+ # add all packages from the system and world sets to the
+ # graph. This just allows unresolved conflicts to be
+ # detected as early as possible, which makes it possible
+ # to avoid calling self._complete_graph() when it is
+					# unnecessary due to blockers triggering an abort.
+ if not complete:
+						# For packages in the world set, go ahead and uninstall
+ # when necessary, as long as the atom will be satisfied
+ # in the final state.
+ graph_db = self._dynamic_config.mydbapi[task.root]
+ skip = False
+ try:
+ for atom in root_config.sets[
+ "selected"].iterAtomsForPackage(task):
+ satisfied = False
+ for pkg in graph_db.match_pkgs(atom):
+ if pkg == inst_pkg:
+ continue
+ satisfied = True
+ break
+ if not satisfied:
+ skip = True
+ self._dynamic_config._blocked_world_pkgs[inst_pkg] = atom
+ break
+ except portage.exception.InvalidDependString as e:
+ portage.writemsg("!!! Invalid PROVIDE in " + \
+ "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
+ (task.root, task.cpv, e), noiselevel=-1)
+ del e
+ skip = True
+ if skip:
+ continue
+
+ # Check the deps of parent nodes to ensure that
+ # the chosen task produces a leaf node. Maybe
+ # this can be optimized some more to make the
+ # best possible choice, but the current algorithm
+ # is simple and should be near optimal for most
+ # common cases.
+ self._spinner_update()
+ mergeable_parent = False
+ parent_deps = set()
+ parent_deps.add(task)
+ for parent in mygraph.parent_nodes(task):
+ parent_deps.update(mygraph.child_nodes(parent,
+ ignore_priority=priority_range.ignore_medium_soft))
+ if min_parent_deps is not None and \
+ len(parent_deps) >= min_parent_deps:
+ # This task is no better than a previously selected
+							# task, so abort the search now in order to avoid
+							# wasting any more CPU time on this task. This increases
+ # performance dramatically in cases when there are
+ # hundreds of blockers to solve, like when
+ # upgrading to a new slot of kde-meta.
+ mergeable_parent = None
+ break
+ if parent in mergeable_nodes and \
+ gather_deps(ignore_uninst_or_med_soft,
+ mergeable_nodes, set(), parent):
+ mergeable_parent = True
+
+ if not mergeable_parent:
+ continue
+
+ if min_parent_deps is None or \
+ len(parent_deps) < min_parent_deps:
+ min_parent_deps = len(parent_deps)
+ uninst_task = task
+
+ if uninst_task is not None and min_parent_deps == 1:
+					# This is the best possible result, so abort the search
+					# now in order to avoid wasting any more CPU time.
+ break
+
+ if uninst_task is not None:
+ # The uninstall is performed only after blocking
+ # packages have been merged on top of it. File
+ # collisions between blocking packages are detected
+ # and removed from the list of files to be uninstalled.
+ scheduled_uninstalls.add(uninst_task)
+ parent_nodes = mygraph.parent_nodes(uninst_task)
+
+ # Reverse the parent -> uninstall edges since we want
+ # to do the uninstall after blocking packages have
+ # been merged on top of it.
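+					# Schematically (edges point from a dependency to its
+					# dependent): before, blocked_pkg depended on uninst_task;
+					# afterwards uninst_task depends on blocked_pkg, so the
+					# blocking package is merged first and the uninstall runs
+					# on top of it.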
+ mygraph.remove(uninst_task)
+ for blocked_pkg in parent_nodes:
+ mygraph.add(blocked_pkg, uninst_task,
+ priority=BlockerDepPriority.instance)
+ scheduler_graph.remove_edge(uninst_task, blocked_pkg)
+ scheduler_graph.add(blocked_pkg, uninst_task,
+ priority=BlockerDepPriority.instance)
+
+ # Sometimes a merge node will render an uninstall
+ # node unnecessary (due to occupying the same SLOT),
+ # and we want to avoid executing a separate uninstall
+ # task in that case.
+ slot_node = self._dynamic_config.mydbapi[uninst_task.root
+ ].match_pkgs(uninst_task.slot_atom)
+ if slot_node and \
+ slot_node[0].operation == "merge":
+ mygraph.add(slot_node[0], uninst_task,
+ priority=BlockerDepPriority.instance)
+
+ # Reset the state variables for leaf node selection and
+ # continue trying to select leaf nodes.
+ prefer_asap = True
+ drop_satisfied = False
+ continue
+
+ if not selected_nodes:
+ # Only select root nodes as a last resort. This case should
+ # only trigger when the graph is nearly empty and the only
+ # remaining nodes are isolated (no parents or children). Since
+ # the nodes must be isolated, ignore_priority is not needed.
+ selected_nodes = get_nodes()
+
+ if not selected_nodes and not drop_satisfied:
+ drop_satisfied = True
+ continue
+
+ if not selected_nodes and myblocker_uninstalls:
+ # If possible, drop an uninstall task here in order to avoid
+ # the circular deps code path. The corresponding blocker will
+ # still be counted as an unresolved conflict.
+ uninst_task = None
+ for node in myblocker_uninstalls.leaf_nodes():
+ try:
+ mygraph.remove(node)
+ except KeyError:
+ pass
+ else:
+ uninst_task = node
+ ignored_uninstall_tasks.add(node)
+ break
+
+ if uninst_task is not None:
+ # Reset the state variables for leaf node selection and
+ # continue trying to select leaf nodes.
+ prefer_asap = True
+ drop_satisfied = False
+ continue
+
+ if not selected_nodes:
+ self._dynamic_config._circular_deps_for_display = mygraph
+ self._dynamic_config._skip_restart = True
+ raise self._unknown_internal_error()
+
+ # At this point, we've succeeded in selecting one or more nodes, so
+ # reset state variables for leaf node selection.
+ prefer_asap = True
+ drop_satisfied = False
+
+ mygraph.difference_update(selected_nodes)
+
+ for node in selected_nodes:
+ if isinstance(node, Package) and \
+ node.operation == "nomerge":
+ continue
+
+ # Handle interactions between blockers
+ # and uninstallation tasks.
+ solved_blockers = set()
+ uninst_task = None
+ if isinstance(node, Package) and \
+ "uninstall" == node.operation:
+ have_uninstall_task = True
+ uninst_task = node
+ else:
+ vardb = self._frozen_config.trees[node.root]["vartree"].dbapi
+ inst_pkg = vardb.match_pkgs(node.slot_atom)
+ if inst_pkg:
+ # The package will be replaced by this one, so remove
+ # the corresponding Uninstall task if necessary.
+ inst_pkg = inst_pkg[0]
+ uninst_task = Package(built=inst_pkg.built,
+ cpv=inst_pkg.cpv, installed=inst_pkg.installed,
+ metadata=inst_pkg.metadata,
+ operation="uninstall",
+ root_config=inst_pkg.root_config,
+ type_name=inst_pkg.type_name)
+ try:
+ mygraph.remove(uninst_task)
+ except KeyError:
+ pass
+
+ if uninst_task is not None and \
+ uninst_task not in ignored_uninstall_tasks and \
+ myblocker_uninstalls.contains(uninst_task):
+ blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
+ myblocker_uninstalls.remove(uninst_task)
+ # Discard any blockers that this Uninstall solves.
+ for blocker in blocker_nodes:
+ if not myblocker_uninstalls.child_nodes(blocker):
+ myblocker_uninstalls.remove(blocker)
+ if blocker not in \
+ self._dynamic_config._unsolvable_blockers:
+ solved_blockers.add(blocker)
+
+ retlist.append(node)
+
+ if (isinstance(node, Package) and \
+ "uninstall" == node.operation) or \
+ (uninst_task is not None and \
+ uninst_task in scheduled_uninstalls):
+ # Include satisfied blockers in the merge list
+ # since the user might be interested and also
+ # it serves as an indicator that blocking packages
+ # will be temporarily installed simultaneously.
+ for blocker in solved_blockers:
+ retlist.append(blocker)
+
+ unsolvable_blockers = set(self._dynamic_config._unsolvable_blockers.leaf_nodes())
+ for node in myblocker_uninstalls.root_nodes():
+ unsolvable_blockers.add(node)
+
+ # If any Uninstall tasks need to be executed in order
+ # to avoid a conflict, complete the graph with any
+ # dependencies that may have been initially
+ # neglected (to ensure that unsafe Uninstall tasks
+ # are properly identified and blocked from execution).
+ if have_uninstall_task and \
+ not complete and \
+ not unsolvable_blockers:
+ self._dynamic_config.myparams["complete"] = True
+ if '--debug' in self._frozen_config.myopts:
+ msg = []
+ msg.append("enabling 'complete' depgraph mode " + \
+ "due to uninstall task(s):")
+ msg.append("")
+ for node in retlist:
+ if isinstance(node, Package) and \
+ node.operation == 'uninstall':
+ msg.append("\t%s" % (node,))
+ writemsg_level("\n%s\n" % \
+ "".join("%s\n" % line for line in msg),
+ level=logging.DEBUG, noiselevel=-1)
+ raise self._serialize_tasks_retry("")
+
+ # Set satisfied state on blockers, but not before the
+ # above retry path, since we don't want to modify the
+ # state in that case.
+ for node in retlist:
+ if isinstance(node, Blocker):
+ node.satisfied = True
+
+ for blocker in unsolvable_blockers:
+ retlist.append(blocker)
+
+ if unsolvable_blockers and \
+ not self._accept_blocker_conflicts():
+ self._dynamic_config._unsatisfied_blockers_for_display = unsolvable_blockers
+ self._dynamic_config._serialized_tasks_cache = retlist[:]
+ self._dynamic_config._scheduler_graph = scheduler_graph
+ self._dynamic_config._skip_restart = True
+ raise self._unknown_internal_error()
+
+ if self._dynamic_config._slot_collision_info and \
+ not self._accept_blocker_conflicts():
+ self._dynamic_config._serialized_tasks_cache = retlist[:]
+ self._dynamic_config._scheduler_graph = scheduler_graph
+ raise self._unknown_internal_error()
+
+ return retlist, scheduler_graph
+
+ def _show_circular_deps(self, mygraph):
+ self._dynamic_config._circular_dependency_handler = \
+ circular_dependency_handler(self, mygraph)
+ handler = self._dynamic_config._circular_dependency_handler
+
+ self._frozen_config.myopts.pop("--quiet", None)
+ self._frozen_config.myopts["--verbose"] = True
+ self._frozen_config.myopts["--tree"] = True
+ portage.writemsg("\n\n", noiselevel=-1)
+ self.display(handler.merge_list)
+ prefix = colorize("BAD", " * ")
+ portage.writemsg("\n", noiselevel=-1)
+ portage.writemsg(prefix + "Error: circular dependencies:\n",
+ noiselevel=-1)
+ portage.writemsg("\n", noiselevel=-1)
+
+ if handler.circular_dep_message is None:
+ handler.debug_print()
+ portage.writemsg("\n", noiselevel=-1)
+
+ if handler.circular_dep_message is not None:
+ portage.writemsg(handler.circular_dep_message, noiselevel=-1)
+
+ suggestions = handler.suggestions
+ if suggestions:
+ writemsg("\n\nIt might be possible to break this cycle\n", noiselevel=-1)
+ if len(suggestions) == 1:
+ writemsg("by applying the following change:\n", noiselevel=-1)
+ else:
+ writemsg("by applying " + colorize("bold", "any of") + \
+ " the following changes:\n", noiselevel=-1)
+ writemsg("".join(suggestions), noiselevel=-1)
+			writemsg("\nNote that this change can be reverted once the package has" + \
+ " been installed.\n", noiselevel=-1)
+ if handler.large_cycle_count:
+ writemsg("\nNote that the dependency graph contains a lot of cycles.\n" + \
+ "Several changes might be required to resolve all cycles.\n" + \
+					"Temporarily changing some USE flag for all packages might be a better option.\n", noiselevel=-1)
+ else:
+ writemsg("\n\n", noiselevel=-1)
+ writemsg(prefix + "Note that circular dependencies " + \
+ "can often be avoided by temporarily\n", noiselevel=-1)
+ writemsg(prefix + "disabling USE flags that trigger " + \
+ "optional dependencies.\n", noiselevel=-1)
+
+ def _show_merge_list(self):
+ if self._dynamic_config._serialized_tasks_cache is not None and \
+ not (self._dynamic_config._displayed_list is not None and \
+ (self._dynamic_config._displayed_list == self._dynamic_config._serialized_tasks_cache or \
+ self._dynamic_config._displayed_list == \
+ list(reversed(self._dynamic_config._serialized_tasks_cache)))):
+ display_list = self._dynamic_config._serialized_tasks_cache[:]
+ if "--tree" in self._frozen_config.myopts:
+ display_list.reverse()
+ self.display(display_list)
+
+ def _show_unsatisfied_blockers(self, blockers):
+ self._show_merge_list()
+ msg = "Error: The above package list contains " + \
+ "packages which cannot be installed " + \
+ "at the same time on the same system."
+ prefix = colorize("BAD", " * ")
+ portage.writemsg("\n", noiselevel=-1)
+ for line in textwrap.wrap(msg, 70):
+ portage.writemsg(prefix + line + "\n", noiselevel=-1)
+
+ # Display the conflicting packages along with the packages
+ # that pulled them in. This is helpful for troubleshooting
+ # cases in which blockers don't solve automatically and
+ # the reasons are not apparent from the normal merge list
+ # display.
+
+ conflict_pkgs = {}
+ for blocker in blockers:
+ for pkg in chain(self._dynamic_config._blocked_pkgs.child_nodes(blocker), \
+ self._dynamic_config._blocker_parents.parent_nodes(blocker)):
+ parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
+ if not parent_atoms:
+ atom = self._dynamic_config._blocked_world_pkgs.get(pkg)
+ if atom is not None:
+ parent_atoms = set([("@selected", atom)])
+ if parent_atoms:
+ conflict_pkgs[pkg] = parent_atoms
+
+ if conflict_pkgs:
+ # Reduce noise by pruning packages that are only
+ # pulled in by other conflict packages.
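+			# E.g. (hypothetical): if blocked pkgB's only parent atom
+			# comes from pkgA, and pkgA is itself in conflict_pkgs, then
+			# pkgB is pruned; showing pkgA already explains the conflict.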
+ pruned_pkgs = set()
+ for pkg, parent_atoms in conflict_pkgs.items():
+ relevant_parent = False
+ for parent, atom in parent_atoms:
+ if parent not in conflict_pkgs:
+ relevant_parent = True
+ break
+ if not relevant_parent:
+ pruned_pkgs.add(pkg)
+ for pkg in pruned_pkgs:
+ del conflict_pkgs[pkg]
+
+ if conflict_pkgs:
+ msg = []
+ msg.append("\n")
+ indent = " "
+ for pkg, parent_atoms in conflict_pkgs.items():
+
+ # Prefer packages that are not directly involved in a conflict.
+ # It can be essential to see all the packages here, so don't
+ # omit any. If the list is long, people can simply use a pager.
+ preferred_parents = set()
+ for parent_atom in parent_atoms:
+ parent, atom = parent_atom
+ if parent not in conflict_pkgs:
+ preferred_parents.add(parent_atom)
+
+ ordered_list = list(preferred_parents)
+ if len(parent_atoms) > len(ordered_list):
+ for parent_atom in parent_atoms:
+ if parent_atom not in preferred_parents:
+ ordered_list.append(parent_atom)
+
+ msg.append(indent + "%s pulled in by\n" % pkg)
+
+ for parent_atom in ordered_list:
+ parent, atom = parent_atom
+ msg.append(2*indent)
+ if isinstance(parent,
+ (PackageArg, AtomArg)):
+ # For PackageArg and AtomArg types, it's
+ # redundant to display the atom attribute.
+ msg.append(str(parent))
+ else:
+ # Display the specific atom from SetArg or
+ # Package types.
+ msg.append("%s required by %s" % (atom, parent))
+ msg.append("\n")
+
+ msg.append("\n")
+
+ writemsg("".join(msg), noiselevel=-1)
+
+ if "--quiet" not in self._frozen_config.myopts:
+ show_blocker_docs_link()
+
+ def display(self, mylist, favorites=[], verbosity=None):
+
+ # This is used to prevent display_problems() from
+ # redundantly displaying this exact same merge list
+ # again via _show_merge_list().
+ self._dynamic_config._displayed_list = mylist
+ display = Display()
+
+ return display(self, mylist, favorites, verbosity)
+
+ def _display_autounmask(self):
+ """
+ Display --autounmask message and optionally write it to config files
+ (using CONFIG_PROTECT). The message includes the comments and the changes.
+ """
+
+		autounmask_write = self._frozen_config.myopts.get("--autounmask-write", "n") is True
+ quiet = "--quiet" in self._frozen_config.myopts
+ pretend = "--pretend" in self._frozen_config.myopts
+ ask = "--ask" in self._frozen_config.myopts
+ enter_invalid = '--ask-enter-invalid' in self._frozen_config.myopts
+
+ def check_if_latest(pkg):
+ is_latest = True
+ is_latest_in_slot = True
+ dbs = self._dynamic_config._filtered_trees[pkg.root]["dbs"]
+ root_config = self._frozen_config.roots[pkg.root]
+
+ for db, pkg_type, built, installed, db_keys in dbs:
+ for other_pkg in self._iter_match_pkgs(root_config, pkg_type, Atom(pkg.cp)):
+ if other_pkg.cp != pkg.cp:
+ # old-style PROVIDE virtual means there are no
+ # normal matches for this pkg_type
+ break
+ if other_pkg > pkg:
+ is_latest = False
+ if other_pkg.slot_atom == pkg.slot_atom:
+ is_latest_in_slot = False
+ break
+ else:
+ # iter_match_pkgs yields highest version first, so
+ # there's no need to search this pkg_type any further
+ break
+
+ if not is_latest_in_slot:
+ break
+
+ return is_latest, is_latest_in_slot
+
+		# Set of roots we have autounmask changes for.
+ roots = set()
+
+ unstable_keyword_msg = {}
+ for pkg in self._dynamic_config._needed_unstable_keywords:
+ self._show_merge_list()
+ if pkg in self._dynamic_config.digraph:
+ root = pkg.root
+ roots.add(root)
+ unstable_keyword_msg.setdefault(root, [])
+ is_latest, is_latest_in_slot = check_if_latest(pkg)
+ pkgsettings = self._frozen_config.pkgsettings[pkg.root]
+ mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
+ use=self._pkg_use_enabled(pkg))
+ for reason in mreasons:
+ if reason.unmask_hint and \
+ reason.unmask_hint.key == 'unstable keyword':
+ keyword = reason.unmask_hint.value
+
+ unstable_keyword_msg[root].append(self._get_dep_chain_as_comment(pkg))
+ if is_latest:
+ unstable_keyword_msg[root].append(">=%s %s\n" % (pkg.cpv, keyword))
+ elif is_latest_in_slot:
+ unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], keyword))
+ else:
+ unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
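+					# Resulting lines (hypothetical package and keyword), e.g.:
+					#   ">=dev-util/foo-1.2 ~amd64"   if foo-1.2 is the latest version,
+					#   ">=dev-util/foo-1.2:0 ~amd64" if latest only within slot 0,
+					#   "=dev-util/foo-1.2 ~amd64"    otherwise.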
+
+ p_mask_change_msg = {}
+ for pkg in self._dynamic_config._needed_p_mask_changes:
+ self._show_merge_list()
+ if pkg in self._dynamic_config.digraph:
+ root = pkg.root
+ roots.add(root)
+ p_mask_change_msg.setdefault(root, [])
+ is_latest, is_latest_in_slot = check_if_latest(pkg)
+ pkgsettings = self._frozen_config.pkgsettings[pkg.root]
+ mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
+ use=self._pkg_use_enabled(pkg))
+ for reason in mreasons:
+ if reason.unmask_hint and \
+ reason.unmask_hint.key == 'p_mask':
+ keyword = reason.unmask_hint.value
+
+ comment, filename = portage.getmaskingreason(
+ pkg.cpv, metadata=pkg.metadata,
+ settings=pkgsettings,
+ portdb=pkg.root_config.trees["porttree"].dbapi,
+ return_location=True)
+
+ p_mask_change_msg[root].append(self._get_dep_chain_as_comment(pkg))
+ if filename:
+ p_mask_change_msg[root].append("# %s:\n" % filename)
+ if comment:
+ comment = [line for line in
+ comment.splitlines() if line]
+ for line in comment:
+ p_mask_change_msg[root].append("%s\n" % line)
+ if is_latest:
+ p_mask_change_msg[root].append(">=%s\n" % pkg.cpv)
+ elif is_latest_in_slot:
+ p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.metadata["SLOT"]))
+ else:
+ p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
+
+ use_changes_msg = {}
+ for pkg, needed_use_config_change in self._dynamic_config._needed_use_config_changes.items():
+ self._show_merge_list()
+ if pkg in self._dynamic_config.digraph:
+ root = pkg.root
+ roots.add(root)
+ use_changes_msg.setdefault(root, [])
+ is_latest, is_latest_in_slot = check_if_latest(pkg)
+ changes = needed_use_config_change[1]
+ adjustments = []
+ for flag, state in changes.items():
+ if state:
+ adjustments.append(flag)
+ else:
+ adjustments.append("-" + flag)
+ use_changes_msg[root].append(self._get_dep_chain_as_comment(pkg, unsatisfied_dependency=True))
+ if is_latest:
+ use_changes_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
+ elif is_latest_in_slot:
+ use_changes_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], " ".join(adjustments)))
+ else:
+ use_changes_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
+
+ license_msg = {}
+ for pkg, missing_licenses in self._dynamic_config._needed_license_changes.items():
+ self._show_merge_list()
+ if pkg in self._dynamic_config.digraph:
+ root = pkg.root
+ roots.add(root)
+ license_msg.setdefault(root, [])
+ is_latest, is_latest_in_slot = check_if_latest(pkg)
+
+ license_msg[root].append(self._get_dep_chain_as_comment(pkg))
+ if is_latest:
+ license_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
+ elif is_latest_in_slot:
+ license_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], " ".join(sorted(missing_licenses))))
+ else:
+ license_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
+
+ def find_config_file(abs_user_config, file_name):
+ """
+ Searches /etc/portage for an appropriate file to append changes to.
+			If file_name is a file, it is returned; if it is a directory, the
+			last file in it is returned. Order of traversal is identical to
+ portage.util.grablines(recursive=True).
+
+ file_name - String containing a file name like "package.use"
+ return value - String. Absolute path of file to write to. None if
+ no suitable file exists.
+ """
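+			# Usage sketch (hypothetical layout): if /etc/portage/package.use
+			# is a directory containing "00-base" and "99-local", then
+			# find_config_file("/etc/portage", "package.use") returns
+			# "/etc/portage/package.use/99-local"; for a plain file it
+			# returns the file path itself.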
+ file_path = os.path.join(abs_user_config, file_name)
+
+ try:
+ os.lstat(file_path)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ # The file doesn't exist, so we'll
+ # simply create it.
+ return file_path
+
+ # Disk or file system trouble?
+ return None
+
+ last_file_path = None
+ stack = [file_path]
+ while stack:
+ p = stack.pop()
+ try:
+ st = os.stat(p)
+ except OSError:
+ pass
+ else:
+ if stat.S_ISREG(st.st_mode):
+ last_file_path = p
+ elif stat.S_ISDIR(st.st_mode):
+ if os.path.basename(p) in _ignorecvs_dirs:
+ continue
+ try:
+ contents = os.listdir(p)
+ except OSError:
+ pass
+ else:
+ contents.sort(reverse=True)
+ for child in contents:
+ if child.startswith(".") or \
+ child.endswith("~"):
+ continue
+ stack.append(os.path.join(p, child))
+
+ return last_file_path
+
+ write_to_file = autounmask_write and not pretend
+		# Make sure we have a file to write to before doing any write.
+ file_to_write_to = {}
+ problems = []
+ if write_to_file:
+ for root in roots:
+ settings = self._frozen_config.roots[root].settings
+ abs_user_config = os.path.join(
+ settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
+
+ if root in unstable_keyword_msg:
+ if not os.path.exists(os.path.join(abs_user_config,
+ "package.keywords")):
+ filename = "package.accept_keywords"
+ else:
+ filename = "package.keywords"
+ file_to_write_to[(abs_user_config, "package.keywords")] = \
+ find_config_file(abs_user_config, filename)
+
+ if root in p_mask_change_msg:
+ file_to_write_to[(abs_user_config, "package.unmask")] = \
+ find_config_file(abs_user_config, "package.unmask")
+
+ if root in use_changes_msg:
+ file_to_write_to[(abs_user_config, "package.use")] = \
+ find_config_file(abs_user_config, "package.use")
+
+ if root in license_msg:
+ file_to_write_to[(abs_user_config, "package.license")] = \
+ find_config_file(abs_user_config, "package.license")
+
+ for (abs_user_config, f), path in file_to_write_to.items():
+ if path is None:
+ problems.append("!!! No file to write for '%s'\n" % os.path.join(abs_user_config, f))
+
+ write_to_file = not problems
+
+ for root in roots:
+ settings = self._frozen_config.roots[root].settings
+ abs_user_config = os.path.join(
+ settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
+
+ if len(roots) > 1:
+ writemsg_stdout("\nFor %s:\n" % abs_user_config, noiselevel=-1)
+
+ if root in unstable_keyword_msg:
+ writemsg_stdout("\nThe following " + colorize("BAD", "keyword changes") + \
+ " are necessary to proceed:\n", noiselevel=-1)
+ writemsg_stdout("".join(unstable_keyword_msg[root]), noiselevel=-1)
+
+ if root in p_mask_change_msg:
+ writemsg_stdout("\nThe following " + colorize("BAD", "mask changes") + \
+ " are necessary to proceed:\n", noiselevel=-1)
+ writemsg_stdout("".join(p_mask_change_msg[root]), noiselevel=-1)
+
+ if root in use_changes_msg:
+ writemsg_stdout("\nThe following " + colorize("BAD", "USE changes") + \
+ " are necessary to proceed:\n", noiselevel=-1)
+ writemsg_stdout("".join(use_changes_msg[root]), noiselevel=-1)
+
+ if root in license_msg:
+ writemsg_stdout("\nThe following " + colorize("BAD", "license changes") + \
+ " are necessary to proceed:\n", noiselevel=-1)
+ writemsg_stdout("".join(license_msg[root]), noiselevel=-1)
+
+ protect_obj = {}
+ if write_to_file:
+ for root in roots:
+ settings = self._frozen_config.roots[root].settings
+ protect_obj[root] = ConfigProtect(settings["EROOT"], \
+ shlex_split(settings.get("CONFIG_PROTECT", "")),
+ shlex_split(settings.get("CONFIG_PROTECT_MASK", "")))
+
+ def write_changes(root, changes, file_to_write_to):
+ file_contents = None
+ try:
+ file_contents = io.open(
+ _unicode_encode(file_to_write_to,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'],
+ errors='replace').readlines()
+ except IOError as e:
+ if e.errno == errno.ENOENT:
+ file_contents = []
+ else:
+ problems.append("!!! Failed to read '%s': %s\n" % \
+ (file_to_write_to, e))
+ if file_contents is not None:
+ file_contents.extend(changes)
+ if protect_obj[root].isprotected(file_to_write_to):
+ # We want to force new_protect_filename to ensure
+ # that the user will see all our changes via
+ # etc-update, even if file_to_write_to doesn't
+ # exist yet, so we specify force=True.
+ file_to_write_to = new_protect_filename(file_to_write_to,
+ force=True)
+ try:
+ write_atomic(file_to_write_to, "".join(file_contents))
+ except PortageException:
+ problems.append("!!! Failed to write '%s'\n" % file_to_write_to)
+
+ if not quiet and \
+ (unstable_keyword_msg or \
+ p_mask_change_msg or \
+ use_changes_msg or \
+ license_msg):
+ msg = [
+ "",
+ "NOTE: This --autounmask behavior can be disabled by setting",
+ " EMERGE_DEFAULT_OPTS=\"--autounmask=n\" in make.conf."
+ ]
+ for line in msg:
+ if line:
+ line = colorize("INFORM", line)
+ writemsg_stdout(line + "\n", noiselevel=-1)
+
+ if ask and write_to_file and file_to_write_to:
+ prompt = "\nWould you like to add these " + \
+ "changes to your config files?"
+ if userquery(prompt, enter_invalid) == 'No':
+ write_to_file = False
+
+ if write_to_file and file_to_write_to:
+ for root in roots:
+ settings = self._frozen_config.roots[root].settings
+ abs_user_config = os.path.join(
+ settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
+ ensure_dirs(abs_user_config)
+
+ if root in unstable_keyword_msg:
+ write_changes(root, unstable_keyword_msg[root],
+ file_to_write_to.get((abs_user_config, "package.keywords")))
+
+ if root in p_mask_change_msg:
+ write_changes(root, p_mask_change_msg[root],
+ file_to_write_to.get((abs_user_config, "package.unmask")))
+
+ if root in use_changes_msg:
+ write_changes(root, use_changes_msg[root],
+ file_to_write_to.get((abs_user_config, "package.use")))
+
+ if root in license_msg:
+ write_changes(root, license_msg[root],
+ file_to_write_to.get((abs_user_config, "package.license")))
+
+ if problems:
+ writemsg_stdout("\nThe following problems occurred while writing autounmask changes:\n", \
+ noiselevel=-1)
+ writemsg_stdout("".join(problems), noiselevel=-1)
+ elif write_to_file and roots:
+ writemsg_stdout("\nAutounmask changes successfully written. Remember to run etc-update.\n", \
+ noiselevel=-1)
+ elif not pretend and not autounmask_write and roots:
+ writemsg_stdout("\nUse --autounmask-write to write changes to config files (honoring CONFIG_PROTECT).\n", \
+ noiselevel=-1)
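+		# Net effect (illustrative; package names hypothetical): a
+		# successful --autounmask-write run appends lines in the formats
+		# built above, e.g. ">=dev-libs/foo-1.2:0 -doc" to package.use or
+		# "=dev-libs/foo-1.2 ~amd64" to package.accept_keywords, and any
+		# CONFIG_PROTECT-ed target is diverted to a ._cfg0000_* copy so
+		# that etc-update will present the change for review.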
+
+
+ def display_problems(self):
+ """
+ Display problems with the dependency graph such as slot collisions.
+ This is called internally by display() to show the problems _after_
+ the merge list where it is most likely to be seen, but if display()
+ is not going to be called then this method should be called explicitly
+ to ensure that the user is notified of problems with the graph.
+
+ All output goes to stderr, except for unsatisfied dependencies which
+ go to stdout for parsing by programs such as autounmask.
+ """
+
+ # Note that show_masked_packages() sends its output to
+ # stdout, and some programs such as autounmask parse the
+ # output in cases when emerge bails out. However, when
+ # show_masked_packages() is called for installed packages
+ # here, the message is a warning that is more appropriate
+ # to send to stderr, so temporarily redirect stdout to
+ # stderr. TODO: Fix output code so there's a cleaner way
+ # to redirect everything to stderr.
+ sys.stdout.flush()
+ sys.stderr.flush()
+ stdout = sys.stdout
+ try:
+ sys.stdout = sys.stderr
+ self._display_problems()
+ finally:
+ sys.stdout = stdout
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+ # This goes to stdout for parsing by programs like autounmask.
+ for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
+ self._show_unsatisfied_dep(*pargs, **kwargs)
+
+ def _display_problems(self):
+ if self._dynamic_config._circular_deps_for_display is not None:
+ self._show_circular_deps(
+ self._dynamic_config._circular_deps_for_display)
+
+ # The user is only notified of a slot conflict if
+ # there are no unresolvable blocker conflicts.
+ if self._dynamic_config._unsatisfied_blockers_for_display is not None:
+ self._show_unsatisfied_blockers(
+ self._dynamic_config._unsatisfied_blockers_for_display)
+ elif self._dynamic_config._slot_collision_info:
+ self._show_slot_collision_notice()
+ else:
+ self._show_missed_update()
+
+ self._display_autounmask()
+
+ # TODO: Add generic support for "set problem" handlers so that
+ # the below warnings aren't special cases for world only.
+
+ if self._dynamic_config._missing_args:
+ world_problems = False
+ if "world" in self._dynamic_config.sets[
+ self._frozen_config.target_root].sets:
+ # Filter out indirect members of world (from nested sets)
+ # since only direct members of world are desired here.
+ world_set = self._frozen_config.roots[self._frozen_config.target_root].sets["selected"]
+ for arg, atom in self._dynamic_config._missing_args:
+ if arg.name in ("selected", "world") and atom in world_set:
+ world_problems = True
+ break
+
+ if world_problems:
+ sys.stderr.write("\n!!! Problems have been " + \
+ "detected with your world file\n")
+ sys.stderr.write("!!! Please run " + \
+ green("emaint --check world")+"\n\n")
+
+ if self._dynamic_config._missing_args:
+ sys.stderr.write("\n" + colorize("BAD", "!!!") + \
+ " Ebuilds for the following packages are either all\n")
+ sys.stderr.write(colorize("BAD", "!!!") + \
+ " masked or don't exist:\n")
+ sys.stderr.write(" ".join(str(atom) for arg, atom in \
+ self._dynamic_config._missing_args) + "\n")
+
+ if self._dynamic_config._pprovided_args:
+ arg_refs = {}
+ for arg, atom in self._dynamic_config._pprovided_args:
+ if isinstance(arg, SetArg):
+ parent = arg.name
+ arg_atom = (atom, atom)
+ else:
+ parent = "args"
+ arg_atom = (arg.arg, atom)
+ refs = arg_refs.setdefault(arg_atom, [])
+ if parent not in refs:
+ refs.append(parent)
+ msg = []
+ msg.append(bad("\nWARNING: "))
+ if len(self._dynamic_config._pprovided_args) > 1:
+ msg.append("Requested packages will not be " + \
+ "merged because they are listed in\n")
+ else:
+ msg.append("A requested package will not be " + \
+ "merged because it is listed in\n")
+ msg.append("package.provided:\n\n")
+ problems_sets = set()
+ for (arg, atom), refs in arg_refs.items():
+ ref_string = ""
+ if refs:
+ problems_sets.update(refs)
+ refs.sort()
+ ref_string = ", ".join(["'%s'" % name for name in refs])
+ ref_string = " pulled in by " + ref_string
+ msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
+ msg.append("\n")
+ if "selected" in problems_sets or "world" in problems_sets:
+ msg.append("This problem can be solved in one of the following ways:\n\n")
+ msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
+ msg.append(" B) Uninstall offending packages (cleans them from world).\n")
+ msg.append(" C) Remove offending entries from package.provided.\n\n")
+ msg.append("The best course of action depends on the reason that an offending\n")
+ msg.append("package.provided entry exists.\n\n")
+ sys.stderr.write("".join(msg))
+
+ masked_packages = []
+ for pkg in self._dynamic_config._masked_license_updates:
+ root_config = pkg.root_config
+ pkgsettings = self._frozen_config.pkgsettings[pkg.root]
+ mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
+ masked_packages.append((root_config, pkgsettings,
+ pkg.cpv, pkg.repo, pkg.metadata, mreasons))
+ if masked_packages:
+ writemsg("\n" + colorize("BAD", "!!!") + \
+ " The following updates are masked by LICENSE changes:\n",
+ noiselevel=-1)
+ show_masked_packages(masked_packages)
+ show_mask_docs()
+ writemsg("\n", noiselevel=-1)
+
+ masked_packages = []
+ for pkg in self._dynamic_config._masked_installed:
+ root_config = pkg.root_config
+ pkgsettings = self._frozen_config.pkgsettings[pkg.root]
+ mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled)
+ masked_packages.append((root_config, pkgsettings,
+ pkg.cpv, pkg.repo, pkg.metadata, mreasons))
+ if masked_packages:
+ writemsg("\n" + colorize("BAD", "!!!") + \
+ " The following installed packages are masked:\n",
+ noiselevel=-1)
+ show_masked_packages(masked_packages)
+ show_mask_docs()
+ writemsg("\n", noiselevel=-1)
+
+ def saveNomergeFavorites(self):
+ """Find atoms in favorites that are not in the mergelist and add them
+ to the world file if necessary."""
+ for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
+ "--oneshot", "--onlydeps", "--pretend"):
+ if x in self._frozen_config.myopts:
+ return
+ root_config = self._frozen_config.roots[self._frozen_config.target_root]
+ world_set = root_config.sets["selected"]
+
+ world_locked = False
+ if hasattr(world_set, "lock"):
+ world_set.lock()
+ world_locked = True
+
+ if hasattr(world_set, "load"):
+ world_set.load() # maybe it's changed on disk
+
+ args_set = self._dynamic_config.sets[
+ self._frozen_config.target_root].sets['__non_set_args__']
+ portdb = self._frozen_config.trees[self._frozen_config.target_root]["porttree"].dbapi
+ added_favorites = set()
+ for x in self._dynamic_config._set_nodes:
+ if x.operation != "nomerge":
+ continue
+
+ if x.root != root_config.root:
+ continue
+
+ try:
+ myfavkey = create_world_atom(x, args_set, root_config)
+ if myfavkey:
+ if myfavkey in added_favorites:
+ continue
+ added_favorites.add(myfavkey)
+ except portage.exception.InvalidDependString as e:
+ writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
+ (x.cpv, e), noiselevel=-1)
+ writemsg("!!! see '%s'\n\n" % os.path.join(
+ x.root, portage.VDB_PATH, x.cpv, "PROVIDE"), noiselevel=-1)
+ del e
+ all_added = []
+ for arg in self._dynamic_config._initial_arg_list:
+ if not isinstance(arg, SetArg):
+ continue
+ if arg.root_config.root != root_config.root:
+ continue
+ k = arg.name
+ if k in ("selected", "world") or \
+ not root_config.sets[k].world_candidate:
+ continue
+ s = SETPREFIX + k
+ if s in world_set:
+ continue
+ all_added.append(SETPREFIX + k)
+ all_added.extend(added_favorites)
+ all_added.sort()
+ for a in all_added:
+ writemsg(">>> Recording %s in \"world\" favorites file...\n" % \
+ colorize("INFORM", str(a)), noiselevel=-1)
+ if all_added:
+ world_set.update(all_added)
+
+ if world_locked:
+ world_set.unlock()
+
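+	# Illustrative outcome of saveNomergeFavorites() (hypothetical
+	# names): a run like "emerge --noreplace app-misc/foo @myset"
+	# records the lines "app-misc/foo" and "@myset" in the world
+	# file, provided the set is a world_candidate and neither entry
+	# is already present.
+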
+ def _loadResumeCommand(self, resume_data, skip_masked=True,
+ skip_missing=True):
+ """
+ Add a resume command to the graph and validate it in the process. This
+ will raise a PackageNotFound exception if a package is not available.
+ """
+
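+		# Shape of the resume_data consumed below (reconstructed from
+		# the validation in this method; concrete values hypothetical):
+		#
+		#   {"mergelist": [["ebuild", "/", "app-misc/foo-1", "merge"], ...],
+		#    "favorites": ["app-misc/foo", "@world"]}
+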
+ self._load_vdb()
+
+ if not isinstance(resume_data, dict):
+ return False
+
+ mergelist = resume_data.get("mergelist")
+ if not isinstance(mergelist, list):
+ mergelist = []
+
+ favorites = resume_data.get("favorites")
+ args_set = self._dynamic_config.sets[
+ self._frozen_config.target_root].sets['__non_set_args__']
+ if isinstance(favorites, list):
+ args = self._load_favorites(favorites)
+ else:
+ args = []
+
+ fakedb = self._dynamic_config.mydbapi
+ trees = self._frozen_config.trees
+ serialized_tasks = []
+ masked_tasks = []
+ for x in mergelist:
+ if not (isinstance(x, list) and len(x) == 4):
+ continue
+ pkg_type, myroot, pkg_key, action = x
+ if pkg_type not in self.pkg_tree_map:
+ continue
+ if action != "merge":
+ continue
+ root_config = self._frozen_config.roots[myroot]
+
+ # Use the resume "favorites" list to see if a repo was specified
+ # for this package.
+ depgraph_sets = self._dynamic_config.sets[root_config.root]
+ repo = None
+ for atom in depgraph_sets.atoms.getAtoms():
+ if atom.repo and portage.dep.match_from_list(atom, [pkg_key]):
+ repo = atom.repo
+ break
+
+ atom = "=" + pkg_key
+ if repo:
+ atom = atom + _repo_separator + repo
+
+ try:
+ atom = Atom(atom, allow_repo=True)
+ except InvalidAtom:
+ continue
+
+ pkg = None
+ for pkg in self._iter_match_pkgs(root_config, pkg_type, atom):
+ if not self._pkg_visibility_check(pkg) or \
+ self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg)):
+ continue
+ break
+
+ if pkg is None:
+			# It does not exist or it is corrupt.
+ if skip_missing:
+ # TODO: log these somewhere
+ continue
+ raise portage.exception.PackageNotFound(pkg_key)
+
+ if "merge" == pkg.operation and \
+ self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
+ modified_use=self._pkg_use_enabled(pkg)):
+ continue
+
+ if "merge" == pkg.operation and not self._pkg_visibility_check(pkg):
+ if skip_masked:
+ masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
+ else:
+ self._dynamic_config._unsatisfied_deps_for_display.append(
+ ((pkg.root, "="+pkg.cpv), {"myparent":None}))
+
+ fakedb[myroot].cpv_inject(pkg)
+ serialized_tasks.append(pkg)
+ self._spinner_update()
+
+ if self._dynamic_config._unsatisfied_deps_for_display:
+ return False
+
+ if not serialized_tasks or "--nodeps" in self._frozen_config.myopts:
+ self._dynamic_config._serialized_tasks_cache = serialized_tasks
+ self._dynamic_config._scheduler_graph = self._dynamic_config.digraph
+ else:
+ self._select_package = self._select_pkg_from_graph
+ self._dynamic_config.myparams["selective"] = True
+ # Always traverse deep dependencies in order to account for
+ # potentially unsatisfied dependencies of installed packages.
+ # This is necessary for correct --keep-going or --resume operation
+ # in case a package from a group of circularly dependent packages
+ # fails. In this case, a package which has recently been installed
+ # may have an unsatisfied circular dependency (pulled in by
+ # PDEPEND, for example). So, even though a package is already
+			# installed, it may not have all of its dependencies satisfied, so
+ # it may not be usable. If such a package is in the subgraph of
+			# deep dependencies of a scheduled build, that build needs to
+ # be cancelled. In order for this type of situation to be
+ # recognized, deep traversal of dependencies is required.
+ self._dynamic_config.myparams["deep"] = True
+
+ for task in serialized_tasks:
+ if isinstance(task, Package) and \
+ task.operation == "merge":
+ if not self._add_pkg(task, None):
+ return False
+
+ # Packages for argument atoms need to be explicitly
+ # added via _add_pkg() so that they are included in the
+ # digraph (needed at least for --tree display).
+ for arg in self._expand_set_args(args, add_to_digraph=True):
+ for atom in arg.pset.getAtoms():
+ pkg, existing_node = self._select_package(
+ arg.root_config.root, atom)
+ if existing_node is None and \
+ pkg is not None:
+ if not self._add_pkg(pkg, Dependency(atom=atom,
+ root=pkg.root, parent=arg)):
+ return False
+
+ # Allow unsatisfied deps here to avoid showing a masking
+ # message for an unsatisfied dep that isn't necessarily
+ # masked.
+ if not self._create_graph(allow_unsatisfied=True):
+ return False
+
+ unsatisfied_deps = []
+ for dep in self._dynamic_config._unsatisfied_deps:
+ if not isinstance(dep.parent, Package):
+ continue
+ if dep.parent.operation == "merge":
+ unsatisfied_deps.append(dep)
+ continue
+
+ # For unsatisfied deps of installed packages, only account for
+ # them if they are in the subgraph of dependencies of a package
+ # which is scheduled to be installed.
+ unsatisfied_install = False
+ traversed = set()
+ dep_stack = self._dynamic_config.digraph.parent_nodes(dep.parent)
+ while dep_stack:
+ node = dep_stack.pop()
+ if not isinstance(node, Package):
+ continue
+ if node.operation == "merge":
+ unsatisfied_install = True
+ break
+ if node in traversed:
+ continue
+ traversed.add(node)
+ dep_stack.extend(self._dynamic_config.digraph.parent_nodes(node))
+
+ if unsatisfied_install:
+ unsatisfied_deps.append(dep)
+
+ if masked_tasks or unsatisfied_deps:
+ # This probably means that a required package
+ # was dropped via --skipfirst. It makes the
+ # resume list invalid, so convert it to a
+ # UnsatisfiedResumeDep exception.
+ raise self.UnsatisfiedResumeDep(self,
+ masked_tasks + unsatisfied_deps)
+ self._dynamic_config._serialized_tasks_cache = None
+ try:
+ self.altlist()
+ except self._unknown_internal_error:
+ return False
+
+ return True
+
+ def _load_favorites(self, favorites):
+ """
+ Use a list of favorites to resume state from a
+ previous select_files() call. This creates similar
+ DependencyArg instances to those that would have
+ been created by the original select_files() call.
+ This allows Package instances to be matched with
+ DependencyArg instances during graph creation.
+ """
+ root_config = self._frozen_config.roots[self._frozen_config.target_root]
+ sets = root_config.sets
+ depgraph_sets = self._dynamic_config.sets[root_config.root]
+ args = []
+ for x in favorites:
+ if not isinstance(x, basestring):
+ continue
+ if x in ("system", "world"):
+ x = SETPREFIX + x
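+				# A plain "system"/"world" entry is normalized to set
+				# notation here (e.g. "world" becomes "@world"), so it is
+				# handled by the SETPREFIX branch below.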
+ if x.startswith(SETPREFIX):
+ s = x[len(SETPREFIX):]
+ if s not in sets:
+ continue
+ if s in depgraph_sets.sets:
+ continue
+ pset = sets[s]
+ depgraph_sets.sets[s] = pset
+ args.append(SetArg(arg=x, pset=pset,
+ root_config=root_config))
+ else:
+ try:
+ x = Atom(x, allow_repo=True)
+ except portage.exception.InvalidAtom:
+ continue
+ args.append(AtomArg(arg=x, atom=x,
+ root_config=root_config))
+
+ self._set_args(args)
+ return args
+
+ class UnsatisfiedResumeDep(portage.exception.PortageException):
+ """
+ A dependency of a resume list is not installed. This
+ can occur when a required package is dropped from the
+ merge list via --skipfirst.
+ """
+ def __init__(self, depgraph, value):
+ portage.exception.PortageException.__init__(self, value)
+ self.depgraph = depgraph
+
+ class _internal_exception(portage.exception.PortageException):
+ def __init__(self, value=""):
+ portage.exception.PortageException.__init__(self, value)
+
+ class _unknown_internal_error(_internal_exception):
+ """
+ Used by the depgraph internally to terminate graph creation.
+ The specific reason for the failure should have been dumped
+		to stderr; unfortunately, the exact reason for the failure
+ may not be known.
+ """
+
+ class _serialize_tasks_retry(_internal_exception):
+ """
+ This is raised by the _serialize_tasks() method when it needs to
+ be called again for some reason. The only case that it's currently
+ used for is when neglected dependencies need to be added to the
+ graph in order to avoid making a potentially unsafe decision.
+ """
+
+ class _backtrack_mask(_internal_exception):
+ """
+ This is raised by _show_unsatisfied_dep() when it's called with
+ check_backtrack=True and a matching package has been masked by
+ backtracking.
+ """
+
+ class _autounmask_breakage(_internal_exception):
+ """
+ This is raised by _show_unsatisfied_dep() when it's called with
+ check_autounmask_breakage=True and a matching package has been
+		disqualified due to autounmask changes.
+ """
+
+ def need_restart(self):
+ return self._dynamic_config._need_restart and \
+ not self._dynamic_config._skip_restart
+
+ def success_without_autounmask(self):
+ return self._dynamic_config._success_without_autounmask
+
+ def autounmask_breakage_detected(self):
+ try:
+ for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
+ self._show_unsatisfied_dep(
+ *pargs, check_autounmask_breakage=True, **kwargs)
+ except self._autounmask_breakage:
+ return True
+ return False
+
+ def get_backtrack_infos(self):
+ return self._dynamic_config._backtrack_infos
+
+
+class _dep_check_composite_db(dbapi):
+ """
+ A dbapi-like interface that is optimized for use in dep_check() calls.
+ This is built on top of the existing depgraph package selection logic.
+ Some packages that have been added to the graph may be masked from this
+ view in order to influence the atom preference selection that occurs
+ via dep_check().
+ """
+ def __init__(self, depgraph, root):
+ dbapi.__init__(self)
+ self._depgraph = depgraph
+ self._root = root
+ self._match_cache = {}
+ self._cpv_pkg_map = {}
+
+ def _clear_cache(self):
+ self._match_cache.clear()
+ self._cpv_pkg_map.clear()
+
+ def cp_list(self, cp):
+ """
+ Emulate cp_list just so it can be used to check for existence
+ of new-style virtuals. Since it's a waste of time to return
+ more than one cpv for this use case, a maximum of one cpv will
+ be returned.
+ """
+ if isinstance(cp, Atom):
+ atom = cp
+ else:
+ atom = Atom(cp)
+ ret = []
+ for pkg in self._depgraph._iter_match_pkgs_any(
+ self._depgraph._frozen_config.roots[self._root], atom):
+ if pkg.cp == cp:
+ ret.append(pkg.cpv)
+ break
+
+ return ret
+
+ def match(self, atom):
+ ret = self._match_cache.get(atom)
+ if ret is not None:
+ return ret[:]
+ pkg, existing = self._depgraph._select_package(self._root, atom)
+ if not pkg:
+ ret = []
+ else:
+ # Return the highest available from select_package() as well as
+ # any matching slots in the graph db.
+ slots = set()
+ slots.add(pkg.metadata["SLOT"])
+ if pkg.cp.startswith("virtual/"):
+ # For new-style virtual lookahead that occurs inside
+ # dep_check(), examine all slots. This is needed
+ # so that newer slots will not unnecessarily be pulled in
+ # when a satisfying lower slot is already installed. For
+ # example, if virtual/jdk-1.4 is satisfied via kaffe then
+ # there's no need to pull in a newer slot to satisfy a
+ # virtual/jdk dependency.
+ for db, pkg_type, built, installed, db_keys in \
+ self._depgraph._dynamic_config._filtered_trees[self._root]["dbs"]:
+ for cpv in db.match(atom):
+ if portage.cpv_getkey(cpv) != pkg.cp:
+ continue
+ slots.add(db.aux_get(cpv, ["SLOT"])[0])
+ ret = []
+ if self._visible(pkg):
+ self._cpv_pkg_map[pkg.cpv] = pkg
+ ret.append(pkg.cpv)
+ slots.remove(pkg.metadata["SLOT"])
+ while slots:
+ slot_atom = Atom("%s:%s" % (atom.cp, slots.pop()))
+ pkg, existing = self._depgraph._select_package(
+ self._root, slot_atom)
+ if not pkg:
+ continue
+ if not self._visible(pkg):
+ continue
+ self._cpv_pkg_map[pkg.cpv] = pkg
+ ret.append(pkg.cpv)
+ if ret:
+ self._cpv_sort_ascending(ret)
+ self._match_cache[atom] = ret
+ return ret[:]
+
+ def _visible(self, pkg):
+ if pkg.installed and "selective" not in self._depgraph._dynamic_config.myparams:
+ try:
+ arg = next(self._depgraph._iter_atoms_for_pkg(pkg))
+ except (StopIteration, portage.exception.InvalidDependString):
+ arg = None
+ if arg:
+ return False
+ if pkg.installed and \
+ (pkg.masks or not self._depgraph._pkg_visibility_check(pkg)):
+ # Account for packages with masks (like KEYWORDS masks)
+ # that are usually ignored in visibility checks for
+ # installed packages, in order to handle cases like
+ # bug #350285.
+ myopts = self._depgraph._frozen_config.myopts
+ use_ebuild_visibility = myopts.get(
+ '--use-ebuild-visibility', 'n') != 'n'
+ avoid_update = "--update" not in myopts and \
+ "remove" not in self._depgraph._dynamic_config.myparams
+ usepkgonly = "--usepkgonly" in myopts
+ if not avoid_update:
+ if not use_ebuild_visibility and usepkgonly:
+ return False
+ else:
+ try:
+ pkg_eb = self._depgraph._pkg(
+ pkg.cpv, "ebuild", pkg.root_config,
+ myrepo=pkg.repo)
+ except portage.exception.PackageNotFound:
+ pkg_eb_visible = False
+ for pkg_eb in self._depgraph._iter_match_pkgs(
+ pkg.root_config, "ebuild",
+ Atom("=%s" % (pkg.cpv,))):
+ if self._depgraph._pkg_visibility_check(pkg_eb):
+ pkg_eb_visible = True
+ break
+ if not pkg_eb_visible:
+ return False
+ else:
+ if not self._depgraph._pkg_visibility_check(pkg_eb):
+ return False
+
+ in_graph = self._depgraph._dynamic_config._slot_pkg_map[
+ self._root].get(pkg.slot_atom)
+ if in_graph is None:
+ # Mask choices for packages which are not the highest visible
+ # version within their slot (since they usually trigger slot
+ # conflicts).
+ highest_visible, in_graph = self._depgraph._select_package(
+ self._root, pkg.slot_atom)
+ # Note: highest_visible is not necessarily the real highest
+ # visible, especially when --update is not enabled, so use
+ # < operator instead of !=.
+ if pkg < highest_visible:
+ return False
+ elif in_graph != pkg:
+ # Mask choices for packages that would trigger a slot
+ # conflict with a previously selected package.
+ return False
+ return True
+
+ def aux_get(self, cpv, wants):
+ metadata = self._cpv_pkg_map[cpv].metadata
+ return [metadata.get(x, "") for x in wants]
+
+ def match_pkgs(self, atom):
+ return [self._cpv_pkg_map[cpv] for cpv in self.match(atom)]
+
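+# Minimal usage sketch (hypothetical depgraph instance and root;
+# mirrors how dep_check() consumes a dbapi-like object):
+#
+#   cdb = _dep_check_composite_db(mydepgraph, "/")
+#   cpvs = cdb.match(Atom("dev-lang/python"))      # sorted cpv strings
+#   pkgs = cdb.match_pkgs(Atom("dev-lang/python")) # Package instances
+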
+def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
+
+ if "--quiet" in myopts:
+ writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
+ writemsg("!!! one of the following fully-qualified ebuild names instead:\n\n", noiselevel=-1)
+ for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
+ writemsg(" " + colorize("INFORM", cp) + "\n", noiselevel=-1)
+ return
+
+ s = search(root_config, spinner, "--searchdesc" in myopts,
+ "--quiet" not in myopts, "--usepkg" in myopts,
+ "--usepkgonly" in myopts)
+ null_cp = portage.dep_getkey(insert_category_into_atom(
+ arg, "null"))
+ cat, atom_pn = portage.catsplit(null_cp)
+ s.searchkey = atom_pn
+ for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
+ s.addCP(cp)
+ s.output()
+ writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
+ writemsg("!!! one of the above fully-qualified ebuild names instead.\n\n", noiselevel=-1)
+
+def _spinner_start(spinner, myopts):
+ if spinner is None:
+ return
+ if "--quiet" not in myopts and \
+ ("--pretend" in myopts or "--ask" in myopts or \
+ "--tree" in myopts or "--verbose" in myopts):
+ action = ""
+ if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
+ action = "fetched"
+ elif "--buildpkgonly" in myopts:
+ action = "built"
+ else:
+ action = "merged"
+ if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
+ if "--unordered-display" in myopts:
+ portage.writemsg_stdout("\n" + \
+ darkgreen("These are the packages that " + \
+ "would be %s:" % action) + "\n\n")
+ else:
+ portage.writemsg_stdout("\n" + \
+ darkgreen("These are the packages that " + \
+ "would be %s, in reverse order:" % action) + "\n\n")
+ else:
+ portage.writemsg_stdout("\n" + \
+ darkgreen("These are the packages that " + \
+ "would be %s, in order:" % action) + "\n\n")
+
+ show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
+ if not show_spinner:
+ spinner.update = spinner.update_quiet
+
+ if show_spinner:
+ portage.writemsg_stdout("Calculating dependencies ")
+
+def _spinner_stop(spinner):
+ if spinner is None or \
+ spinner.update == spinner.update_quiet:
+ return
+
+ if spinner.update != spinner.update_basic:
+ # update_basic is used for non-tty output,
+ # so don't output backspaces in that case.
+ portage.writemsg_stdout("\b\b")
+
+ portage.writemsg_stdout("... done!\n")
+
+def backtrack_depgraph(settings, trees, myopts, myparams,
+ myaction, myfiles, spinner):
+ """
+ Raises PackageSetNotFound if myfiles contains a missing package set.
+ """
+ _spinner_start(spinner, myopts)
+ try:
+ return _backtrack_depgraph(settings, trees, myopts, myparams,
+ myaction, myfiles, spinner)
+ finally:
+ _spinner_stop(spinner)
+
+
+def _backtrack_depgraph(settings, trees, myopts, myparams, myaction, myfiles, spinner):
+
+ debug = "--debug" in myopts
+ mydepgraph = None
+ max_retries = myopts.get('--backtrack', 10)
+	max_depth = max(1, (max_retries + 1) // 2)
+ allow_backtracking = max_retries > 0
+ backtracker = Backtracker(max_depth)
+ backtracked = 0
+
+ frozen_config = _frozen_depgraph_config(settings, trees,
+ myopts, spinner)
+
+ while backtracker:
+
+ if debug and mydepgraph is not None:
+ writemsg_level(
+ "\n\nbacktracking try %s \n\n" % \
+ backtracked, noiselevel=-1, level=logging.DEBUG)
+ mydepgraph.display_problems()
+
+ backtrack_parameters = backtracker.get()
+
+ mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
+ frozen_config=frozen_config,
+ allow_backtracking=allow_backtracking,
+ backtrack_parameters=backtrack_parameters)
+ success, favorites = mydepgraph.select_files(myfiles)
+
+ if success or mydepgraph.success_without_autounmask():
+ break
+ elif not allow_backtracking:
+ break
+ elif backtracked >= max_retries:
+ break
+ elif mydepgraph.need_restart():
+ backtracked += 1
+ backtracker.feedback(mydepgraph.get_backtrack_infos())
+ else:
+ break
+
+ if not (success or mydepgraph.success_without_autounmask()) and backtracked:
+
+ if debug:
+ writemsg_level(
+ "\n\nbacktracking aborted after %s tries\n\n" % \
+ backtracked, noiselevel=-1, level=logging.DEBUG)
+ mydepgraph.display_problems()
+
+ mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
+ frozen_config=frozen_config,
+ allow_backtracking=False,
+ backtrack_parameters=backtracker.get_best_run())
+ success, favorites = mydepgraph.select_files(myfiles)
+
+ if not success and mydepgraph.autounmask_breakage_detected():
+ if debug:
+ writemsg_level(
+ "\n\nautounmask breakage detected\n\n",
+ noiselevel=-1, level=logging.DEBUG)
+ mydepgraph.display_problems()
+ myopts["--autounmask"] = "n"
+ mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
+ frozen_config=frozen_config, allow_backtracking=False)
+ success, favorites = mydepgraph.select_files(myfiles)
+
+ return (success, mydepgraph, favorites)
+
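+# Illustrative call (settings, trees and option dicts as assembled by
+# the emerge frontend; the "merge" action is an assumption here):
+#
+#   success, mydepgraph, favorites = backtrack_depgraph(
+#       settings, trees, myopts, myparams, "merge", myfiles, spinner)
+#   if not success:
+#       mydepgraph.display_problems()
+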
+
+def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
+ """
+ Raises PackageSetNotFound if myfiles contains a missing package set.
+ """
+ _spinner_start(spinner, myopts)
+ try:
+ return _resume_depgraph(settings, trees, mtimedb, myopts,
+ myparams, spinner)
+ finally:
+ _spinner_stop(spinner)
+
+def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
+ """
+ Construct a depgraph for the given resume list. This will raise
+ PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
+ TODO: Return reasons for dropped_tasks, for display/logging.
+ @rtype: tuple
+ @returns: (success, depgraph, dropped_tasks)
+ """
+ skip_masked = True
+ skip_unsatisfied = True
+ mergelist = mtimedb["resume"]["mergelist"]
+ dropped_tasks = set()
+ frozen_config = _frozen_depgraph_config(settings, trees,
+ myopts, spinner)
+ while True:
+ mydepgraph = depgraph(settings, trees,
+ myopts, myparams, spinner, frozen_config=frozen_config)
+ try:
+ success = mydepgraph._loadResumeCommand(mtimedb["resume"],
+ skip_masked=skip_masked)
+ except depgraph.UnsatisfiedResumeDep as e:
+ if not skip_unsatisfied:
+ raise
+
+ graph = mydepgraph._dynamic_config.digraph
+ unsatisfied_parents = dict((dep.parent, dep.parent) \
+ for dep in e.value)
+ traversed_nodes = set()
+ unsatisfied_stack = list(unsatisfied_parents)
+ while unsatisfied_stack:
+ pkg = unsatisfied_stack.pop()
+ if pkg in traversed_nodes:
+ continue
+ traversed_nodes.add(pkg)
+
+ # If this package was pulled in by a parent
+ # package scheduled for merge, removing this
+				# package may cause the parent package's
+ # dependency to become unsatisfied.
+ for parent_node in graph.parent_nodes(pkg):
+ if not isinstance(parent_node, Package) \
+ or parent_node.operation not in ("merge", "nomerge"):
+ continue
+ unsatisfied = \
+ graph.child_nodes(parent_node,
+ ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
+ if pkg in unsatisfied:
+ unsatisfied_parents[parent_node] = parent_node
+ unsatisfied_stack.append(parent_node)
+
+ unsatisfied_tuples = frozenset(tuple(parent_node)
+ for parent_node in unsatisfied_parents
+ if isinstance(parent_node, Package))
+ pruned_mergelist = []
+ for x in mergelist:
+ if isinstance(x, list) and \
+ tuple(x) not in unsatisfied_tuples:
+ pruned_mergelist.append(x)
+
+ # If the mergelist doesn't shrink then this loop is infinite.
+ if len(pruned_mergelist) == len(mergelist):
+ # This happens if a package can't be dropped because
+ # it's already installed, but it has unsatisfied PDEPEND.
+ raise
+ mergelist[:] = pruned_mergelist
+
+ # Exclude installed packages that have been removed from the graph due
+ # to failure to build/install runtime dependencies after the dependent
+ # package has already been installed.
+ dropped_tasks.update(pkg for pkg in \
+ unsatisfied_parents if pkg.operation != "nomerge")
+
+ del e, graph, traversed_nodes, \
+ unsatisfied_parents, unsatisfied_stack
+ continue
+ else:
+ break
+ return (success, mydepgraph, dropped_tasks)
+
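+# Sketch (hypothetical mtimedb carrying a "resume" entry saved by a
+# previous emerge run):
+#
+#   success, mydepgraph, dropped_tasks = resume_depgraph(
+#       settings, trees, mtimedb, myopts, myparams, spinner)
+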
+def get_mask_info(root_config, cpv, pkgsettings,
+	db, pkg_type, built, installed, db_keys, myrepo=None, _pkg_use_enabled=None):
+ eapi_masked = False
+ try:
+ metadata = dict(zip(db_keys,
+ db.aux_get(cpv, db_keys, myrepo=myrepo)))
+ except KeyError:
+ metadata = None
+
+ if metadata is None:
+ mreasons = ["corruption"]
+ else:
+ eapi = metadata['EAPI']
+ if eapi[:1] == '-':
+ eapi = eapi[1:]
+ if not portage.eapi_is_supported(eapi):
+ mreasons = ['EAPI %s' % eapi]
+ else:
+ pkg = Package(type_name=pkg_type, root_config=root_config,
+ cpv=cpv, built=built, installed=installed, metadata=metadata)
+
+ modified_use = None
+ if _pkg_use_enabled is not None:
+ modified_use = _pkg_use_enabled(pkg)
+
+ mreasons = get_masking_status(pkg, pkgsettings, root_config, myrepo=myrepo, use=modified_use)
+
+ return metadata, mreasons
+
+def show_masked_packages(masked_packages):
+ shown_licenses = set()
+ shown_comments = set()
+ # Maybe there is both an ebuild and a binary. Only
+	# show one of them to avoid redundant output.
+ shown_cpvs = set()
+ have_eapi_mask = False
+ for (root_config, pkgsettings, cpv, repo,
+ metadata, mreasons) in masked_packages:
+ output_cpv = cpv
+ if repo:
+ output_cpv += _repo_separator + repo
+ if output_cpv in shown_cpvs:
+ continue
+ shown_cpvs.add(output_cpv)
+ eapi_masked = metadata is not None and \
+ not portage.eapi_is_supported(metadata["EAPI"])
+ if eapi_masked:
+ have_eapi_mask = True
+ # When masked by EAPI, metadata is mostly useless since
+ # it doesn't contain essential things like SLOT.
+ metadata = None
+ comment, filename = None, None
+ if not eapi_masked and \
+ "package.mask" in mreasons:
+ comment, filename = \
+ portage.getmaskingreason(
+ cpv, metadata=metadata,
+ settings=pkgsettings,
+ portdb=root_config.trees["porttree"].dbapi,
+ return_location=True)
+ missing_licenses = []
+ if not eapi_masked and metadata is not None:
+ try:
+ missing_licenses = \
+ pkgsettings._getMissingLicenses(
+ cpv, metadata)
+ except portage.exception.InvalidDependString:
+ # This will have already been reported
+ # above via mreasons.
+ pass
+
+ writemsg_stdout("- "+output_cpv+" (masked by: "+", ".join(mreasons)+")\n", noiselevel=-1)
+
+ if comment and comment not in shown_comments:
+ writemsg_stdout(filename + ":\n" + comment + "\n",
+ noiselevel=-1)
+ shown_comments.add(comment)
+ portdb = root_config.trees["porttree"].dbapi
+ for l in missing_licenses:
+ l_path = portdb.findLicensePath(l)
+ if l in shown_licenses:
+ continue
+ msg = ("A copy of the '%s' license" + \
+ " is located at '%s'.\n\n") % (l, l_path)
+ writemsg_stdout(msg, noiselevel=-1)
+ shown_licenses.add(l)
+ return have_eapi_mask
+
+def show_mask_docs():
+ writemsg_stdout("For more information, see the MASKED PACKAGES section in the emerge\n", noiselevel=-1)
+ writemsg_stdout("man page or refer to the Gentoo Handbook.\n", noiselevel=-1)
+
+def show_blocker_docs_link():
+ writemsg("\nFor more information about " + bad("Blocked Packages") + ", please refer to the following\n", noiselevel=-1)
+ writemsg("section of the Gentoo Linux x86 Handbook (architecture is irrelevant):\n\n", noiselevel=-1)
+ writemsg("http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked\n\n", noiselevel=-1)
+
+def get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
+ return [mreason.message for \
+ mreason in _get_masking_status(pkg, pkgsettings, root_config, myrepo=myrepo, use=use)]
+
+def _get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
+ mreasons = _getmaskingstatus(
+ pkg, settings=pkgsettings,
+ portdb=root_config.trees["porttree"].dbapi, myrepo=myrepo)
+
+ if not pkg.installed:
+ if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
+ mreasons.append(_MaskReason("CHOST", "CHOST: %s" % \
+ pkg.metadata["CHOST"]))
+
+ if pkg.invalid:
+ for msg_type, msgs in pkg.invalid.items():
+ for msg in msgs:
+ mreasons.append(
+ _MaskReason("invalid", "invalid: %s" % (msg,)))
+
+ if not pkg.metadata["SLOT"]:
+ mreasons.append(
+ _MaskReason("invalid", "SLOT: undefined"))
+
+ return mreasons
diff --git a/portage_with_autodep/pym/_emerge/emergelog.py b/portage_with_autodep/pym/_emerge/emergelog.py
new file mode 100644
index 0000000..d6ef1b4
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/emergelog.py
@@ -0,0 +1,63 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import io
+import sys
+import time
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.data import secpass
+from portage.output import xtermTitle
+
+# We disable emergelog by default, since it's called from
+# dblink.merge() and we don't want that to trigger log writes
+# unless it's really called via emerge.
+_disable = True
+_emerge_log_dir = '/var/log'
+
+# Coerce to unicode, in order to prevent TypeError when writing
+# raw bytes to TextIOWrapper with python2.
+_log_fmt = _unicode_decode("%.0f: %s\n")
+
+def emergelog(xterm_titles, mystr, short_msg=None):
+
+ if _disable:
+ return
+
+ mystr = _unicode_decode(mystr)
+
+ if short_msg is not None:
+ short_msg = _unicode_decode(short_msg)
+
+ if xterm_titles and short_msg:
+ if "HOSTNAME" in os.environ:
+ short_msg = os.environ["HOSTNAME"]+": "+short_msg
+ xtermTitle(short_msg)
+ try:
+ file_path = os.path.join(_emerge_log_dir, 'emerge.log')
+ existing_log = os.path.isfile(file_path)
+ mylogfile = io.open(_unicode_encode(file_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='a', encoding=_encodings['content'],
+ errors='backslashreplace')
+ if not existing_log:
+ portage.util.apply_secpass_permissions(file_path,
+ uid=portage.portage_uid, gid=portage.portage_gid,
+ mode=0o660)
+ mylock = None
+ try:
+ mylock = portage.locks.lockfile(mylogfile)
+ mylogfile.write(_log_fmt % (time.time(), mystr))
+ mylogfile.flush()
+ finally:
+ if mylock:
+ portage.locks.unlockfile(mylock)
+ mylogfile.close()
+ except (IOError,OSError,portage.exception.PortageException) as e:
+ if secpass >= 1:
+ print("emergelog():",e, file=sys.stderr)
diff --git a/portage_with_autodep/pym/_emerge/getloadavg.py b/portage_with_autodep/pym/_emerge/getloadavg.py
new file mode 100644
index 0000000..e9babf1
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/getloadavg.py
@@ -0,0 +1,27 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+
+getloadavg = getattr(os, "getloadavg", None)
+if getloadavg is None:
+ def getloadavg():
+ """
+ Uses /proc/loadavg to emulate os.getloadavg().
+ Raises OSError if the load average was unobtainable.
+ """
+ try:
+ loadavg_str = open('/proc/loadavg').readline()
+ except IOError:
+ # getloadavg() is only supposed to raise OSError, so convert
+ raise OSError('unknown')
+ loadavg_split = loadavg_str.split()
+ if len(loadavg_split) < 3:
+ raise OSError('unknown')
+ loadavg_floats = []
+ for i in range(3):
+ try:
+ loadavg_floats.append(float(loadavg_split[i]))
+ except ValueError:
+ raise OSError('unknown')
+ return tuple(loadavg_floats)
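+
+# Usage sketch: mirrors os.getloadavg() on platforms that lack it
+# (returned values hypothetical):
+#
+#   from _emerge.getloadavg import getloadavg
+#   one, five, fifteen = getloadavg()  # e.g. (0.42, 0.35, 0.30)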
diff --git a/portage_with_autodep/pym/_emerge/help.py b/portage_with_autodep/pym/_emerge/help.py
new file mode 100644
index 0000000..c978ce2
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/help.py
@@ -0,0 +1,815 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+from portage.const import _ENABLE_DYN_LINK_MAP
+from portage.output import bold, turquoise, green
+
+def shorthelp():
+ print(bold("emerge:")+" the other white meat (command-line interface to the Portage system)")
+ print(bold("Usage:"))
+ print(" "+turquoise("emerge")+" [ "+green("options")+" ] [ "+green("action")+" ] [ "+turquoise("ebuild")+" | "+turquoise("tbz2")+" | "+turquoise("file")+" | "+turquoise("@set")+" | "+turquoise("atom")+" ] [ ... ]")
+ print(" "+turquoise("emerge")+" [ "+green("options")+" ] [ "+green("action")+" ] < "+turquoise("system")+" | "+turquoise("world")+" >")
+ print(" "+turquoise("emerge")+" < "+turquoise("--sync")+" | "+turquoise("--metadata")+" | "+turquoise("--info")+" >")
+ print(" "+turquoise("emerge")+" "+turquoise("--resume")+" [ "+green("--pretend")+" | "+green("--ask")+" | "+green("--skipfirst")+" ]")
+ print(" "+turquoise("emerge")+" "+turquoise("--help")+" [ "+green("--verbose")+" ] ")
+ print(bold("Options:")+" "+green("-")+"["+green("abBcCdDefgGhjkKlnNoOpPqrsStuvV")+"]")
+ print(" [ " + green("--color")+" < " + turquoise("y") + " | "+ turquoise("n")+" > ] [ "+green("--columns")+" ]")
+ print(" [ "+green("--complete-graph")+" ] [ "+green("--deep")+" ]")
+ print(" [ "+green("--jobs") + " " + turquoise("JOBS")+" ] [ "+green("--keep-going")+" ] [ " + green("--load-average")+" " + turquoise("LOAD") + " ]")
+ print(" [ "+green("--newuse")+" ] [ "+green("--noconfmem")+" ] [ "+green("--nospinner")+" ]")
+ print(" [ "+green("--oneshot")+" ] [ "+green("--onlydeps")+" ]")
+ print(" [ "+green("--reinstall ")+turquoise("changed-use")+" ] [ " + green("--with-bdeps")+" < " + turquoise("y") + " | "+ turquoise("n")+" > ]")
+ print(bold("Actions:")+" [ "+green("--depclean")+" | "+green("--list-sets")+" | "+green("--search")+" | "+green("--sync")+" | "+green("--version")+" ]")
+
+def help(myopts, havecolor=1):
+ # TODO: Implement a wrap() that accounts for console color escape codes.
+ from textwrap import wrap
+ desc_left_margin = 14
+ desc_indent = desc_left_margin * " "
+ desc_width = 80 - desc_left_margin - 5
+ if "--verbose" not in myopts:
+ shorthelp()
+ print()
+ print(" For more help try 'emerge --help --verbose' or consult the man page.")
+ else:
+ shorthelp()
+ print()
+ print(turquoise("Help (this screen):"))
+ print(" "+green("--help")+" ("+green("-h")+" short option)")
+ print(" Displays this help; an additional argument (see above) will tell")
+ print(" emerge to display detailed help.")
+ print()
+ print(turquoise("Actions:"))
+ print(" "+green("--clean"))
+ print(" Cleans the system by removing outdated packages which will not")
+ print(" remove functionalities or prevent your system from working.")
+ print(" The arguments can be in several different formats :")
+ print(" * world ")
+ print(" * system or")
+ print(" * 'dependency specification' (in single quotes is best.)")
+ print(" Here are a few examples of the dependency specification format:")
+ print(" "+bold("binutils")+" matches")
+ print(" binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1")
+ print(" "+bold("sys-devel/binutils")+" matches")
+ print(" binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1")
+ print(" "+bold(">sys-devel/binutils-2.11.90.0.7")+" matches")
+ print(" binutils-2.11.92.0.12.3-r1")
+ print(" "+bold(">=sys-devel/binutils-2.11.90.0.7")+" matches")
+ print(" binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1")
+ print(" "+bold("<=sys-devel/binutils-2.11.92.0.12.3-r1")+" matches")
+ print(" binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1")
+ print()
+ print(" "+green("--config"))
+ print(" Runs package-specific operations that must be executed after an")
+ print(" emerge process has completed. This usually entails configuration")
+ print(" file setup or other similar setups that the user may wish to run.")
+ print()
+ print(" "+green("--depclean")+" ("+green("-c")+" short option)")
+
+ paragraph = "Cleans the system by removing packages that are " + \
+ "not associated with explicitly merged packages. Depclean works " + \
+ "by creating the full dependency tree from the " + \
+ "@world set, then comparing it to installed packages. Packages " + \
+ "installed, but not part of the dependency tree, will be " + \
+ "uninstalled by depclean. See --with-bdeps for behavior with " + \
+ "respect to build time dependencies that are not strictly " + \
+ "required. Packages that are part of the world set will " + \
+ "always be kept. They can be manually added to this set with " + \
+ "emerge --noreplace <atom>. As a safety measure, depclean " + \
+ "will not remove any packages unless *all* required dependencies " + \
+ "have been resolved. As a consequence, it is often necessary to " + \
+ "run emerge --update --newuse --deep @world " + \
+ "prior to depclean."
+
+ for line in wrap(paragraph, desc_width):
+ print(desc_indent + line)
+ print()
+
+ paragraph = "WARNING: Inexperienced users are advised to use " + \
+ "--pretend with this option in order to see a preview of which " + \
+ "packages will be uninstalled. Always study the list of packages " + \
+ "to be cleaned for any obvious mistakes. Note that packages " + \
+ "listed in package.provided (see portage(5)) may be removed by " + \
+ "depclean, even if they are part of the world set."
+
+ paragraph += " Also note that " + \
+ "depclean may break link level dependencies"
+
+ if _ENABLE_DYN_LINK_MAP:
+ paragraph += ", especially when the " + \
+ "--depclean-lib-check option is disabled"
+
+ paragraph += ". Thus, it is " + \
+ "recommended to use a tool such as revdep-rebuild(1) " + \
+ "in order to detect such breakage."
+
+ for line in wrap(paragraph, desc_width):
+ print(desc_indent + line)
+ print()
+
+ paragraph = "Depclean serves as a dependency aware version of " + \
+ "--unmerge. When given one or more atoms, it will unmerge " + \
+ "matched packages that have no reverse dependencies. Use " + \
+ "--depclean together with --verbose to show reverse dependencies."
+
+ for line in wrap(paragraph, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" " + green("--deselect") + " [ %s | %s ]" % \
+ (turquoise("y"), turquoise("n")))
+
+ paragraph = \
+ "Remove atoms and/or sets from the world file. This action is implied " + \
+ "by uninstall actions, including --depclean, " + \
+ "--prune and --unmerge. Use --deselect=n " + \
+ "in order to prevent uninstall actions from removing " + \
+ "atoms from the world file."
+
+ for line in wrap(paragraph, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" " + green("--ignore-default-opts"))
+
+ paragraph = \
+ "Causes EMERGE_DEFAULT_OPTS (see make.conf(5)) to be ignored."
+
+ for line in wrap(paragraph, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" "+green("--info"))
+ print(" Displays important portage variables that will be exported to")
+ print(" ebuild.sh when performing merges. This information is useful")
+ print(" for bug reports and verification of settings. All settings in")
+ print(" make.{conf,globals,defaults} and the environment show up if")
+ print(" run with the '--verbose' flag.")
+ print()
+ print(" " + green("--list-sets"))
+ paragraph = "Displays a list of available package sets."
+
+ for line in wrap(paragraph, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" "+green("--metadata"))
+ print(" Transfers metadata cache from ${PORTDIR}/metadata/cache/ to")
+ print(" /var/cache/edb/dep/ as is normally done on the tail end of an")
+ print(" rsync update using " + bold("emerge --sync") + ". This process populates the")
+ print(" cache database that portage uses for pre-parsed lookups of")
+ print(" package data. It does not populate cache for the overlays")
+ print(" listed in PORTDIR_OVERLAY. In order to generate cache for")
+ print(" overlays, use " + bold("--regen") + ".")
+ print()
+ print(" "+green("--prune")+" ("+green("-P")+" short option)")
+ print(" "+turquoise("WARNING: This action can remove important packages!"))
+ paragraph = "Removes all but the highest installed version of a " + \
+ "package from your system. Use --prune together with " + \
+ "--verbose to show reverse dependencies or with --nodeps " + \
+ "to ignore all dependencies. "
+
+ for line in wrap(paragraph, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" "+green("--regen"))
+ print(" Causes portage to check and update the dependency cache of all")
+ print(" ebuilds in the portage tree. This is not recommended for rsync")
+ print(" users as rsync updates the cache using server-side caches.")
+ print(" Rsync users should simply 'emerge --sync' to regenerate.")
+ desc = "In order to specify parallel --regen behavior, use "+ \
+ "the ---jobs and --load-average options. If you would like to " + \
+ "generate and distribute cache for use by others, use egencache(1)."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" "+green("--resume")+" ("+green("-r")+" short option)")
+ print(" Resumes the most recent merge list that has been aborted due to an")
+ print(" error. Please note that this operation will only return an error")
+ print(" on failure. If there is nothing for portage to do, then portage")
+ print(" will exit with a message and a success condition. A resume list")
+ print(" will persist until it has been completed in entirety or until")
+ print(" another aborted merge list replaces it. The resume history is")
+ print(" capable of storing two merge lists. After one resume list")
+ print(" completes, it is possible to invoke --resume once again in order")
+ print(" to resume an older list.")
+ print()
+ print(" "+green("--search")+" ("+green("-s")+" short option)")
+ print(" Searches for matches of the supplied string in the current local")
+ print(" portage tree. By default emerge uses a case-insensitive simple ")
+ print(" search, but you can enable a regular expression search by ")
+ print(" prefixing the search string with %.")
+ print(" Prepending the expression with a '@' will cause the category to")
+ print(" be included in the search.")
+ print(" A few examples:")
+ print(" "+bold("emerge --search libc"))
+ print(" list all packages that contain libc in their name")
+ print(" "+bold("emerge --search '%^kde'"))
+ print(" list all packages starting with kde")
+ print(" "+bold("emerge --search '%gcc$'"))
+ print(" list all packages ending with gcc")
+ print(" "+bold("emerge --search '%@^dev-java.*jdk'"))
+ print(" list all available Java JDKs")
+ print()
+ print(" "+green("--searchdesc")+" ("+green("-S")+" short option)")
+ print(" Matches the search string against the description field as well")
+ print(" the package's name. Take caution as the descriptions are also")
+ print(" matched as regular expressions.")
+ print(" emerge -S html")
+ print(" emerge -S applet")
+ print(" emerge -S 'perl.*module'")
+ print()
+ print(" "+green("--sync"))
+ desc = "This updates the portage tree that is located in the " + \
+ "directory that the PORTDIR variable refers to (default " + \
+ "location is /usr/portage). The SYNC variable specifies " + \
+ "the remote URI from which files will be synchronized. " + \
+ "The PORTAGE_SYNC_STALE variable configures " + \
+ "warnings that are shown when emerge --sync has not " + \
+ "been executed recently."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(desc_indent + turquoise("WARNING:"))
+ desc = "The emerge --sync action will modify and/or delete " + \
+ "files located inside the directory that the PORTDIR " + \
+ "variable refers to (default location is /usr/portage). " + \
+ "For more information, see the PORTDIR documentation in " + \
+ "the make.conf(5) man page."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(desc_indent + green("NOTE:"))
+ desc = "The emerge-webrsync program will download the entire " + \
+ "portage tree as a tarball, which is much faster than emerge " + \
+ "--sync for first time syncs."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" "+green("--unmerge")+" ("+green("-C")+" short option)")
+ print(" "+turquoise("WARNING: This action can remove important packages!"))
+ print(" Removes all matching packages. This does no checking of")
+ print(" dependencies, so it may remove packages necessary for the proper")
+ print(" operation of your system. Its arguments can be atoms or")
+ print(" ebuilds. For a dependency aware version of --unmerge, use")
+ print(" --depclean or --prune.")
+ print()
+ print(" "+green("--version")+" ("+green("-V")+" short option)")
+ print(" Displays the currently installed version of portage along with")
+ print(" other information useful for quick reference on a system. See")
+ print(" "+bold("emerge info")+" for more advanced information.")
+ print()
+ print(turquoise("Options:"))
+ print(" "+green("--accept-properties=ACCEPT_PROPERTIES"))
+ desc = "This option temporarily overrides the ACCEPT_PROPERTIES " + \
+ "variable. The ACCEPT_PROPERTIES variable is incremental, " + \
+ "which means that the specified setting is appended to the " + \
+ "existing value from your configuration. The special -* " + \
+ "token can be used to discard the existing configuration " + \
+ "value and start fresh. See the MASKED PACKAGES section " + \
+ "and make.conf(5) for more information about " + \
+ "ACCEPT_PROPERTIES. A typical usage example for this option " + \
+ "would be to use --accept-properties=-interactive to " + \
+ "temporarily mask interactive packages. With default " + \
+ "configuration, this would result in an effective " + \
+ "ACCEPT_PROPERTIES value of \"* -interactive\"."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" "+green("--alphabetical"))
+ print(" When displaying USE and other flag output, combines the enabled")
+ print(" and disabled flags into a single list and sorts it alphabetically.")
+ print(" With this option, output such as USE=\"dar -bar -foo\" will instead")
+ print(" be displayed as USE=\"-bar dar -foo\"")
+ print()
+ print(" " + green("--ask") + \
+ " [ %s | %s ] (%s short option)" % \
+ (turquoise("y"), turquoise("n"), green("-a")))
+ desc = "Before performing the action, display what will take place (server info for " + \
+ "--sync, --pretend output for merge, and so forth), then ask " + \
+ "whether to proceed with the action or abort. Using --ask is more " + \
+ "efficient than using --pretend and then executing the same command " + \
+ "without --pretend, as dependencies will only need to be calculated once. " + \
+ "WARNING: If the \"Enter\" key is pressed at the prompt (with no other input), " + \
+ "it is interpreted as acceptance of the first choice. Note that the input " + \
+ "buffer is not cleared prior to the prompt, so an accidental press of the " + \
+ "\"Enter\" key at any time prior to the prompt will be interpreted as a choice! " + \
+ "Use the --ask-enter-invalid option if you want a single \"Enter\" key " + \
+ "press to be interpreted as invalid input."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" " + green("--ask-enter-invalid"))
+ desc = "When used together with the --ask option, " + \
+ "interpret a single \"Enter\" key press as " + \
+ "invalid input. This helps prevent accidental " + \
+ "acceptance of the first choice. This option is " + \
+ "intended to be set in the make.conf(5) " + \
+ "EMERGE_DEFAULT_OPTS variable."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" " + green("--autounmask") + " [ %s | %s ]" % \
+ (turquoise("y"), turquoise("n")))
+ desc = "Automatically unmask packages and generate package.use " + \
+ "settings as necessary to satisfy dependencies. This " + \
+ "option is enabled by default. If any configuration " + \
+ "changes are required, then they will be displayed " + \
+ "after the merge list and emerge will immediately " + \
+ "abort. If the displayed configuration changes are " + \
+ "satisfactory, you should copy and paste them into " + \
+ "the specified configuration file(s), or enable the " + \
+ "--autounmask-write option. The " + \
+ "EMERGE_DEFAULT_OPTS variable may be used to " + \
+ "disable this option by default in make.conf(5)."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" " + green("--autounmask-write") + " [ %s | %s ]" % \
+ (turquoise("y"), turquoise("n")))
+ desc = "If --autounmask is enabled, changes are written " + \
+ "to config files, respecting CONFIG_PROTECT and --ask."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" " + green("--backtrack") + " " + turquoise("COUNT"))
+ desc = "Specifies an integer number of times to backtrack if " + \
+ "dependency calculation fails due to a conflict or an " + \
+ "unsatisfied dependency (default: '10')."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" " + green("--binpkg-respect-use") + " [ %s | %s ]" % \
+ (turquoise("y"), turquoise("n")))
+ desc = "Tells emerge to ignore binary packages if their use flags" + \
+ " don't match the current configuration. (default: 'n')"
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" " + green("--buildpkg") + \
+ " [ %s | %s ] (%s short option)" % \
+ (turquoise("y"), turquoise("n"), green("-b")))
+ desc = "Tells emerge to build binary packages for all ebuilds processed in" + \
+ " addition to actually merging the packages. Useful for maintainers" + \
+ " or if you administrate multiple Gentoo Linux systems (build once," + \
+ " emerge tbz2s everywhere) as well as disaster recovery. The package" + \
+ " will be created in the" + \
+ " ${PKGDIR}/All directory. An alternative for already-merged" + \
+ " packages is to use quickpkg(1) which creates a tbz2 from the" + \
+ " live filesystem."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" "+green("--buildpkgonly")+" ("+green("-B")+" short option)")
+ print(" Creates a binary package, but does not merge it to the")
+ print(" system. This has the restriction that unsatisfied dependencies")
+ print(" must not exist for the desired package as they cannot be used if")
+ print(" they do not exist on the system.")
+ print()
+ print(" " + green("--changed-use"))
+ desc = "This is an alias for --reinstall=changed-use."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" "+green("--changelog")+" ("+green("-l")+" short option)")
+ print(" When pretending, also display the ChangeLog entries for packages")
+ print(" that will be upgraded.")
+ print()
+ print(" "+green("--color") + " < " + turquoise("y") + " | "+ turquoise("n")+" >")
+ print(" Enable or disable color output. This option will override NOCOLOR")
+ print(" (see make.conf(5)) and may also be used to force color output when")
+ print(" stdout is not a tty (by default, color is disabled unless stdout")
+ print(" is a tty).")
+ print()
+ print(" "+green("--columns"))
+ print(" Display the pretend output in a tabular form. Versions are")
+ print(" aligned vertically.")
+ print()
+ print(" "+green("--complete-graph") + " [ %s | %s ]" % \
+ (turquoise("y"), turquoise("n")))
+ desc = "This causes emerge to consider the deep dependencies of all" + \
+ " packages from the world set. With this option enabled," + \
+ " emerge will bail out if it determines that the given operation will" + \
+ " break any dependencies of the packages that have been added to the" + \
+ " graph. Like the --deep option, the --complete-graph" + \
+ " option will significantly increase the time taken for dependency" + \
+ " calculations. Note that, unlike the --deep option, the" + \
+ " --complete-graph option does not cause any more packages to" + \
+ " be updated than would have otherwise " + \
+ "been updated with the option disabled. " + \
+ "Using --with-bdeps=y together with --complete-graph makes " + \
+ "the graph as complete as possible."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" "+green("--config-root=DIR"))
+ desc = "Set the PORTAGE_CONFIGROOT environment variable " + \
+ "which is documented in the emerge(1) man page."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" "+green("--debug")+" ("+green("-d")+" short option)")
+ print(" Tell emerge to run the ebuild command in --debug mode. In this")
+ print(" mode, the bash build environment will run with the -x option,")
+ print(" causing it to output verbose debug information print to stdout.")
+ print(" --debug is great for finding bash syntax errors as providing")
+ print(" very verbose information about the dependency and build process.")
+ print()
+ print(" "+green("--deep") + " " + turquoise("[DEPTH]") + \
+ " (" + green("-D") + " short option)")
+ print(" This flag forces emerge to consider the entire dependency tree of")
+ print(" packages, instead of checking only the immediate dependencies of")
+ print(" the packages. As an example, this catches updates in libraries")
+ print(" that are not directly listed in the dependencies of a package.")
+ print(" Also see --with-bdeps for behavior with respect to build time")
+ print(" dependencies that are not strictly required.")
+ print()
+
+ if _ENABLE_DYN_LINK_MAP:
+ print(" " + green("--depclean-lib-check") + " [ %s | %s ]" % \
+ (turquoise("y"), turquoise("n")))
+ desc = "Account for library link-level dependencies during " + \
+ "--depclean and --prune actions. This " + \
+ "option is enabled by default. In some cases this can " + \
+ "be somewhat time-consuming. This option is ignored " + \
+ "when FEATURES=\"preserve-libs\" is enabled in " + \
+ "make.conf(5), since any libraries that have " + \
+ "consumers will simply be preserved."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+
+ print(" "+green("--emptytree")+" ("+green("-e")+" short option)")
+ desc = "Reinstalls target atoms and their entire deep " + \
+ "dependency tree, as though no packages are currently " + \
+ "installed. You should run this with --pretend " + \
+ "first to make sure the result is what you expect."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" " + green("--exclude") + " " + turquoise("ATOMS"))
+ desc = "A space separated list of package names or slot atoms. " + \
+ "Emerge won't install any ebuild or binary package that " + \
+ "matches any of the given package atoms."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" " + green("--fail-clean") + " [ %s | %s ]" % \
+ (turquoise("y"), turquoise("n")))
+ desc = "Clean up temporary files after a build failure. This is " + \
+ "particularly useful if you have PORTAGE_TMPDIR on " + \
+ "tmpfs. If this option is enabled, you probably also want " + \
+ "to enable PORT_LOGDIR (see make.conf(5)) in " + \
+ "order to save the build log."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" "+green("--fetchonly")+" ("+green("-f")+" short option)")
+ print(" Instead of doing any package building, just perform fetches for")
+ print(" all packages (main package as well as all dependencies.) When")
+ print(" used in combination with --pretend all the SRC_URIs will be")
+ print(" displayed multiple mirrors per line, one line per file.")
+ print()
+ print(" "+green("--fetch-all-uri")+" ("+green("-F")+" short option)")
+ print(" Same as --fetchonly except that all package files, including those")
+ print(" not required to build the package, will be processed.")
+ print()
+ print(" " + green("--getbinpkg") + \
+ " [ %s | %s ] (%s short option)" % \
+ (turquoise("y"), turquoise("n"), green("-g")))
+ print(" Using the server and location defined in PORTAGE_BINHOST, portage")
+ print(" will download the information from each binary file there and it")
+ print(" will use that information to help build the dependency list. This")
+ print(" option implies '-k'. (Use -gK for binary-only merging.)")
+ print()
+ print(" " + green("--getbinpkgonly") + \
+ " [ %s | %s ] (%s short option)" % \
+ (turquoise("y"), turquoise("n"), green("-G")))
+ print(" This option is identical to -g, as above, except it will not use")
+ print(" ANY information from the local machine. All binaries will be")
+ print(" downloaded from the remote server without consulting packages")
+ print(" existing in the packages directory.")
+ print()
+ print(" " + green("--jobs") + " " + turquoise("[JOBS]") + " ("+green("-j")+" short option)")
+ desc = "Specifies the number of packages " + \
+ "to build simultaneously. If this option is " + \
+ "given without an argument, emerge will not " + \
+ "limit the number of jobs that " + \
+ "can run simultaneously. Also see " + \
+ "the related --load-average option. " + \
+ "Note that interactive packages currently force a setting " + \
+ "of --jobs=1. This issue can be temporarily avoided " + \
+ "by specifying --accept-properties=-interactive."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" " + green("--keep-going") + " [ %s | %s ]" % \
+ (turquoise("y"), turquoise("n")))
+ desc = "Continue as much as possible after " + \
+ "an error. When an error occurs, " + \
+ "dependencies are recalculated for " + \
+ "remaining packages and any with " + \
+ "unsatisfied dependencies are " + \
+ "automatically dropped. Also see " + \
+ "the related --skipfirst option."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" " + green("--load-average") + " " + turquoise("LOAD"))
+ desc = "Specifies that no new builds should " + \
+ "be started if there are other builds " + \
+ "running and the load average is at " + \
+ "least LOAD (a floating-point number). " + \
+ "This option is recommended for use " + \
+ "in combination with --jobs in " + \
+ "order to avoid excess load. See " + \
+ "make(1) for information about " + \
+ "analogous options that should be " + \
+ "configured via MAKEOPTS in " + \
+ "make.conf(5)."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
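+ # (For reference, a typical invocation combining these options
+ # might be "emerge --jobs=4 --load-average=4.0 @world".)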
+ print(" " + green("--misspell-suggestions") + " < %s | %s >" % \
+ (turquoise("y"), turquoise("n")))
+ desc = "Enable or disable misspell suggestions. By default, " + \
+ "emerge will show a list of packages with similar names " + \
+ "when a package doesn't exist. The EMERGE_DEFAULT_OPTS " + \
+ "variable may be used to disable this option by default"
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" "+green("--newuse")+" ("+green("-N")+" short option)")
+ desc = "Tells emerge to include installed packages where USE " + \
+ "flags have changed since compilation. This option " + \
+ "also implies the --selective option. If you would " + \
+ "like to skip rebuilds for which disabled flags have " + \
+ "been added to or removed from IUSE, see the related " + \
+ "--reinstall=changed-use option."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" "+green("--noconfmem"))
+ print(" Portage keeps track of files that have been placed into")
+ print(" CONFIG_PROTECT directories, and normally it will not merge the")
+ print(" same file more than once, as that would become annoying. This")
+ print(" can lead to problems when the user wants the file in the case")
+ print(" of accidental deletion. With this option, files will always be")
+ print(" merged to the live fs instead of silently dropped.")
+ print()
+ print(" "+green("--nodeps")+" ("+green("-O")+" short option)")
+ print(" Merge specified packages, but don't merge any dependencies.")
+ print(" Note that the build may fail if deps aren't satisfied.")
+ print()
+ print(" "+green("--noreplace")+" ("+green("-n")+" short option)")
+ print(" Skip the packages specified on the command-line that have")
+ print(" already been installed. Without this option, any packages,")
+ print(" ebuilds, or deps you specify on the command-line *will* cause")
+ print(" Portage to remerge the package, even if it is already installed.")
+ print(" Note that Portage won't remerge dependencies by default.")
+ print()
+ print(" "+green("--nospinner"))
+ print(" Disables the spinner regardless of terminal type.")
+ print()
+ print(" " + green("--usepkg-exclude") + " " + turquoise("ATOMS"))
+ desc = "A space separated list of package names or slot atoms." + \
+ " Emerge will ignore matching binary packages."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" " + green("--rebuild-exclude") + " " + turquoise("ATOMS"))
+ desc = "A space separated list of package names or slot atoms." + \
+ " Emerge will not rebuild matching packages due to --rebuild."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" " + green("--rebuild-ignore") + " " + turquoise("ATOMS"))
+ desc = "A space separated list of package names or slot atoms." + \
+ " Emerge will not rebuild packages that depend on matching " + \
+ " packages due to --rebuild."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" "+green("--oneshot")+" ("+green("-1")+" short option)")
+ print(" Emerge as normal, but don't add packages to the world profile.")
+ print(" This package will only be updated if it is depended upon by")
+ print(" another package.")
+ print()
+ print(" "+green("--onlydeps")+" ("+green("-o")+" short option)")
+ print(" Only merge (or pretend to merge) the dependencies of the")
+ print(" specified packages, not the packages themselves.")
+ print()
+ print(" " + green("--package-moves") + " [ %s | %s ]" % \
+ (turquoise("y"), turquoise("n")))
+ desc = "Perform package moves when necessary. This option " + \
+ "is enabled by default. WARNING: This option " + \
+ "should remain enabled under normal circumstances. " + \
+ "Do not disable it unless you know what you are " + \
+ "doing."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" "+green("--pretend")+" ("+green("-p")+" short option)")
+ print(" Instead of actually performing the merge, simply display what")
+ print(" ebuilds and tbz2s *would* have been installed if --pretend")
+ print(" weren't used. Using --pretend is strongly recommended before")
+ print(" installing an unfamiliar package. In the printout, N = new,")
+ print(" U = updating, R = replacing, F = fetch restricted, B = blocked")
+ print(" by an already installed package, D = possible downgrading,")
+ print(" S = slotted install. --verbose causes affecting use flags to be")
+ print(" printed out accompanied by a '+' for enabled and a '-' for")
+ print(" disabled USE flags.")
+ print()
+ print(" " + green("--quiet") + \
+ " [ %s | %s ] (%s short option)" % \
+ (turquoise("y"), turquoise("n"), green("-q")))
+ print(" Effects vary, but the general outcome is a reduced or condensed")
+ print(" output from portage's displays.")
+ print()
+ print(" " + green("--quiet-build") + \
+ " [ %s | %s ]" % (turquoise("y"), turquoise("n")))
+ desc = "Redirect all build output to logs alone, and do not " + \
+ "display it on stdout."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" "+green("--quiet-unmerge-warn"))
+ desc = "Disable the warning message that's shown prior to " + \
+ "--unmerge actions. This option is intended " + \
+ "to be set in the make.conf(5) " + \
+ "EMERGE_DEFAULT_OPTS variable."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" " + green("--rebuild-if-new-rev") + " [ %s | %s ]" % \
+ (turquoise("y"), turquoise("n")))
+ desc = "Rebuild packages when dependencies that are " + \
+ "used at both build-time and run-time are built, " + \
+ "if the dependency is not already installed with the " + \
+ "same version and revision."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" " + green("--rebuild-if-new-ver") + " [ %s | %s ]" % \
+ (turquoise("y"), turquoise("n")))
+ desc = "Rebuild packages when dependencies that are " + \
+ "used at both build-time and run-time are built, " + \
+ "if the dependency is not already installed with the " + \
+ "same version. Revision numbers are ignored."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" " + green("--rebuild-if-unbuilt") + " [ %s | %s ]" % \
+ (turquoise("y"), turquoise("n")))
+ desc = "Rebuild packages when dependencies that are " + \
+ "used at both build-time and run-time are built."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" " + green("--rebuilt-binaries") + " [ %s | %s ]" % \
+ (turquoise("y"), turquoise("n")))
+ desc = "Replace installed packages with binary packages that have " + \
+ "been rebuilt. Rebuilds are detected by comparison of " + \
+ "BUILD_TIME package metadata. This option is enabled " + \
+ "automatically when using binary packages " + \
+ "(--usepkgonly or --getbinpkgonly) together with " + \
+ "--update and --deep."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" "+green("--rebuilt-binaries-timestamp") + "=%s" % turquoise("TIMESTAMP"))
+ desc = "This option modifies emerge's behaviour only if " + \
+ "--rebuilt-binaries is given. Only binaries that " + \
+ "have a BUILD_TIME that is larger than the given TIMESTAMP " + \
+ "and that is larger than that of the installed package will " + \
+ "be considered by the rebuilt-binaries logic."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" "+green("--reinstall ") + turquoise("changed-use"))
+ print(" Tells emerge to include installed packages where USE flags have")
+ print(" changed since installation. Unlike --newuse, this option does")
+ print(" not trigger reinstallation when flags that the user has not")
+ print(" enabled are added or removed.")
+ print()
+ print(" " + green("--reinstall-atoms") + " " + turquoise("ATOMS"))
+ desc = "A space separated list of package names or slot atoms. " + \
+ "Emerge will treat matching packages as if they are not " + \
+ "installed, and reinstall them if necessary."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" "+green("--root=DIR"))
+ desc = "Set the ROOT environment variable " + \
+ "which is documented in the emerge(1) man page."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" "+green("--root-deps[=rdeps]"))
+ desc = "If no argument is given then build-time dependencies of packages for " + \
+ "ROOT are installed to " + \
+ "ROOT instead of /. If the rdeps argument is given then discard " + \
+ "all build-time dependencies of packages for ROOT. This option is " + \
+ "only meaningful when used together with ROOT and it should not " + \
+ "be enabled under normal circumstances. For currently supported " + \
+ "EAPI values, the build-time dependencies are specified in the " + \
+ "DEPEND variable. However, behavior may change for new " + \
+ "EAPIs when related extensions are added in the future."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" " + green("--select") + " [ %s | %s ]" % \
+ (turquoise("y"), turquoise("n")))
+ desc = "Add specified packages to the world set (inverse of " + \
+ "--oneshot). This is useful if you want to " + \
+ "use EMERGE_DEFAULT_OPTS to make " + \
+ "--oneshot behavior default."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" " + green("--selective") + " [ %s | %s ]" % \
+ (turquoise("y"), turquoise("n")))
+ desc = "This identical to the --noreplace option. " + \
+ "Some options, such as --update, imply --selective. " + \
+ "Use --selective=n if you want to forcefully disable " + \
+ "--selective, regardless of options like --update."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" "+green("--skipfirst"))
+ desc = "This option is only valid when " + \
+ "used with --resume. It removes the " + \
+ "first package in the resume list. " + \
+ "Dependencies are recalculated for " + \
+ "remaining packages and any that " + \
+ "have unsatisfied dependencies or are " + \
+ "masked will be automatically dropped. " + \
+ "Also see the related " + \
+ "--keep-going option."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" "+green("--tree")+" ("+green("-t")+" short option)")
+ print(" Shows the dependency tree using indentation for dependencies.")
+ print(" The packages are also listed in reverse merge order so that")
+ print(" a package's dependencies follow the package. Only really useful")
+ print(" in combination with --emptytree, --update or --deep.")
+ print()
+ print(" " + green("--unordered-display"))
+ desc = "By default the displayed merge list is sorted using the " + \
+ "order in which the packages will be merged. When " + \
+ "--tree is used together with this option, this " + \
+ "constraint is removed, hopefully leading to a more " + \
+ "readable dependency tree."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" "+green("--update")+" ("+green("-u")+" short option)")
+ desc = "Updates packages to the best version available, which may " + \
+ "not always be the highest version number due to masking " + \
+ "for testing and development. Package atoms specified on " + \
+ "the command line are greedy, meaning that unspecific " + \
+ "atoms may match multiple versions of slotted packages."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" " + green("--use-ebuild-visibility") + " [ %s | %s ]" % \
+ (turquoise("y"), turquoise("n")))
+ desc = "Use unbuilt ebuild metadata for visibility " + \
+ "checks on built packages."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" " + green("--useoldpkg-atoms") + " " + turquoise("ATOMS"))
+ desc = "A space separated list of package names or slot atoms." + \
+ " Emerge will prefer matching binary packages over newer" + \
+ " unbuilt packages."
+ for line in wrap(desc, desc_width):
+ print(desc_indent + line)
+ print()
+ print(" " + green("--usepkg") + \
+ " [ %s | %s ] (%s short option)" % \
+ (turquoise("y"), turquoise("n"), green("-k")))
+ print(" Tell emerge to use binary packages (from $PKGDIR) if they are")
+ print(" available, thus possibly avoiding some time-consuming compiles.")
+ print(" This option is useful for CD installs; you can export")
+ print(" PKGDIR=/mnt/cdrom/packages and then use this option to have")
+ print(" emerge \"pull\" binary packages from the CD in order to satisfy")
+ print(" dependencies.")
+ print()
+ print(" " + green("--usepkgonly") + \
+ " [ %s | %s ] (%s short option)" % \
+ (turquoise("y"), turquoise("n"), green("-K")))
+ print(" Like --usepkg above, except this only allows the use of binary")
+ print(" packages, and it will abort the emerge if the package is not")
+ print(" available at the time of dependency calculation.")
+ print()
+ print(" "+green("--verbose")+" ("+green("-v")+" short option)")
+ print(" Effects vary, but the general outcome is an increased or expanded")
+ print(" display of content in portage's displays.")
+ print()
+ print(" "+green("--with-bdeps")+" < " + turquoise("y") + " | "+ turquoise("n")+" >")
+ print(" In dependency calculations, pull in build time dependencies that")
+ print(" are not strictly required. This defaults to 'n' for installation")
+ print(" actions and 'y' for the --depclean action. This setting can be")
+ print(" added to EMERGE_DEFAULT_OPTS (see make.conf(5)) and later")
+ print(" overridden via the command line.")
+ print()
diff --git a/portage_with_autodep/pym/_emerge/is_valid_package_atom.py b/portage_with_autodep/pym/_emerge/is_valid_package_atom.py
new file mode 100644
index 0000000..7cb2a5b
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/is_valid_package_atom.py
@@ -0,0 +1,21 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import re
+from portage.dep import isvalidatom
+
+def insert_category_into_atom(atom, category):
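+ # Insert the category in front of the first alphanumeric character so
+ # that any leading operator is preserved; e.g. with category "cat",
+ # ">=foo-1.0" becomes ">=cat/foo-1.0". Returns None if the atom
+ # contains no alphanumeric characters.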
+ alphanum = re.search(r'\w', atom)
+ if alphanum:
+ ret = atom[:alphanum.start()] + "%s/" % category + \
+ atom[alphanum.start():]
+ else:
+ ret = None
+ return ret
+
+def is_valid_package_atom(x, allow_repo=False):
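+ # Category-less atoms are validated by temporarily inserting a dummy
+ # "cat" category so that a bare name such as "bash" can be checked
+ # with isvalidatom(). Blockers are never allowed here.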
+ if "/" not in x:
+ x2 = insert_category_into_atom(x, 'cat')
+ if x2 is not None:
+ x = x2
+ return isvalidatom(x, allow_blockers=False, allow_repo=allow_repo)
diff --git a/portage_with_autodep/pym/_emerge/main.py b/portage_with_autodep/pym/_emerge/main.py
new file mode 100644
index 0000000..2830214
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/main.py
@@ -0,0 +1,1910 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import logging
+import signal
+import stat
+import sys
+import textwrap
+import platform
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+import _emerge.help
+import portage.xpak, errno, re, time
+from portage.output import colorize, xtermTitle, xtermTitleReset
+from portage.output import create_color_func
+good = create_color_func("GOOD")
+bad = create_color_func("BAD")
+
+from portage.const import _ENABLE_DYN_LINK_MAP
+import portage.elog
+import portage.util
+import portage.locks
+import portage.exception
+from portage.data import secpass
+from portage.dbapi.dep_expand import dep_expand
+from portage.util import normalize_path as normpath
+from portage.util import shlex_split, writemsg_level, writemsg_stdout
+from portage._sets import SETPREFIX
+from portage._global_updates import _global_updates
+
+from _emerge.actions import action_config, action_sync, action_metadata, \
+ action_regen, action_search, action_uninstall, action_info, action_build, \
+ adjust_configs, chk_updated_cfg_files, display_missing_pkg_set, \
+ display_news_notification, getportageversion, load_emerge_config
+import _emerge
+from _emerge.emergelog import emergelog
+from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
+from _emerge.is_valid_package_atom import is_valid_package_atom
+from _emerge.stdout_spinner import stdout_spinner
+from _emerge.userquery import userquery
+
+if sys.hexversion >= 0x3000000:
+ long = int
+
+options=[
+"--alphabetical",
+"--ask-enter-invalid",
+"--buildpkgonly",
+"--changed-use",
+"--changelog", "--columns",
+"--debug",
+"--digest",
+"--emptytree",
+"--fetchonly", "--fetch-all-uri",
+"--ignore-default-opts",
+"--noconfmem",
+"--newuse",
+"--nodeps", "--noreplace",
+"--nospinner", "--oneshot",
+"--onlydeps", "--pretend",
+"--quiet-unmerge-warn",
+"--resume",
+"--searchdesc",
+"--skipfirst",
+"--tree",
+"--unordered-display",
+"--update",
+"--verbose",
+]
+
+shortmapping={
+"1":"--oneshot",
+"B":"--buildpkgonly",
+"c":"--depclean",
+"C":"--unmerge",
+"d":"--debug",
+"e":"--emptytree",
+"f":"--fetchonly", "F":"--fetch-all-uri",
+"h":"--help",
+"l":"--changelog",
+"n":"--noreplace", "N":"--newuse",
+"o":"--onlydeps", "O":"--nodeps",
+"p":"--pretend", "P":"--prune",
+"r":"--resume",
+"s":"--search", "S":"--searchdesc",
+"t":"--tree",
+"u":"--update",
+"v":"--verbose", "V":"--version"
+}
+
+def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
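+ # Regenerate the GNU info directory index ("dir" files) for any info
+ # directory whose mtime has changed since the previous merge.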
+
+ if os.path.exists("/usr/bin/install-info"):
+ out = portage.output.EOutput()
+ regen_infodirs=[]
+ for z in infodirs:
+ if z=='':
+ continue
+ inforoot=normpath(root+z)
+ if os.path.isdir(inforoot) and \
+ not [x for x in os.listdir(inforoot) \
+ if x.startswith('.keepinfodir')]:
+ infomtime = os.stat(inforoot)[stat.ST_MTIME]
+ if inforoot not in prev_mtimes or \
+ prev_mtimes[inforoot] != infomtime:
+ regen_infodirs.append(inforoot)
+
+ if not regen_infodirs:
+ portage.writemsg_stdout("\n")
+ if portage.util.noiselimit >= 0:
+ out.einfo("GNU info directory index is up-to-date.")
+ else:
+ portage.writemsg_stdout("\n")
+ if portage.util.noiselimit >= 0:
+ out.einfo("Regenerating GNU info directory index...")
+
+ dir_extensions = ("", ".gz", ".bz2")
+ icount=0
+ badcount=0
+ errmsg = ""
+ for inforoot in regen_infodirs:
+ if inforoot=='':
+ continue
+
+ if not os.path.isdir(inforoot) or \
+ not os.access(inforoot, os.W_OK):
+ continue
+
+ file_list = os.listdir(inforoot)
+ file_list.sort()
+ dir_file = os.path.join(inforoot, "dir")
+ moved_old_dir = False
+ processed_count = 0
+ for x in file_list:
+ if x.startswith(".") or \
+ os.path.isdir(os.path.join(inforoot, x)):
+ continue
+ if x.startswith("dir"):
+ skip = False
+ for ext in dir_extensions:
+ if x == "dir" + ext or \
+ x == "dir" + ext + ".old":
+ skip = True
+ break
+ if skip:
+ continue
+ if processed_count == 0:
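+ # Before the first install-info call for this directory, move any
+ # existing "dir" index (and compressed variants) out of the way so
+ # the index is rebuilt from scratch.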
+ for ext in dir_extensions:
+ try:
+ os.rename(dir_file + ext, dir_file + ext + ".old")
+ moved_old_dir = True
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ processed_count += 1
+ myso = portage.subprocess_getstatusoutput(
+ "LANG=C LANGUAGE=C /usr/bin/install-info " +
+ "--dir-file=%s/dir %s/%s" % (inforoot, inforoot, x))[1]
+ existsstr="already exists, for file `"
+ if myso!="":
+ if re.search(existsstr,myso):
+ # Already exists... Don't increment the count for this.
+ pass
+ elif myso[:44]=="install-info: warning: no info dir entry in ":
+ # This info file doesn't contain a DIR-header: install-info produces this
+ # (harmless) warning (the --quiet switch doesn't seem to work).
+ # Don't increment the count for this.
+ pass
+ else:
+ badcount=badcount+1
+ errmsg += myso + "\n"
+ icount=icount+1
+
+ if moved_old_dir and not os.path.exists(dir_file):
+ # We didn't generate a new dir file, so put the old file
+ # back where it was originally found.
+ for ext in dir_extensions:
+ try:
+ os.rename(dir_file + ext + ".old", dir_file + ext)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+
+ # Clean dir.old cruft so that they don't prevent
+ # unmerge of otherwise empty directories.
+ for ext in dir_extensions:
+ try:
+ os.unlink(dir_file + ext + ".old")
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+
+ #update mtime so we can potentially avoid regenerating.
+ prev_mtimes[inforoot] = os.stat(inforoot)[stat.ST_MTIME]
+
+ if badcount:
+ out.eerror("Processed %d info files; %d errors." % \
+ (icount, badcount))
+ writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
+ else:
+ if icount > 0 and portage.util.noiselimit >= 0:
+ out.einfo("Processed %d info files." % (icount,))
+
+def display_preserved_libs(vardbapi, myopts):
+ MAX_DISPLAY = 3
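+ # Show at most MAX_DISPLAY consumers per preserved library; any
+ # remainder is summarized as "used by N other files" below.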
+
+ if vardbapi._linkmap is None or \
+ vardbapi._plib_registry is None:
+ # preserve-libs is entirely disabled
+ return
+
+ # Explicitly load and prune the PreservedLibsRegistry in order
+ # to ensure that we do not display stale data.
+ vardbapi._plib_registry.load()
+
+ if vardbapi._plib_registry.hasEntries():
+ if "--quiet" in myopts:
+ print()
+ print(colorize("WARN", "!!!") + " existing preserved libs found")
+ return
+ else:
+ print()
+ print(colorize("WARN", "!!!") + " existing preserved libs:")
+
+ plibdata = vardbapi._plib_registry.getPreservedLibs()
+ linkmap = vardbapi._linkmap
+ consumer_map = {}
+ owners = {}
+ linkmap_broken = False
+
+ try:
+ linkmap.rebuild()
+ except portage.exception.CommandNotFound as e:
+ writemsg_level("!!! Command Not Found: %s\n" % (e,),
+ level=logging.ERROR, noiselevel=-1)
+ del e
+ linkmap_broken = True
+ else:
+ search_for_owners = set()
+ for cpv in plibdata:
+ internal_plib_keys = set(linkmap._obj_key(f) \
+ for f in plibdata[cpv])
+ for f in plibdata[cpv]:
+ if f in consumer_map:
+ continue
+ consumers = []
+ for c in linkmap.findConsumers(f):
+ # Filter out any consumers that are also preserved libs
+ # belonging to the same package as the provider.
+ if linkmap._obj_key(c) not in internal_plib_keys:
+ consumers.append(c)
+ consumers.sort()
+ consumer_map[f] = consumers
+ search_for_owners.update(consumers[:MAX_DISPLAY+1])
+
+ owners = {}
+ for f in search_for_owners:
+ owner_set = set()
+ for owner in linkmap.getOwners(f):
+ owner_dblink = vardbapi._dblink(owner)
+ if owner_dblink.exists():
+ owner_set.add(owner_dblink)
+ if owner_set:
+ owners[f] = owner_set
+
+ for cpv in plibdata:
+ print(colorize("WARN", ">>>") + " package: %s" % cpv)
+ samefile_map = {}
+ for f in plibdata[cpv]:
+ obj_key = linkmap._obj_key(f)
+ alt_paths = samefile_map.get(obj_key)
+ if alt_paths is None:
+ alt_paths = set()
+ samefile_map[obj_key] = alt_paths
+ alt_paths.add(f)
+
+ for alt_paths in samefile_map.values():
+ alt_paths = sorted(alt_paths)
+ for p in alt_paths:
+ print(colorize("WARN", " * ") + " - %s" % (p,))
+ f = alt_paths[0]
+ consumers = consumer_map.get(f, [])
+ for c in consumers[:MAX_DISPLAY]:
+ print(colorize("WARN", " * ") + " used by %s (%s)" % \
+ (c, ", ".join(x.mycpv for x in owners.get(c, []))))
+ if len(consumers) == MAX_DISPLAY + 1:
+ print(colorize("WARN", " * ") + " used by %s (%s)" % \
+ (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
+ for x in owners.get(consumers[MAX_DISPLAY], []))))
+ elif len(consumers) > MAX_DISPLAY:
+ print(colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY))
+ print("Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries")
+
+def post_emerge(myaction, myopts, myfiles,
+ target_root, trees, mtimedb, retval):
+ """
+ Misc. things to run at the end of a merge session.
+
+ Update Info Files
+ Update Config Files
+ Update News Items
+ Commit mtimeDB
+ Display preserved libs warnings
+
+ @param myaction: The action returned from parse_opts()
+ @type myaction: String
+ @param myopts: emerge options
+ @type myopts: dict
+ @param myfiles: emerge arguments
+ @type myfiles: list
+ @param target_root: The target ROOT for myaction
+ @type target_root: String
+ @param trees: A dictionary mapping each ROOT to its package databases
+ @type trees: dict
+ @param mtimedb: The mtimeDB to store data needed across merge invocations
+ @type mtimedb: MtimeDB class instance
+ @param retval: Emerge's return value
+ @type retval: Int
+ """
+
+ root_config = trees[target_root]["root_config"]
+ vardbapi = trees[target_root]["vartree"].dbapi
+ settings = vardbapi.settings
+ info_mtimes = mtimedb["info"]
+
+ # Load the most current variables from ${ROOT}/etc/profile.env
+ settings.unlock()
+ settings.reload()
+ settings.regenerate()
+ settings.lock()
+
+ config_protect = shlex_split(settings.get("CONFIG_PROTECT", ""))
+ infodirs = settings.get("INFOPATH","").split(":") + \
+ settings.get("INFODIR","").split(":")
+
+ os.chdir("/")
+
+ if retval == os.EX_OK:
+ exit_msg = " *** exiting successfully."
+ else:
+ exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
+ emergelog("notitles" not in settings.features, exit_msg)
+
+ _flush_elog_mod_echo()
+
+ if not vardbapi._pkgs_changed:
+ display_news_notification(root_config, myopts)
+ # If vdb state has not changed then there's nothing else to do.
+ return
+
+ vdb_path = os.path.join(root_config.settings['EROOT'], portage.VDB_PATH)
+ portage.util.ensure_dirs(vdb_path)
+ vdb_lock = None
+ if os.access(vdb_path, os.W_OK) and "--pretend" not in myopts:
+ vardbapi.lock()
+ vdb_lock = True
+
+ if vdb_lock:
+ try:
+ if "noinfo" not in settings.features:
+ chk_updated_info_files(target_root,
+ infodirs, info_mtimes, retval)
+ mtimedb.commit()
+ finally:
+ if vdb_lock:
+ vardbapi.unlock()
+
+ chk_updated_cfg_files(settings['EROOT'], config_protect)
+
+ display_news_notification(root_config, myopts)
+ if retval in (None, os.EX_OK) or "--pretend" not in myopts:
+ display_preserved_libs(vardbapi, myopts)
+
+ postemerge = os.path.join(settings["PORTAGE_CONFIGROOT"],
+ portage.USER_CONFIG_PATH, "bin", "post_emerge")
+ if os.access(postemerge, os.X_OK):
+ hook_retval = portage.process.spawn(
+ [postemerge], env=settings.environ())
+ if hook_retval != os.EX_OK:
+ writemsg_level(
+ " %s spawn failed of %s\n" % (bad("*"), postemerge,),
+ level=logging.ERROR, noiselevel=-1)
+
+ if "--quiet" not in myopts and \
+ myaction is None and "@world" in myfiles:
+ show_depclean_suggestion()
+
+def show_depclean_suggestion():
+ out = portage.output.EOutput()
+ msg = "After world updates, it is important to remove " + \
+ "obsolete packages with emerge --depclean. Refer " + \
+ "to `man emerge` for more information."
+ for line in textwrap.wrap(msg, 72):
+ out.ewarn(line)
+
+def multiple_actions(action1, action2):
+ sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
+ sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
+ sys.exit(1)
+
+def insert_optional_args(args):
+ """
+ Parse optional arguments and insert a value if one has
+ not been provided. This is done before feeding the args
+ to the optparse parser since that parser does not support
+ this feature natively.
+ """
+
+ class valid_integers(object):
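+ # A pseudo-container whose membership test accepts any string that
+ # parses as a non-negative integer, so optional option arguments
+ # like "--jobs 4" can be validated uniformly.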
+ def __contains__(self, s):
+ try:
+ return int(s) >= 0
+ except (ValueError, OverflowError):
+ return False
+
+ valid_integers = valid_integers()
+ y_or_n = ('y', 'n',)
+
+ new_args = []
+
+ default_arg_opts = {
+ '--ask' : y_or_n,
+ '--autounmask' : y_or_n,
+ '--autounmask-write' : y_or_n,
+ '--buildpkg' : y_or_n,
+ '--complete-graph' : y_or_n,
+ '--deep' : valid_integers,
+ '--deselect' : y_or_n,
+ '--binpkg-respect-use' : y_or_n,
+ '--fail-clean' : y_or_n,
+ '--getbinpkg' : y_or_n,
+ '--getbinpkgonly' : y_or_n,
+ '--jobs' : valid_integers,
+ '--keep-going' : y_or_n,
+ '--package-moves' : y_or_n,
+ '--quiet' : y_or_n,
+ '--quiet-build' : y_or_n,
+ '--rebuild-if-new-rev' : y_or_n,
+ '--rebuild-if-new-ver' : y_or_n,
+ '--rebuild-if-unbuilt' : y_or_n,
+ '--rebuilt-binaries' : y_or_n,
+ '--root-deps' : ('rdeps',),
+ '--select' : y_or_n,
+ '--selective' : y_or_n,
+ "--use-ebuild-visibility": y_or_n,
+ '--usepkg' : y_or_n,
+ '--usepkgonly' : y_or_n,
+ }
+
+ if _ENABLE_DYN_LINK_MAP:
+ default_arg_opts['--depclean-lib-check'] = y_or_n
+
+ short_arg_opts = {
+ 'D' : valid_integers,
+ 'j' : valid_integers,
+ }
+
+ # Don't make things like "-kn" expand to "-k n"
+ # since existence of -n makes it too ambiguous.
+ short_arg_opts_n = {
+ 'a' : y_or_n,
+ 'b' : y_or_n,
+ 'g' : y_or_n,
+ 'G' : y_or_n,
+ 'k' : y_or_n,
+ 'K' : y_or_n,
+ 'q' : y_or_n,
+ }
+
+ arg_stack = args[:]
+ arg_stack.reverse()
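+ # Reversing the copy lets pop() consume the arguments in their
+ # original left-to-right order.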
+ while arg_stack:
+ arg = arg_stack.pop()
+
+ default_arg_choices = default_arg_opts.get(arg)
+ if default_arg_choices is not None:
+ new_args.append(arg)
+ if arg_stack and arg_stack[-1] in default_arg_choices:
+ new_args.append(arg_stack.pop())
+ else:
+ # insert default argument
+ new_args.append('True')
+ continue
+
+ if arg[:1] != "-" or arg[:2] == "--":
+ new_args.append(arg)
+ continue
+
+ match = None
+ for k, arg_choices in short_arg_opts.items():
+ if k in arg:
+ match = k
+ break
+
+ if match is None:
+ for k, arg_choices in short_arg_opts_n.items():
+ if k in arg:
+ match = k
+ break
+
+ if match is None:
+ new_args.append(arg)
+ continue
+
+ if len(arg) == 2:
+ new_args.append(arg)
+ if arg_stack and arg_stack[-1] in arg_choices:
+ new_args.append(arg_stack.pop())
+ else:
+ # insert default argument
+ new_args.append('True')
+ continue
+
+ # Insert an empty placeholder in order to
+ # satisfy the requirements of optparse.
+
+ new_args.append("-" + match)
+ opt_arg = None
+ saved_opts = None
+
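+ # Fused short options are split here: "-j4" yields "-j" plus the
+ # argument "4", while a cluster such as "-av" peels off the matched
+ # option and recycles the remainder onto the stack below.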
+ if arg[1:2] == match:
+ if match not in short_arg_opts_n and arg[2:] in arg_choices:
+ opt_arg = arg[2:]
+ else:
+ saved_opts = arg[2:]
+ opt_arg = "True"
+ else:
+ saved_opts = arg[1:].replace(match, "")
+ opt_arg = "True"
+
+ if opt_arg is None and arg_stack and \
+ arg_stack[-1] in arg_choices:
+ opt_arg = arg_stack.pop()
+
+ if opt_arg is None:
+ new_args.append("True")
+ else:
+ new_args.append(opt_arg)
+
+ if saved_opts is not None:
+ # Recycle these on arg_stack since they
+ # might contain another match.
+ arg_stack.append("-" + saved_opts)
+
+ return new_args
+
+def _find_bad_atoms(atoms):
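+ # Only plain package names and slot atoms (wildcards allowed) pass;
+ # anything with a version operator, blocker prefix, or USE dependency
+ # is collected and reported back to the caller as invalid.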
+ bad_atoms = []
+ for x in ' '.join(atoms).split():
+ bad_atom = False
+ try:
+ atom = portage.dep.Atom(x, allow_wildcard=True)
+ except portage.exception.InvalidAtom:
+ try:
+ atom = portage.dep.Atom("*/"+x, allow_wildcard=True)
+ except portage.exception.InvalidAtom:
+ bad_atom = True
+
+ if bad_atom or atom.operator or atom.blocker or atom.use:
+ bad_atoms.append(x)
+ return bad_atoms
+
+
+def parse_opts(tmpcmdline, silent=False):
+ myaction=None
+ myopts = {}
+ myfiles=[]
+
+ global options, shortmapping
+
+ actions = frozenset([
+ "clean", "config", "depclean", "help",
+ "info", "list-sets", "metadata",
+ "prune", "regen", "search",
+ "sync", "unmerge", "version",
+ ])
+
+ longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
+ true_y_or_n = ("True", "y", "n")
+ true_y = ("True", "y")
+ argument_options = {
+
+ "--ask": {
+ "shortopt" : "-a",
+ "help" : "prompt before performing any actions",
+ "type" : "choice",
+ "choices" : true_y_or_n
+ },
+
+ "--autounmask": {
+ "help" : "automatically unmask packages",
+ "type" : "choice",
+ "choices" : true_y_or_n
+ },
+
+ "--autounmask-write": {
+ "help" : "write changes made by --autounmask to disk",
+ "type" : "choice",
+ "choices" : true_y_or_n
+ },
+
+ "--accept-properties": {
+ "help":"temporarily override ACCEPT_PROPERTIES",
+ "action":"store"
+ },
+
+ "--backtrack": {
+
+ "help" : "Specifies how many times to backtrack if dependency " + \
+ "calculation fails ",
+
+ "action" : "store"
+ },
+
+ "--buildpkg": {
+ "shortopt" : "-b",
+ "help" : "build binary packages",
+ "type" : "choice",
+ "choices" : true_y_or_n
+ },
+
+ "--config-root": {
+ "help":"specify the location for portage configuration files",
+ "action":"store"
+ },
+ "--color": {
+ "help":"enable or disable color output",
+ "type":"choice",
+ "choices":("y", "n")
+ },
+
+ "--complete-graph": {
+ "help" : "completely account for all known dependencies",
+ "type" : "choice",
+ "choices" : true_y_or_n
+ },
+
+ "--deep": {
+
+ "shortopt" : "-D",
+
+ "help" : "Specifies how deep to recurse into dependencies " + \
+ "of packages given as arguments. If no argument is given, " + \
+ "depth is unlimited. Default behavior is to skip " + \
+ "dependencies of installed packages.",
+
+ "action" : "store"
+ },
+
+ "--deselect": {
+ "help" : "remove atoms/sets from the world file",
+ "type" : "choice",
+ "choices" : true_y_or_n
+ },
+
+ "--exclude": {
+ "help" :"A space separated list of package names or slot atoms. " + \
+ "Emerge won't install any ebuild or binary package that " + \
+ "matches any of the given package atoms.",
+
+ "action" : "append"
+ },
+
+ "--fail-clean": {
+ "help" : "clean temp files after build failure",
+ "type" : "choice",
+ "choices" : true_y_or_n
+ },
+
+ "--jobs": {
+
+ "shortopt" : "-j",
+
+ "help" : "Specifies the number of packages to build " + \
+ "simultaneously.",
+
+ "action" : "store"
+ },
+
+ "--keep-going": {
+ "help" : "continue as much as possible after an error",
+ "type" : "choice",
+ "choices" : true_y_or_n
+ },
+
+ "--load-average": {
+
+ "help" :"Specifies that no new builds should be started " + \
+ "if there are other builds running and the load average " + \
+ "is at least LOAD (a floating-point number).",
+
+ "action" : "store"
+ },
+
+ "--misspell-suggestions": {
+ "help" : "enable package name misspell suggestions",
+ "type" : "choice",
+ "choices" : ("y", "n")
+ },
+
+ "--with-bdeps": {
+ "help":"include unnecessary build time dependencies",
+ "type":"choice",
+ "choices":("y", "n")
+ },
+ "--reinstall": {
+ "help":"specify conditions to trigger package reinstallation",
+ "type":"choice",
+ "choices":["changed-use"]
+ },
+
+ "--reinstall-atoms": {
+ "help" :"A space separated list of package names or slot atoms. " + \
+ "Emerge will treat matching packages as if they are not " + \
+ "installed, and reinstall them if necessary. Implies --deep.",
+
+ "action" : "append",
+ },
+
+ "--binpkg-respect-use": {
+ "help" : "discard binary packages if their use flags \
+ don't match the current configuration",
+ "type" : "choice",
+ "choices" : true_y_or_n
+ },
+
+ "--getbinpkg": {
+ "shortopt" : "-g",
+ "help" : "fetch binary packages",
+ "type" : "choice",
+ "choices" : true_y_or_n
+ },
+
+ "--getbinpkgonly": {
+ "shortopt" : "-G",
+ "help" : "fetch binary packages only",
+ "type" : "choice",
+ "choices" : true_y_or_n
+ },
+
+ "--usepkg-exclude": {
+ "help" :"A space separated list of package names or slot atoms. " + \
+ "Emerge will ignore matching binary packages. ",
+
+ "action" : "append",
+ },
+
+ "--rebuild-exclude": {
+ "help" :"A space separated list of package names or slot atoms. " + \
+ "Emerge will not rebuild these packages due to the " + \
+ "--rebuild flag. ",
+
+ "action" : "append",
+ },
+
+ "--rebuild-ignore": {
+ "help" :"A space separated list of package names or slot atoms. " + \
+ "Emerge will not rebuild packages that depend on matching " + \
+ "packages due to the --rebuild flag. ",
+
+ "action" : "append",
+ },
+
+ "--package-moves": {
+ "help" : "perform package moves when necessary",
+ "type" : "choice",
+ "choices" : true_y_or_n
+ },
+
+ "--quiet": {
+ "shortopt" : "-q",
+ "help" : "reduced or condensed output",
+ "type" : "choice",
+ "choices" : true_y_or_n
+ },
+
+ "--quiet-build": {
+ "help" : "redirect build output to logs",
+ "type" : "choice",
+ "choices" : true_y_or_n
+ },
+
+ "--rebuild-if-new-rev": {
+ "help" : "Rebuild packages when dependencies that are " + \
+ "used at both build-time and run-time are built, " + \
+ "if the dependency is not already installed with the " + \
+ "same version and revision.",
+ "type" : "choice",
+ "choices" : true_y_or_n
+ },
+
+ "--rebuild-if-new-ver": {
+ "help" : "Rebuild packages when dependencies that are " + \
+ "used at both build-time and run-time are built, " + \
+ "if the dependency is not already installed with the " + \
+ "same version. Revision numbers are ignored.",
+ "type" : "choice",
+ "choices" : true_y_or_n
+ },
+
+ "--rebuild-if-unbuilt": {
+ "help" : "Rebuild packages when dependencies that are " + \
+ "used at both build-time and run-time are built.",
+ "type" : "choice",
+ "choices" : true_y_or_n
+ },
+
+ "--rebuilt-binaries": {
+ "help" : "replace installed packages with binary " + \
+ "packages that have been rebuilt",
+ "type" : "choice",
+ "choices" : true_y_or_n
+ },
+
+ "--rebuilt-binaries-timestamp": {
+ "help" : "use only binaries that are newer than this " + \
+ "timestamp for --rebuilt-binaries",
+ "action" : "store"
+ },
+
+ "--root": {
+ "help" : "specify the target root filesystem for merging packages",
+ "action" : "store"
+ },
+
+ "--root-deps": {
+ "help" : "modify interpretation of depedencies",
+ "type" : "choice",
+ "choices" :("True", "rdeps")
+ },
+
+ "--select": {
+ "help" : "add specified packages to the world set " + \
+ "(inverse of --oneshot)",
+ "type" : "choice",
+ "choices" : true_y_or_n
+ },
+
+ "--selective": {
+ "help" : "identical to --noreplace",
+ "type" : "choice",
+ "choices" : true_y_or_n
+ },
+
+ "--use-ebuild-visibility": {
+ "help" : "use unbuilt ebuild metadata for visibility checks on built packages",
+ "type" : "choice",
+ "choices" : true_y_or_n
+ },
+
+ "--useoldpkg-atoms": {
+ "help" :"A space separated list of package names or slot atoms. " + \
+ "Emerge will prefer matching binary packages over newer unbuilt packages. ",
+
+ "action" : "append",
+ },
+
+ "--usepkg": {
+ "shortopt" : "-k",
+ "help" : "use binary packages",
+ "type" : "choice",
+ "choices" : true_y_or_n
+ },
+
+ "--usepkgonly": {
+ "shortopt" : "-K",
+ "help" : "use only binary packages",
+ "type" : "choice",
+ "choices" : true_y_or_n
+ },
+
+ }
+
+ if _ENABLE_DYN_LINK_MAP:
+ argument_options["--depclean-lib-check"] = {
+ "help" : "check for consumers of libraries before removing them",
+ "type" : "choice",
+ "choices" : true_y_or_n
+ }
+
+ from optparse import OptionParser
+ parser = OptionParser()
+ if parser.has_option("--help"):
+ parser.remove_option("--help")
+
+ for action_opt in actions:
+ parser.add_option("--" + action_opt, action="store_true",
+ dest=action_opt.replace("-", "_"), default=False)
+ for myopt in options:
+ parser.add_option(myopt, action="store_true",
+ dest=myopt.lstrip("--").replace("-", "_"), default=False)
+ for shortopt, longopt in shortmapping.items():
+ parser.add_option("-" + shortopt, action="store_true",
+ dest=longopt.lstrip("--").replace("-", "_"), default=False)
+ for myalias, myopt in longopt_aliases.items():
+ parser.add_option(myalias, action="store_true",
+ dest=myopt.lstrip("--").replace("-", "_"), default=False)
+
+ for myopt, kwargs in argument_options.items():
+ shortopt = kwargs.pop("shortopt", None)
+ args = [myopt]
+ if shortopt is not None:
+ args.append(shortopt)
+ parser.add_option(dest=myopt.lstrip("--").replace("-", "_"),
+ *args, **kwargs)
+
+ tmpcmdline = insert_optional_args(tmpcmdline)
+
+ myoptions, myargs = parser.parse_args(args=tmpcmdline)
+
+ if myoptions.ask in true_y:
+ myoptions.ask = True
+ else:
+ myoptions.ask = None
+
+ if myoptions.autounmask in true_y:
+ myoptions.autounmask = True
+
+ if myoptions.autounmask_write in true_y:
+ myoptions.autounmask_write = True
+
+ if myoptions.buildpkg in true_y:
+ myoptions.buildpkg = True
+ else:
+ myoptions.buildpkg = None
+
+ if myoptions.changed_use is not False:
+ myoptions.reinstall = "changed-use"
+ myoptions.changed_use = False
+
+ if myoptions.deselect in true_y:
+ myoptions.deselect = True
+
+ if myoptions.binpkg_respect_use in true_y:
+ myoptions.binpkg_respect_use = True
+ else:
+ myoptions.binpkg_respect_use = None
+
+ if myoptions.complete_graph in true_y:
+ myoptions.complete_graph = True
+ else:
+ myoptions.complete_graph = None
+
+ if _ENABLE_DYN_LINK_MAP:
+ if myoptions.depclean_lib_check in true_y:
+ myoptions.depclean_lib_check = True
+
+ if myoptions.exclude:
+ bad_atoms = _find_bad_atoms(myoptions.exclude)
+ if bad_atoms and not silent:
+ parser.error("Invalid Atom(s) in --exclude parameter: '%s' (only package names and slot atoms (with wildcards) allowed)\n" % \
+ (",".join(bad_atoms),))
+
+ if myoptions.reinstall_atoms:
+ bad_atoms = _find_bad_atoms(myoptions.reinstall_atoms)
+ if bad_atoms and not silent:
+ parser.error("Invalid Atom(s) in --reinstall-atoms parameter: '%s' (only package names and slot atoms (with wildcards) allowed)\n" % \
+ (",".join(bad_atoms),))
+
+ if myoptions.rebuild_exclude:
+ bad_atoms = _find_bad_atoms(myoptions.rebuild_exclude)
+ if bad_atoms and not silent:
+ parser.error("Invalid Atom(s) in --rebuild-exclude parameter: '%s' (only package names and slot atoms (with wildcards) allowed)\n" % \
+ (",".join(bad_atoms),))
+
+ if myoptions.rebuild_ignore:
+ bad_atoms = _find_bad_atoms(myoptions.rebuild_ignore)
+ if bad_atoms and not silent:
+ parser.error("Invalid Atom(s) in --rebuild-ignore parameter: '%s' (only package names and slot atoms (with wildcards) allowed)\n" % \
+ (",".join(bad_atoms),))
+
+ if myoptions.usepkg_exclude:
+ bad_atoms = _find_bad_atoms(myoptions.usepkg_exclude)
+ if bad_atoms and not silent:
+ parser.error("Invalid Atom(s) in --usepkg-exclude parameter: '%s' (only package names and slot atoms (with wildcards) allowed)\n" % \
+ (",".join(bad_atoms),))
+
+ if myoptions.useoldpkg_atoms:
+ bad_atoms = _find_bad_atoms(myoptions.useoldpkg_atoms)
+ if bad_atoms and not silent:
+ parser.error("Invalid Atom(s) in --useoldpkg-atoms parameter: '%s' (only package names and slot atoms (with wildcards) allowed)\n" % \
+ (",".join(bad_atoms),))
+
+ if myoptions.fail_clean in true_y:
+ myoptions.fail_clean = True
+
+ if myoptions.getbinpkg in true_y:
+ myoptions.getbinpkg = True
+ else:
+ myoptions.getbinpkg = None
+
+ if myoptions.getbinpkgonly in true_y:
+ myoptions.getbinpkgonly = True
+ else:
+ myoptions.getbinpkgonly = None
+
+ if myoptions.keep_going in true_y:
+ myoptions.keep_going = True
+ else:
+ myoptions.keep_going = None
+
+ if myoptions.package_moves in true_y:
+ myoptions.package_moves = True
+
+ if myoptions.quiet in true_y:
+ myoptions.quiet = True
+ else:
+ myoptions.quiet = None
+
+ if myoptions.quiet_build in true_y:
+ myoptions.quiet_build = True
+ else:
+ myoptions.quiet_build = None
+
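+ # The rebuild-if-* triggers are mutually exclusive; the strongest
+ # requested trigger wins (unbuilt > new-rev > new-ver).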
+ if myoptions.rebuild_if_new_ver in true_y:
+ myoptions.rebuild_if_new_ver = True
+ else:
+ myoptions.rebuild_if_new_ver = None
+
+ if myoptions.rebuild_if_new_rev in true_y:
+ myoptions.rebuild_if_new_rev = True
+ myoptions.rebuild_if_new_ver = None
+ else:
+ myoptions.rebuild_if_new_rev = None
+
+ if myoptions.rebuild_if_unbuilt in true_y:
+ myoptions.rebuild_if_unbuilt = True
+ myoptions.rebuild_if_new_rev = None
+ myoptions.rebuild_if_new_ver = None
+ else:
+ myoptions.rebuild_if_unbuilt = None
+
+ if myoptions.rebuilt_binaries in true_y:
+ myoptions.rebuilt_binaries = True
+
+ if myoptions.root_deps in true_y:
+ myoptions.root_deps = True
+
+ if myoptions.select in true_y:
+ myoptions.select = True
+ myoptions.oneshot = False
+ elif myoptions.select == "n":
+ myoptions.oneshot = True
+
+ if myoptions.selective in true_y:
+ myoptions.selective = True
+
+ if myoptions.backtrack is not None:
+
+ try:
+ backtrack = int(myoptions.backtrack)
+ except (OverflowError, ValueError):
+ backtrack = -1
+
+ if backtrack < 0:
+ backtrack = None
+ if not silent:
+ parser.error("Invalid --backtrack parameter: '%s'\n" % \
+ (myoptions.backtrack,))
+
+ myoptions.backtrack = backtrack
+
+ if myoptions.deep is not None:
+ deep = None
+ if myoptions.deep == "True":
+ deep = True
+ else:
+ try:
+ deep = int(myoptions.deep)
+ except (OverflowError, ValueError):
+ deep = -1
+
+ if deep is not True and deep < 0:
+ deep = None
+ if not silent:
+ parser.error("Invalid --deep parameter: '%s'\n" % \
+ (myoptions.deep,))
+
+ myoptions.deep = deep
+
+ if myoptions.jobs:
+ jobs = None
+ if myoptions.jobs == "True":
+ jobs = True
+ else:
+ try:
+ jobs = int(myoptions.jobs)
+ except ValueError:
+ jobs = -1
+
+ if jobs is not True and \
+ jobs < 1:
+ jobs = None
+ if not silent:
+ parser.error("Invalid --jobs parameter: '%s'\n" % \
+ (myoptions.jobs,))
+
+ myoptions.jobs = jobs
+
+ if myoptions.load_average:
+ try:
+ load_average = float(myoptions.load_average)
+ except ValueError:
+ load_average = 0.0
+
+ if load_average <= 0.0:
+ load_average = None
+ if not silent:
+ parser.error("Invalid --load-average parameter: '%s'\n" % \
+ (myoptions.load_average,))
+
+ myoptions.load_average = load_average
+
+ if myoptions.rebuilt_binaries_timestamp:
+ try:
+ rebuilt_binaries_timestamp = int(myoptions.rebuilt_binaries_timestamp)
+ except ValueError:
+ rebuilt_binaries_timestamp = -1
+
+ if rebuilt_binaries_timestamp < 0:
+ rebuilt_binaries_timestamp = 0
+ if not silent:
+ parser.error("Invalid --rebuilt-binaries-timestamp parameter: '%s'\n" % \
+ (myoptions.rebuilt_binaries_timestamp,))
+
+ myoptions.rebuilt_binaries_timestamp = rebuilt_binaries_timestamp
+
+ if myoptions.use_ebuild_visibility in true_y:
+ myoptions.use_ebuild_visibility = True
+ else:
+ # None or "n"
+ pass
+
+ if myoptions.usepkg in true_y:
+ myoptions.usepkg = True
+ else:
+ myoptions.usepkg = None
+
+ if myoptions.usepkgonly in true_y:
+ myoptions.usepkgonly = True
+ else:
+ myoptions.usepkgonly = None
+
+ for myopt in options:
+ v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
+ if v:
+ myopts[myopt] = True
+
+ for myopt in argument_options:
+ v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
+ if v is not None:
+ myopts[myopt] = v
+
+ if myoptions.searchdesc:
+ myoptions.search = True
+
+ for action_opt in actions:
+ v = getattr(myoptions, action_opt.replace("-", "_"))
+ if v:
+ if myaction:
+ multiple_actions(myaction, action_opt)
+ sys.exit(1)
+ myaction = action_opt
+
+ if myaction is None and myoptions.deselect is True:
+ myaction = 'deselect'
+
+ if myargs and sys.hexversion < 0x3000000 and \
+ not isinstance(myargs[0], unicode):
+ for i in range(len(myargs)):
+ myargs[i] = portage._unicode_decode(myargs[i])
+
+ myfiles += myargs
+
+ return myaction, myopts, myfiles
+
+# Warn about features that may confuse users and
+# lead them to report invalid bugs.
+_emerge_features_warn = frozenset(['keeptemp', 'keepwork'])
+
+def validate_ebuild_environment(trees):
+ features_warn = set()
+ for myroot in trees:
+ settings = trees[myroot]["vartree"].settings
+ settings.validate()
+ features_warn.update(
+ _emerge_features_warn.intersection(settings.features))
+
+ if features_warn:
+ msg = "WARNING: The FEATURES variable contains one " + \
+ "or more values that should be disabled under " + \
+ "normal circumstances: %s" % " ".join(features_warn)
+ out = portage.output.EOutput()
+ for line in textwrap.wrap(msg, 65):
+ out.ewarn(line)
+
+def apply_priorities(settings):
+ ionice(settings)
+ nice(settings)
+
+def nice(settings):
+ try:
+ os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
+ except (OSError, ValueError) as e:
+ out = portage.output.EOutput()
+ out.eerror("Failed to change nice value to '%s'" % \
+ settings["PORTAGE_NICENESS"])
+ out.eerror("%s\n" % str(e))
+
+def ionice(settings):
+
+ ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
+ if ionice_cmd:
+ ionice_cmd = portage.util.shlex_split(ionice_cmd)
+ if not ionice_cmd:
+ return
+
+ from portage.util import varexpand
+ variables = {"PID" : str(os.getpid())}
+ cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
+
+ try:
+ rval = portage.process.spawn(cmd, env=os.environ)
+ except portage.exception.CommandNotFound:
+ # The OS kernel probably doesn't support ionice,
+ # so return silently.
+ return
+
+ if rval != os.EX_OK:
+ out = portage.output.EOutput()
+ out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
+ out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
+
+def setconfig_fallback(root_config):
+ from portage._sets.base import DummyPackageSet
+ from portage._sets.files import WorldSelectedSet
+ from portage._sets.profiles import PackagesSystemSet
+ setconfig = root_config.setconfig
+ setconfig.psets['world'] = DummyPackageSet(atoms=['@selected', '@system'])
+ setconfig.psets['selected'] = WorldSelectedSet(root_config.settings['EROOT'])
+ setconfig.psets['system'] = \
+ PackagesSystemSet(root_config.settings.profiles)
+ root_config.sets = setconfig.getSets()
+
+def get_missing_sets(root_config):
+ # emerge requires existence of "world", "selected", and "system"
+ missing_sets = []
+
+ for s in ("selected", "system", "world",):
+ if s not in root_config.sets:
+ missing_sets.append(s)
+
+ return missing_sets
+
+def missing_sets_warning(root_config, missing_sets):
+ if len(missing_sets) > 2:
+ missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
+ missing_sets_str += ', and "%s"' % missing_sets[-1]
+ elif len(missing_sets) == 2:
+ missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
+ else:
+ missing_sets_str = '"%s"' % missing_sets[-1]
+ msg = ["emerge: incomplete set configuration, " + \
+ "missing set(s): %s" % missing_sets_str]
+ if root_config.sets:
+ msg.append(" sets defined: %s" % ", ".join(root_config.sets))
+ global_config_path = portage.const.GLOBAL_CONFIG_PATH
+ if root_config.settings['EPREFIX']:
+ global_config_path = os.path.join(root_config.settings['EPREFIX'],
+ portage.const.GLOBAL_CONFIG_PATH.lstrip(os.sep))
+ msg.append(" This usually means that '%s'" % \
+ (os.path.join(global_config_path, "sets/portage.conf"),))
+ msg.append(" is missing or corrupt.")
+ msg.append(" Falling back to default world and system set configuration!!!")
+ for line in msg:
+ writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
+
+def ensure_required_sets(trees):
+ warning_shown = False
+ for root_trees in trees.values():
+ missing_sets = get_missing_sets(root_trees["root_config"])
+ if missing_sets and not warning_shown:
+ warning_shown = True
+ missing_sets_warning(root_trees["root_config"], missing_sets)
+ if missing_sets:
+ setconfig_fallback(root_trees["root_config"])
+
+def expand_set_arguments(myfiles, myaction, root_config):
+ retval = os.EX_OK
+ setconfig = root_config.setconfig
+
+ sets = setconfig.getSets()
+
+ # In order to know exactly which atoms/sets should be added to the
+ # world file, the depgraph performs set expansion later. It will get
+ # confused about where the atoms came from if it's not allowed to
+ # expand them itself.
+ do_not_expand = (None, )
+ newargs = []
+ for a in myfiles:
+ if a in ("system", "world"):
+ newargs.append(SETPREFIX+a)
+ else:
+ newargs.append(a)
+ myfiles = newargs
+ del newargs
+ newargs = []
+
+ # separators for set arguments
+ ARG_START = "{"
+ ARG_END = "}"
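+ # Illustrative: an argument like "@foo{bar=baz,qux}" updates set "foo"
+ # with the options {"bar": "baz", "qux": "True"} before expansion.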
+
+ for i in range(0, len(myfiles)):
+ if myfiles[i].startswith(SETPREFIX):
+ start = 0
+ end = 0
+ x = myfiles[i][len(SETPREFIX):]
+ newset = ""
+ while x:
+ start = x.find(ARG_START)
+ end = x.find(ARG_END)
+ if start > 0 and start < end:
+ namepart = x[:start]
+ argpart = x[start+1:end]
+
+ # TODO: implement proper quoting
+ args = argpart.split(",")
+ options = {}
+ for a in args:
+ if "=" in a:
+ k, v = a.split("=", 1)
+ options[k] = v
+ else:
+ options[a] = "True"
+ setconfig.update(namepart, options)
+ newset += (x[:start-len(namepart)]+namepart)
+ x = x[end+len(ARG_END):]
+ else:
+ newset += x
+ x = ""
+ myfiles[i] = SETPREFIX+newset
+
+ sets = setconfig.getSets()
+
+ # display errors that occurred while loading the SetConfig instance
+ for e in setconfig.errors:
+ print(colorize("BAD", "Error during set creation: %s" % e))
+
+ unmerge_actions = ("unmerge", "prune", "clean", "depclean")
+
+ for a in myfiles:
+ if a.startswith(SETPREFIX):
+ s = a[len(SETPREFIX):]
+ if s not in sets:
+ display_missing_pkg_set(root_config, s)
+ return (None, 1)
+ setconfig.active.append(s)
+ try:
+ set_atoms = setconfig.getSetAtoms(s)
+ except portage.exception.PackageSetNotFound as e:
+ writemsg_level(("emerge: the given set '%s' " + \
+ "contains a non-existent set named '%s'.\n") % \
+ (s, e), level=logging.ERROR, noiselevel=-1)
+ return (None, 1)
+ if myaction in unmerge_actions and \
+ not sets[s].supportsOperation("unmerge"):
+ sys.stderr.write("emerge: the given set '%s' does " % s + \
+ "not support unmerge operations\n")
+ retval = 1
+ elif not set_atoms:
+ print("emerge: '%s' is an empty set" % s)
+ elif myaction not in do_not_expand:
+ newargs.extend(set_atoms)
+ else:
+ newargs.append(SETPREFIX+s)
+ for e in sets[s].errors:
+ print(e)
+ else:
+ newargs.append(a)
+ return (newargs, retval)
+
+def repo_name_check(trees):
+ missing_repo_names = set()
+ for root_trees in trees.values():
+ porttree = root_trees.get("porttree")
+ if porttree:
+ portdb = porttree.dbapi
+ missing_repo_names.update(portdb.getMissingRepoNames())
+ if portdb.porttree_root in missing_repo_names and \
+ not os.path.exists(os.path.join(
+ portdb.porttree_root, "profiles")):
+ # This is normal if $PORTDIR happens to be empty,
+ # so don't warn about it.
+ missing_repo_names.remove(portdb.porttree_root)
+
+ if missing_repo_names:
+ msg = []
+ msg.append("WARNING: One or more repositories " + \
+ "have missing repo_name entries:")
+ msg.append("")
+ for p in missing_repo_names:
+ msg.append("\t%s/profiles/repo_name" % (p,))
+ msg.append("")
+ msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
+ "should be a plain text file containing a unique " + \
+ "name for the repository on the first line.", 70))
+ msg.append("\n")
+ writemsg_level("".join("%s\n" % l for l in msg),
+ level=logging.WARNING, noiselevel=-1)
+
+ return bool(missing_repo_names)
+
+def repo_name_duplicate_check(trees):
+ ignored_repos = {}
+ for root, root_trees in trees.items():
+ if 'porttree' in root_trees:
+ portdb = root_trees['porttree'].dbapi
+ if portdb.settings.get('PORTAGE_REPO_DUPLICATE_WARN') != '0':
+ for repo_name, paths in portdb.getIgnoredRepos():
+ k = (root, repo_name, portdb.getRepositoryPath(repo_name))
+ ignored_repos.setdefault(k, []).extend(paths)
+
+ if ignored_repos:
+ msg = []
+ msg.append('WARNING: One or more repositories ' + \
+ 'have been ignored due to duplicate')
+ msg.append(' profiles/repo_name entries:')
+ msg.append('')
+ for k in sorted(ignored_repos):
+ msg.append(' %s overrides' % ", ".join(k))
+ for path in ignored_repos[k]:
+ msg.append(' %s' % (path,))
+ msg.append('')
+ msg.extend(' ' + x for x in textwrap.wrap(
+ "All profiles/repo_name entries must be unique in order " + \
+ "to avoid having duplicates ignored. " + \
+ "Set PORTAGE_REPO_DUPLICATE_WARN=\"0\" in " + \
+ "/etc/make.conf if you would like to disable this warning."))
+ msg.append("\n")
+ writemsg_level(''.join('%s\n' % l for l in msg),
+ level=logging.WARNING, noiselevel=-1)
+
+ return bool(ignored_repos)
+
+def config_protect_check(trees):
+ for root, root_trees in trees.items():
+ if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
+ msg = "!!! CONFIG_PROTECT is empty"
+ if root != "/":
+ msg += " for '%s'" % root
+ msg += "\n"
+ writemsg_level(msg, level=logging.WARN, noiselevel=-1)
+
+def profile_check(trees, myaction):
+ if myaction in ("help", "info", "sync", "version"):
+ return os.EX_OK
+ for root, root_trees in trees.items():
+ if root_trees["root_config"].settings.profiles:
+ continue
+ # generate some profile related warning messages
+ validate_ebuild_environment(trees)
+ msg = "If you have just changed your profile configuration, you " + \
+ "should revert back to the previous configuration. Due to " + \
+ "your current profile being invalid, allowed actions are " + \
+ "limited to --help, --info, --sync, and --version."
+ writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+ return os.EX_OK
+
+def check_procfs():
+ procfs_path = '/proc'
+ if platform.system() not in ("Linux",) or \
+ os.path.ismount(procfs_path):
+ return os.EX_OK
+ msg = "It seems that %s is not mounted. You have been warned." % procfs_path
+ writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+def emerge_main(args=None):
+ """
+ @param args: command arguments (default: sys.argv[1:])
+ @type args: list
+ """
+ if args is None:
+ args = sys.argv[1:]
+
+ portage._disable_legacy_globals()
+ portage.dep._internal_warnings = True
+ # Disable color until we're sure that it should be enabled (after
+ # EMERGE_DEFAULT_OPTS has been parsed).
+ portage.output.havecolor = 0
+ # This first pass is just for options that need to be known as early as
+ # possible, such as --config-root. They will be parsed again later,
+ # together with EMERGE_DEFAULT_OPTS (which may vary depending on
+ # the value of --config-root).
+ myaction, myopts, myfiles = parse_opts(args, silent=True)
+ if "--debug" in myopts:
+ os.environ["PORTAGE_DEBUG"] = "1"
+ if "--config-root" in myopts:
+ os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
+ if "--root" in myopts:
+ os.environ["ROOT"] = myopts["--root"]
+ if "--accept-properties" in myopts:
+ os.environ["ACCEPT_PROPERTIES"] = myopts["--accept-properties"]
+
+ # Portage needs to ensure a sane umask for the files it creates.
+ os.umask(0o22)
+ settings, trees, mtimedb = load_emerge_config()
+ portdb = trees[settings["ROOT"]]["porttree"].dbapi
+ rval = profile_check(trees, myaction)
+ if rval != os.EX_OK:
+ return rval
+
+ tmpcmdline = []
+ if "--ignore-default-opts" not in myopts:
+ tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
+ tmpcmdline.extend(args)
+ myaction, myopts, myfiles = parse_opts(tmpcmdline)
+
+ if myaction not in ('help', 'info', 'version') and \
+ myopts.get('--package-moves') != 'n' and \
+ _global_updates(trees, mtimedb["updates"], quiet=("--quiet" in myopts)):
+ mtimedb.commit()
+ # Reload the whole config from scratch.
+ settings, trees, mtimedb = load_emerge_config(trees=trees)
+ portdb = trees[settings["ROOT"]]["porttree"].dbapi
+
+ xterm_titles = "notitles" not in settings.features
+ if xterm_titles:
+ xtermTitle("emerge")
+
+ if "--digest" in myopts:
+ os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
+ # Reload the whole config from scratch so that the portdbapi internal
+ # config is updated with new FEATURES.
+ settings, trees, mtimedb = load_emerge_config(trees=trees)
+ portdb = trees[settings["ROOT"]]["porttree"].dbapi
+
+ adjust_configs(myopts, trees)
+ apply_priorities(settings)
+
+ if myaction == 'version':
+ writemsg_stdout(getportageversion(
+ settings["PORTDIR"], settings["ROOT"],
+ settings.profile_path, settings["CHOST"],
+ trees[settings["ROOT"]]["vartree"].dbapi) + '\n', noiselevel=-1)
+ return 0
+ elif myaction == 'help':
+ _emerge.help.help(myopts, portage.output.havecolor)
+ return 0
+
+ spinner = stdout_spinner()
+ if "candy" in settings.features:
+ spinner.update = spinner.update_scroll
+
+ if "--quiet" not in myopts:
+ portage.deprecated_profile_check(settings=settings)
+ if portage.const._ENABLE_REPO_NAME_WARN:
+ # Bug #248603 - Disable warnings about missing
+ # repo_name entries for stable branch.
+ repo_name_check(trees)
+ repo_name_duplicate_check(trees)
+ config_protect_check(trees)
+ check_procfs()
+
+ if "getbinpkg" in settings.features:
+ myopts["--getbinpkg"] = True
+
+ if "--getbinpkgonly" in myopts:
+ myopts["--getbinpkg"] = True
+
+ if "--getbinpkgonly" in myopts:
+ myopts["--usepkgonly"] = True
+
+ if "--getbinpkg" in myopts:
+ myopts["--usepkg"] = True
+
+ if "--usepkgonly" in myopts:
+ myopts["--usepkg"] = True
+
+ if "buildpkg" in settings.features or "--buildpkgonly" in myopts:
+ myopts["--buildpkg"] = True
+
+ if "--buildpkgonly" in myopts:
+ # --buildpkgonly will not merge anything, so
+ # it cancels all binary package options.
+ for opt in ("--getbinpkg", "--getbinpkgonly",
+ "--usepkg", "--usepkgonly"):
+ myopts.pop(opt, None)
+
+ for mytrees in trees.values():
+ mydb = mytrees["porttree"].dbapi
+ # Freeze the portdbapi for performance (memoize all xmatch results).
+ mydb.freeze()
+
+ if myaction in ('search', None) and \
+ "--usepkg" in myopts:
+ # Populate the bintree with current --getbinpkg setting.
+ # This needs to happen before expand_set_arguments(), in case
+ # any sets use the bintree.
+ mytrees["bintree"].populate(
+ getbinpkgs="--getbinpkg" in myopts)
+
+ del mytrees, mydb
+
+ if "moo" in myfiles:
+ print("""
+
+ Larry loves Gentoo (""" + platform.system() + """)
+
+ _______________________
+< Have you mooed today? >
+ -----------------------
+ \ ^__^
+ \ (oo)\_______
+ (__)\ )\/\
+ ||----w |
+ || ||
+
+""")
+
+ for x in myfiles:
+ ext = os.path.splitext(x)[1]
+ if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
+ print(colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n"))
+ break
+
+ root_config = trees[settings["ROOT"]]["root_config"]
+ if myaction == "list-sets":
+ writemsg_stdout("".join("%s\n" % s for s in sorted(root_config.sets)))
+ return os.EX_OK
+
+ ensure_required_sets(trees)
+
+ # only expand sets for actions taking package arguments
+ oldargs = myfiles[:]
+ if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
+ myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
+ if retval != os.EX_OK:
+ return retval
+
+ # Need to handle empty sets specially, otherwise emerge will react
+ # with the help message for empty argument lists
+ if oldargs and not myfiles:
+ print("emerge: no targets left after set expansion")
+ return 0
+
+ if ("--tree" in myopts) and ("--columns" in myopts):
+ print("emerge: can't specify both of \"--tree\" and \"--columns\".")
+ return 1
+
+ if '--emptytree' in myopts and '--noreplace' in myopts:
+ writemsg_level("emerge: can't specify both of " + \
+ "\"--emptytree\" and \"--noreplace\".\n",
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ if ("--quiet" in myopts):
+ spinner.update = spinner.update_quiet
+ portage.util.noiselimit = -1
+
+ if "--fetch-all-uri" in myopts:
+ myopts["--fetchonly"] = True
+
+ if "--skipfirst" in myopts and "--resume" not in myopts:
+ myopts["--resume"] = True
+
+ # Allow -p to remove --ask
+ if "--pretend" in myopts:
+ myopts.pop("--ask", None)
+
+ # forbid --ask when not in a terminal
+ # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
+ if ("--ask" in myopts) and (not sys.stdin.isatty()):
+ portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
+ noiselevel=-1)
+ return 1
+
+ if settings.get("PORTAGE_DEBUG", "") == "1":
+ spinner.update = spinner.update_quiet
+ portage.util.noiselimit = 0
+ if "python-trace" in settings.features:
+ import portage.debug as portage_debug
+ portage_debug.set_trace(True)
+
+ if not ("--quiet" in myopts):
+ if '--nospinner' in myopts or \
+ settings.get('TERM') == 'dumb' or \
+ not sys.stdout.isatty():
+ spinner.update = spinner.update_basic
+
+ if "--debug" in myopts:
+ print("myaction", myaction)
+ print("myopts", myopts)
+
+ if not myaction and not myfiles and "--resume" not in myopts:
+ _emerge.help.help(myopts, portage.output.havecolor)
+ return 1
+
+ pretend = "--pretend" in myopts
+ fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
+ buildpkgonly = "--buildpkgonly" in myopts
+
+ # Check whether the current user has sufficient privileges for the actions that need them.
+ if portage.secpass < 2:
+ # We've already allowed "--version" and "--help" above.
+ if "--pretend" not in myopts and myaction not in ("search","info"):
+ need_superuser = myaction in ('clean', 'depclean', 'deselect',
+ 'prune', 'unmerge') or not \
+ (fetchonly or \
+ (buildpkgonly and secpass >= 1) or \
+ myaction in ("metadata", "regen", "sync"))
+ if portage.secpass < 1 or \
+ need_superuser:
+ if need_superuser:
+ access_desc = "superuser"
+ else:
+ access_desc = "portage group"
+ # Always show portage_group_warning() when only portage group
+ # access is required but the user is not in the portage group.
+ from portage.data import portage_group_warning
+ if "--ask" in myopts:
+ writemsg_stdout("This action requires %s access...\n" % \
+ (access_desc,), noiselevel=-1)
+ if portage.secpass < 1 and not need_superuser:
+ portage_group_warning()
+ if userquery("Would you like to add --pretend to options?",
+ "--ask-enter-invalid" in myopts) == "No":
+ return 1
+ myopts["--pretend"] = True
+ del myopts["--ask"]
+ else:
+ sys.stderr.write(("emerge: %s access is required\n") \
+ % access_desc)
+ if portage.secpass < 1 and not need_superuser:
+ portage_group_warning()
+ return 1
+
+ # Disable emergelog for everything except build or unmerge operations.
+ # This helps minimize parallel emerge.log entries that can confuse log
+ # parsers like genlop.
+ disable_emergelog = False
+ for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
+ if x in myopts:
+ disable_emergelog = True
+ break
+ if myaction in ("search", "info"):
+ disable_emergelog = True
+
+ _emerge.emergelog._disable = disable_emergelog
+
+ if not disable_emergelog:
+ if 'EMERGE_LOG_DIR' in settings:
+ try:
+ # At least the parent needs to exist for the lock file.
+ portage.util.ensure_dirs(settings['EMERGE_LOG_DIR'])
+ except portage.exception.PortageException as e:
+ writemsg_level("!!! Error creating directory for " + \
+ "EMERGE_LOG_DIR='%s':\n!!! %s\n" % \
+ (settings['EMERGE_LOG_DIR'], e),
+ noiselevel=-1, level=logging.ERROR)
+ else:
+ _emerge.emergelog._emerge_log_dir = settings["EMERGE_LOG_DIR"]
+
+ if not "--pretend" in myopts:
+ emergelog(xterm_titles, "Started emerge on: "+\
+ _unicode_decode(
+ time.strftime("%b %d, %Y %H:%M:%S", time.localtime()),
+ encoding=_encodings['content'], errors='replace'))
+ myelogstr=""
+ if myopts:
+ myelogstr=" ".join(myopts)
+ if myaction:
+ myelogstr+=" "+myaction
+ if myfiles:
+ myelogstr += " " + " ".join(oldargs)
+ emergelog(xterm_titles, " *** emerge " + myelogstr)
+ del oldargs
+
+ def emergeexitsig(signum, frame):
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
+ sys.exit(128 + signum)
+ signal.signal(signal.SIGINT, emergeexitsig)
+ signal.signal(signal.SIGTERM, emergeexitsig)
+
+ def emergeexit():
+ """This gets out final log message in before we quit."""
+ if "--pretend" not in myopts:
+ emergelog(xterm_titles, " *** terminating.")
+ if xterm_titles:
+ xtermTitleReset()
+ portage.atexit_register(emergeexit)
+
+ if myaction in ("config", "metadata", "regen", "sync"):
+ if "--pretend" in myopts:
+ sys.stderr.write(("emerge: The '%s' action does " + \
+ "not support '--pretend'.\n") % myaction)
+ return 1
+
+ if "sync" == myaction:
+ return action_sync(settings, trees, mtimedb, myopts, myaction)
+ elif "metadata" == myaction:
+ action_metadata(settings, portdb, myopts)
+ elif myaction=="regen":
+ validate_ebuild_environment(trees)
+ return action_regen(settings, portdb, myopts.get("--jobs"),
+ myopts.get("--load-average"))
+ # CONFIG action
+ elif "config"==myaction:
+ validate_ebuild_environment(trees)
+ action_config(settings, trees, myopts, myfiles)
+
+ # SEARCH action
+ elif "search"==myaction:
+ validate_ebuild_environment(trees)
+ action_search(trees[settings["ROOT"]]["root_config"],
+ myopts, myfiles, spinner)
+
+ elif myaction in ('clean', 'depclean', 'deselect', 'prune', 'unmerge'):
+ validate_ebuild_environment(trees)
+ rval = action_uninstall(settings, trees, mtimedb["ldpath"],
+ myopts, myaction, myfiles, spinner)
+ if not (myaction == 'deselect' or buildpkgonly or fetchonly or pretend):
+ post_emerge(myaction, myopts, myfiles, settings["ROOT"],
+ trees, mtimedb, rval)
+ return rval
+
+ elif myaction == 'info':
+
+ # Ensure atoms are valid before calling action_info().
+ vardb = trees[settings["ROOT"]]["vartree"].dbapi
+ portdb = trees[settings["ROOT"]]["porttree"].dbapi
+ bindb = trees[settings["ROOT"]]["bintree"].dbapi
+ valid_atoms = []
+ for x in myfiles:
+ if is_valid_package_atom(x):
+ try:
+ #look at the installed files first, if there is no match
+ #look at the ebuilds, since EAPI 4 allows running pkg_info
+ #on non-installed packages
+ valid_atom = dep_expand(x, mydb=vardb, settings=settings)
+ if valid_atom.cp.split("/")[0] == "null":
+ valid_atom = dep_expand(x, mydb=portdb, settings=settings)
+ if valid_atom.cp.split("/")[0] == "null" and "--usepkg" in myopts:
+ valid_atom = dep_expand(x, mydb=bindb, settings=settings)
+ valid_atoms.append(valid_atom)
+ except portage.exception.AmbiguousPackageName as e:
+ msg = "The short ebuild name \"" + x + \
+ "\" is ambiguous. Please specify " + \
+ "one of the following " + \
+ "fully-qualified ebuild names instead:"
+ for line in textwrap.wrap(msg, 70):
+ writemsg_level("!!! %s\n" % (line,),
+ level=logging.ERROR, noiselevel=-1)
+ for i in e.args[0]:
+ writemsg_level(" %s\n" % colorize("INFORM", i),
+ level=logging.ERROR, noiselevel=-1)
+ writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
+ return 1
+ continue
+ msg = []
+ msg.append("'%s' is not a valid package atom." % (x,))
+ msg.append("Please check ebuild(5) for full details.")
+ writemsg_level("".join("!!! %s\n" % line for line in msg),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ return action_info(settings, trees, myopts, valid_atoms)
+
+ # "update", "system", or just process files:
+ else:
+ validate_ebuild_environment(trees)
+
+ for x in myfiles:
+ if x.startswith(SETPREFIX) or \
+ is_valid_package_atom(x, allow_repo=True):
+ continue
+ if x[:1] == os.sep:
+ continue
+ try:
+ os.lstat(x)
+ continue
+ except OSError:
+ pass
+ msg = []
+ msg.append("'%s' is not a valid package atom." % (x,))
+ msg.append("Please check ebuild(5) for full details.")
+ writemsg_level("".join("!!! %s\n" % line for line in msg),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ if "--pretend" not in myopts:
+ display_news_notification(root_config, myopts)
+ retval = action_build(settings, trees, mtimedb,
+ myopts, myaction, myfiles, spinner)
+ post_emerge(myaction, myopts, myfiles, settings["ROOT"],
+ trees, mtimedb, retval)
+
+ return retval
diff --git a/portage_with_autodep/pym/_emerge/resolver/__init__.py b/portage_with_autodep/pym/_emerge/resolver/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/resolver/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/portage_with_autodep/pym/_emerge/resolver/backtracking.py b/portage_with_autodep/pym/_emerge/resolver/backtracking.py
new file mode 100644
index 0000000..dcdaee0
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/resolver/backtracking.py
@@ -0,0 +1,197 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import copy
+
+class BacktrackParameter(object):
+
+ __slots__ = (
+ "needed_unstable_keywords", "runtime_pkg_mask", "needed_use_config_changes", "needed_license_changes",
+ "rebuild_list", "reinstall_list", "needed_p_mask_changes"
+ )
+
+ def __init__(self):
+ self.needed_unstable_keywords = set()
+ self.needed_p_mask_changes = set()
+ self.runtime_pkg_mask = {}
+ self.needed_use_config_changes = {}
+ self.needed_license_changes = {}
+ self.rebuild_list = set()
+ self.reinstall_list = set()
+
+ def __deepcopy__(self, memo=None):
+ if memo is None:
+ memo = {}
+ result = BacktrackParameter()
+ memo[id(self)] = result
+
+ #Shallow copies are enough here, as we only need to ensure that nobody adds stuff
+ #to our sets and dicts. The existing content is immutable.
+ result.needed_unstable_keywords = copy.copy(self.needed_unstable_keywords)
+ result.needed_p_mask_changes = copy.copy(self.needed_p_mask_changes)
+ result.runtime_pkg_mask = copy.copy(self.runtime_pkg_mask)
+ result.needed_use_config_changes = copy.copy(self.needed_use_config_changes)
+ result.needed_license_changes = copy.copy(self.needed_license_changes)
+ result.rebuild_list = copy.copy(self.rebuild_list)
+ result.reinstall_list = copy.copy(self.reinstall_list)
+
+ return result
+
+ def __eq__(self, other):
+ return self.needed_unstable_keywords == other.needed_unstable_keywords and \
+ self.needed_p_mask_changes == other.needed_p_mask_changes and \
+ self.runtime_pkg_mask == other.runtime_pkg_mask and \
+ self.needed_use_config_changes == other.needed_use_config_changes and \
+ self.needed_license_changes == other.needed_license_changes and \
+ self.rebuild_list == other.rebuild_list and \
+ self.reinstall_list == other.reinstall_list
+
+
+class _BacktrackNode:
+
+ __slots__ = (
+ "parameter", "depth", "mask_steps", "terminal",
+ )
+
+ def __init__(self, parameter=BacktrackParameter(), depth=0, mask_steps=0, terminal=True):
+ self.parameter = parameter
+ self.depth = depth
+ self.mask_steps = mask_steps
+ self.terminal = terminal
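+ # Note: the default BacktrackParameter() is instantiated once and shared
+ # by every node created without an explicit parameter; this is safe only
+ # because callers deepcopy nodes before mutating their parameter.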
+
+ def __eq__(self, other):
+ return self.parameter == other.parameter
+
+
+class Backtracker(object):
+
+ __slots__ = (
+ "_max_depth", "_unexplored_nodes", "_current_node", "_nodes", "_root",
+ )
+
+ def __init__(self, max_depth):
+ self._max_depth = max_depth
+ self._unexplored_nodes = []
+ self._current_node = None
+ self._nodes = []
+
+ self._root = _BacktrackNode()
+ self._add(self._root)
+
+
+ def _add(self, node, explore=True):
+ """
+ Adds a newly computed backtrack parameter. Makes sure that it doesn't already exist and
+ that we don't backtrack deeper than we are allowed by --backtrack.
+ """
+ if node.mask_steps <= self._max_depth and node not in self._nodes:
+ if explore:
+ self._unexplored_nodes.append(node)
+ self._nodes.append(node)
+
+
+ def get(self):
+ """
+ Returns a backtrack parameter. The backtrack graph is explored depth-first.
+ """
+ if self._unexplored_nodes:
+ node = self._unexplored_nodes.pop()
+ self._current_node = node
+ return copy.deepcopy(node.parameter)
+ else:
+ return None
+
+
+ def __len__(self):
+ return len(self._unexplored_nodes)
+
+
+ def _feedback_slot_conflict(self, conflict_data):
+ for pkg, parent_atoms in conflict_data:
+ new_node = copy.deepcopy(self._current_node)
+ new_node.depth += 1
+ new_node.mask_steps += 1
+ new_node.terminal = False
+ new_node.parameter.runtime_pkg_mask.setdefault(
+ pkg, {})["slot conflict"] = parent_atoms
+ self._add(new_node)
+
+
+ def _feedback_missing_dep(self, dep):
+ new_node = copy.deepcopy(self._current_node)
+ new_node.depth += 1
+ new_node.mask_steps += 1
+ new_node.terminal = False
+
+ new_node.parameter.runtime_pkg_mask.setdefault(
+ dep.parent, {})["missing dependency"] = \
+ set([(dep.parent, dep.root, dep.atom)])
+
+ self._add(new_node)
+
+
+ def _feedback_config(self, changes, explore=True):
+ """
+ Handle config changes. Don't count config changes for the maximum backtrack depth.
+ """
+ new_node = copy.deepcopy(self._current_node)
+ new_node.depth += 1
+ para = new_node.parameter
+
+ for change, data in changes.items():
+ if change == "needed_unstable_keywords":
+ para.needed_unstable_keywords.update(data)
+ elif change == "needed_p_mask_changes":
+ para.needed_p_mask_changes.update(data)
+ elif change == "needed_license_changes":
+ for pkg, missing_licenses in data:
+ para.needed_license_changes.setdefault(pkg, set()).update(missing_licenses)
+ elif change == "needed_use_config_changes":
+ for pkg, (new_use, new_changes) in data:
+ para.needed_use_config_changes[pkg] = (new_use, new_changes)
+ elif change == "rebuild_list":
+ para.rebuild_list.update(data)
+ elif change == "reinstall_list":
+ para.reinstall_list.update(data)
+
+ self._add(new_node, explore=explore)
+ self._current_node = new_node
+
+
+ def feedback(self, infos):
+ """
+ Takes information from the depgraph and computes new backtrack parameters to try.
+ """
+ assert self._current_node is not None, "call feedback() only after get() was called"
+
+ #Not all config changes require a restart, that's why they can appear together
+ #with other conflicts.
+ if "config" in infos:
+ self._feedback_config(infos["config"], explore=(len(infos)==1))
+
+ #There is at most one of the following types of conflicts for a given restart.
+ if "slot conflict" in infos:
+ self._feedback_slot_conflict(infos["slot conflict"])
+ elif "missing dependency" in infos:
+ self._feedback_missing_dep(infos["missing dependency"])
+
+
+ def backtracked(self):
+ """
+ If we didn't backtrack, there is only the root.
+ """
+ return len(self._nodes) > 1
+
+
+ def get_best_run(self):
+ """
+ Like get(), but returns the backtrack parameter that has as many config changes
+ as possible while having no masks. This makes --autounmask effective, but prevents
+ confusing "masked by backtracking" error messages.
+ """
+ best_node = self._root
+ for node in self._nodes:
+ if node.terminal and node.depth > best_node.depth:
+ best_node = node
+
+ return copy.deepcopy(best_node.parameter)
diff --git a/portage_with_autodep/pym/_emerge/resolver/circular_dependency.py b/portage_with_autodep/pym/_emerge/resolver/circular_dependency.py
new file mode 100644
index 0000000..d113c5e
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/resolver/circular_dependency.py
@@ -0,0 +1,267 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+from itertools import chain, product
+import logging
+
+from portage.dep import use_reduce, extract_affecting_use, check_required_use, get_required_use_flags
+from portage.exception import InvalidDependString
+from portage.output import colorize
+from portage.util import writemsg_level
+from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
+
+class circular_dependency_handler(object):
+
+ def __init__(self, depgraph, graph):
+ self.depgraph = depgraph
+ self.graph = graph
+ self.all_parent_atoms = depgraph._dynamic_config._parent_atoms
+
+ if "--debug" in depgraph._frozen_config.myopts:
+ # Show this debug output before doing the calculations
+ # that follow, so at least we have this debug info
+ # if we happen to hit a bug later.
+ writemsg_level("\n\ncircular dependency graph:\n\n",
+ level=logging.DEBUG, noiselevel=-1)
+ self.debug_print()
+
+ self.cycles, self.shortest_cycle = self._find_cycles()
+ #Guess if it is a large cluster of cycles. This usually requires
+ #a global USE change.
+ self.large_cycle_count = len(self.cycles) > 3
+ self.merge_list = self._prepare_reduced_merge_list()
+ #The digraph dump
+ self.circular_dep_message = self._prepare_circular_dep_message()
+ #Suggestions, in machine and human readable form
+ self.solutions, self.suggestions = self._find_suggestions()
+
+ def _find_cycles(self):
+ shortest_cycle = None
+ cycles = self.graph.get_cycles(ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
+ for cycle in cycles:
+ if not shortest_cycle or len(cycle) < len(shortest_cycle):
+ shortest_cycle = cycle
+ return cycles, shortest_cycle
+
+ def _prepare_reduced_merge_list(self):
+ """
+ Create a merge list to be displayed by depgraph.display().
+ This merge list contains only packages involved in
+ the circular deps.
+ """
+ display_order = []
+ tempgraph = self.graph.copy()
+ while tempgraph:
+ nodes = tempgraph.leaf_nodes()
+ if not nodes:
+ node = tempgraph.order[0]
+ else:
+ node = nodes[0]
+ display_order.append(node)
+ tempgraph.remove(node)
+ display_order.reverse()
+ return display_order
+
+ def _prepare_circular_dep_message(self):
+ """
+ Like digraph.debug_print(), but prints only the shortest cycle.
+ """
+ if not self.shortest_cycle:
+ return None
+
+ msg = []
+ indent = ""
+ for pos, pkg in enumerate(self.shortest_cycle):
+ parent = self.shortest_cycle[pos-1]
+ priorities = self.graph.nodes[parent][0][pkg]
+ if pos > 0:
+ msg.append(indent + "%s (%s)" % (pkg, priorities[-1],))
+ else:
+ msg.append(indent + "%s depends on" % pkg)
+ indent += " "
+
+ pkg = self.shortest_cycle[0]
+ parent = self.shortest_cycle[-1]
+ priorities = self.graph.nodes[parent][0][pkg]
+ msg.append(indent + "%s (%s)" % (pkg, priorities[-1],))
+
+ return "\n".join(msg)
+
+ def _get_use_mask_and_force(self, pkg):
+ return pkg.use.mask, pkg.use.force
+
+ def _get_autounmask_changes(self, pkg):
+ needed_use_config_change = self.depgraph._dynamic_config._needed_use_config_changes.get(pkg)
+ if needed_use_config_change is None:
+ return frozenset()
+
+ use, changes = needed_use_config_change
+ return frozenset(changes.keys())
+
+ def _find_suggestions(self):
+ if not self.shortest_cycle:
+ return None, None
+
+ suggestions = []
+ final_solutions = {}
+
+ for pos, pkg in enumerate(self.shortest_cycle):
+ parent = self.shortest_cycle[pos-1]
+ priorities = self.graph.nodes[parent][0][pkg]
+ parent_atoms = self.all_parent_atoms.get(pkg)
+
+ if priorities[-1].buildtime:
+ dep = parent.metadata["DEPEND"]
+ elif priorities[-1].runtime:
+ dep = parent.metadata["RDEPEND"]
+
+ for ppkg, atom in parent_atoms:
+ if ppkg == parent:
+ changed_parent = ppkg
+ parent_atom = atom.unevaluated_atom
+ break
+
+ try:
+ affecting_use = extract_affecting_use(dep, parent_atom,
+ eapi=parent.metadata["EAPI"])
+ except InvalidDependString:
+ if not parent.installed:
+ raise
+ affecting_use = set()
+
+ # Make sure we don't want to change a flag that is
+ # a) in use.mask or use.force
+ # b) changed by autounmask
+
+ usemask, useforce = self._get_use_mask_and_force(parent)
+ autounmask_changes = self._get_autounmask_changes(parent)
+ untouchable_flags = frozenset(chain(usemask, useforce, autounmask_changes))
+
+ affecting_use.difference_update(untouchable_flags)
+
+ #If any of the flags we're going to touch is in REQUIRED_USE, add all
+ #other flags in REQUIRED_USE to affecting_use, to not lose any solution.
+ required_use_flags = get_required_use_flags(parent.metadata["REQUIRED_USE"])
+
+ if affecting_use.intersection(required_use_flags):
+ # TODO: Find out exactly which REQUIRED_USE flags are
+ # entangled with affecting_use. We have to limit the
+ # number of flags since the number of loops is
+ # exponentially related (see bug #374397).
+ total_flags = set()
+ total_flags.update(affecting_use, required_use_flags)
+ total_flags.difference_update(untouchable_flags)
+ if len(total_flags) <= 10:
+ affecting_use = total_flags
+
+ affecting_use = tuple(affecting_use)
+
+ if not affecting_use:
+ continue
+
+ #We iterate over all possible settings of these use flags and gather
+ #a set of possible changes
+ #TODO: Use the information encoded in REQUIRED_USE
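+ #Illustrative: with affecting_use == ("a", "b") the loop below tries all
+ #2**2 == 4 on/off assignments of these flags.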
+ solutions = set()
+ for use_state in product(("disabled", "enabled"),
+ repeat=len(affecting_use)):
+ current_use = set(self.depgraph._pkg_use_enabled(parent))
+ for flag, state in zip(affecting_use, use_state):
+ if state == "enabled":
+ current_use.add(flag)
+ else:
+ current_use.discard(flag)
+ try:
+ reduced_dep = use_reduce(dep,
+ uselist=current_use, flat=True)
+ except InvalidDependString:
+ if not parent.installed:
+ raise
+ reduced_dep = None
+
+ if reduced_dep is not None and \
+ parent_atom not in reduced_dep:
+ #We found an assignment that removes the atom from 'dep'.
+ #Make sure it doesn't conflict with REQUIRED_USE.
+ required_use = parent.metadata["REQUIRED_USE"]
+
+ if check_required_use(required_use, current_use, parent.iuse.is_valid_flag):
+ use = self.depgraph._pkg_use_enabled(parent)
+ solution = set()
+ for flag, state in zip(affecting_use, use_state):
+ if state == "enabled" and \
+ flag not in use:
+ solution.add((flag, True))
+ elif state == "disabled" and \
+ flag in use:
+ solution.add((flag, False))
+ solutions.add(frozenset(solution))
+
+ for solution in solutions:
+ ignore_solution = False
+ for other_solution in solutions:
+ if solution is other_solution:
+ continue
+ if solution.issuperset(other_solution):
+ ignore_solution = True
+ if ignore_solution:
+ continue
+
+ #Check if a USE change conflicts with use requirements of the parents.
+ #If a requirement is hard, ignore the suggestion.
+ #If the requirement is conditional, warn the user that other changes might be needed.
+ followup_change = False
+ parent_parent_atoms = self.depgraph._dynamic_config._parent_atoms.get(changed_parent)
+ for ppkg, atom in parent_parent_atoms:
+
+ atom = atom.unevaluated_atom
+ if not atom.use:
+ continue
+
+ for flag, state in solution:
+ if flag in atom.use.enabled or flag in atom.use.disabled:
+ ignore_solution = True
+ break
+ elif atom.use.conditional:
+ for flags in atom.use.conditional.values():
+ if flag in flags:
+ followup_change = True
+ break
+
+ if ignore_solution:
+ break
+
+ if ignore_solution:
+ continue
+
+ changes = []
+ for flag, state in solution:
+ if state:
+ changes.append(colorize("red", "+"+flag))
+ else:
+ changes.append(colorize("blue", "-"+flag))
+ msg = "- %s (Change USE: %s)\n" \
+ % (parent.cpv, " ".join(changes))
+ if followup_change:
+ msg += " (This change might require USE changes on parent packages.)"
+ suggestions.append(msg)
+ final_solutions.setdefault(pkg, set()).add(solution)
+
+ return final_solutions, suggestions
+
+ def debug_print(self):
+ """
+ Create a copy of the digraph, prune all root nodes,
+ and call the debug_print() method.
+ """
+ graph = self.graph.copy()
+ while True:
+ root_nodes = graph.root_nodes(
+ ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
+ if not root_nodes:
+ break
+ graph.difference_update(root_nodes)
+
+ graph.debug_print()
diff --git a/portage_with_autodep/pym/_emerge/resolver/output.py b/portage_with_autodep/pym/_emerge/resolver/output.py
new file mode 100644
index 0000000..05e316a
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/resolver/output.py
@@ -0,0 +1,888 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""Resolver output display operation.
+"""
+
+__all__ = (
+ "Display",
+ )
+
+import sys
+
+from portage import os
+from portage import _unicode_decode
+from portage.dbapi.dep_expand import dep_expand
+from portage.const import PORTAGE_PACKAGE_ATOM
+from portage.dep import cpvequal, match_from_list
+from portage.exception import InvalidDependString
+from portage.output import ( blue, bold, colorize, create_color_func,
+ darkblue, darkgreen, green, nc_len, red, teal, turquoise, yellow )
+bad = create_color_func("BAD")
+from portage.util import writemsg_stdout, writemsg_level
+from portage.versions import best, catpkgsplit, cpv_getkey
+
+from _emerge.Blocker import Blocker
+from _emerge.create_world_atom import create_world_atom
+from _emerge.resolver.output_helpers import ( _DisplayConfig, _tree_display,
+ _PackageCounters, _create_use_string, _format_size, _calc_changelog, PkgInfo)
+from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+
+class Display(object):
+ """Formats and outputs the depgrah supplied it for merge/re-merge, etc.
+
+ __call__()
+ @param depgraph: list
+ @param favorites: defaults to []
+ @param verbosity: integer, defaults to None
+ """
+
+ def __init__(self):
+ self.changelogs = []
+ self.print_msg = []
+ self.blockers = []
+ self.counters = _PackageCounters()
+ self.resolver = None
+ self.resolved = None
+ self.vardb = None
+ self.portdb = None
+ self.verboseadd = ''
+ self.oldlp = None
+ self.myfetchlist = None
+ self.indent = ''
+ self.is_new = True
+ self.cur_use = None
+ self.cur_iuse = None
+ self.old_use = ''
+ self.old_iuse = ''
+ self.use_expand = None
+ self.use_expand_hidden = None
+ self.pkgsettings = None
+ self.forced_flags = None
+ self.newlp = None
+ self.conf = None
+ self.blocker_style = None
+
+
+ def _blockers(self, pkg, fetch_symbol):
+ """Processes pkg for blockers and adds colorized strings to
+ self.print_msg and self.blockers
+
+ @param pkg: _emerge.Package instance
+ @param fetch_symbol: string
+ @rtype: bool
+ Modifies class globals: self.blocker_style, self.resolved,
+ self.print_msg
+ """
+ if pkg.satisfied:
+ self.blocker_style = "PKG_BLOCKER_SATISFIED"
+ addl = "%s %s " % (colorize(self.blocker_style, "b"),
+ fetch_symbol)
+ else:
+ self.blocker_style = "PKG_BLOCKER"
+ addl = "%s %s " % (colorize(self.blocker_style, "B"),
+ fetch_symbol)
+ addl += self.empty_space_in_brackets()
+ self.resolved = dep_expand(
+ str(pkg.atom).lstrip("!"), mydb=self.vardb,
+ settings=self.pkgsettings
+ )
+ if self.conf.columns and self.conf.quiet:
+ addl += " " + colorize(self.blocker_style, str(self.resolved))
+ else:
+ addl = "[%s %s] %s%s" % \
+ (colorize(self.blocker_style, "blocks"),
+ addl, self.indent,
+ colorize(self.blocker_style, str(self.resolved))
+ )
+ block_parents = self.conf.blocker_parents.parent_nodes(pkg)
+ block_parents = set([pnode[2] for pnode in block_parents])
+ block_parents = ", ".join(block_parents)
+ if self.resolved != pkg[2]:
+ addl += colorize(self.blocker_style,
+ " (\"%s\" is blocking %s)") % \
+ (str(pkg.atom).lstrip("!"), block_parents)
+ else:
+ addl += colorize(self.blocker_style,
+ " (is blocking %s)") % block_parents
+ if isinstance(pkg, Blocker) and pkg.satisfied:
+ if self.conf.columns:
+ return True
+ self.print_msg.append(addl)
+ else:
+ self.blockers.append(addl)
+ return False
+
+
+ def _display_use(self, pkg, myoldbest, myinslotlist):
+ """ USE flag display
+
+ @param pkg: _emerge.Package instance
+ @param myoldbest: list of installed versions
+ @param myinslotlist: list of installed slots
+ Modifies class globals: self.forced_flags, self.cur_iuse,
+ self.old_iuse, self.old_use, self.use_expand
+ """
+
+ self.forced_flags = set()
+ self.forced_flags.update(pkg.use.force)
+ self.forced_flags.update(pkg.use.mask)
+
+ self.cur_use = [flag for flag in self.conf.pkg_use_enabled(pkg) \
+ if flag in pkg.iuse.all]
+ self.cur_iuse = sorted(pkg.iuse.all)
+
+ if myoldbest and myinslotlist:
+ previous_cpv = myoldbest[0].cpv
+ else:
+ previous_cpv = pkg.cpv
+ if self.vardb.cpv_exists(previous_cpv):
+ previous_pkg = self.vardb.match_pkgs('=' + previous_cpv)[0]
+ self.old_iuse = sorted(previous_pkg.iuse.all)
+ self.old_use = previous_pkg.use.enabled
+ self.is_new = False
+ else:
+ self.old_iuse = []
+ self.old_use = []
+ self.is_new = True
+
+ self.old_use = [flag for flag in self.old_use if flag in self.old_iuse]
+
+ self.use_expand = pkg.use.expand
+ self.use_expand_hidden = pkg.use.expand_hidden
+ return
+
+ def include_mask_str(self):
+ return self.conf.verbosity > 1
+
+ def gen_mask_str(self, pkg):
+ """
+ @param pkg: _emerge.Package instance
+ """
+ hardmasked = pkg.isHardMasked()
+ mask_str = " "
+
+ if hardmasked:
+ mask_str = colorize("BAD", "#")
+ else:
+ keyword_mask = pkg.get_keyword_mask()
+
+ if keyword_mask is None:
+ pass
+ elif keyword_mask == "missing":
+ mask_str = colorize("BAD", "*")
+ else:
+ mask_str = colorize("WARN", "~")
+
+ return mask_str
+
+ def empty_space_in_brackets(self):
+ space = ""
+ if self.include_mask_str():
+ # add column for mask status
+ space += " "
+ return space
+
+ def map_to_use_expand(self, myvals, forced_flags=False,
+ remove_hidden=True):
+ """Map use expand variables
+
+ @param myvals: list
+ @param forced_flags: bool
+ @param remove_hidden: bool
+ @rtype ret dictionary,
+ or (ret dict, forced dict) when forced_flags is True.
+ """
+ ret = {}
+ forced = {}
+ for exp in self.use_expand:
+ ret[exp] = []
+ forced[exp] = set()
+ for val in myvals[:]:
+ if val.startswith(exp.lower()+"_"):
+ if val in self.forced_flags:
+ forced[exp].add(val[len(exp)+1:])
+ ret[exp].append(val[len(exp)+1:])
+ myvals.remove(val)
+ ret["USE"] = myvals
+ forced["USE"] = [val for val in myvals \
+ if val in self.forced_flags]
+ if remove_hidden:
+ for exp in self.use_expand_hidden:
+ ret.pop(exp, None)
+ if forced_flags:
+ return ret, forced
+ return ret
+
+
+ def recheck_hidden(self, pkg):
+ """ Prevent USE_EXPAND_HIDDEN flags from being hidden if they
+ are the only thing that triggered reinstallation.
+
+ @param pkg: _emerge.Package instance
+ Modifies self.use_expand_hidden, self.use_expand, self.verboseadd
+ """
+ reinst_flags_map = {}
+ reinstall_for_flags = self.conf.reinstall_nodes.get(pkg)
+ reinst_expand_map = None
+ if reinstall_for_flags:
+ reinst_flags_map = self.map_to_use_expand(
+ list(reinstall_for_flags), remove_hidden=False)
+ for k in list(reinst_flags_map):
+ if not reinst_flags_map[k]:
+ del reinst_flags_map[k]
+ if not reinst_flags_map.get("USE"):
+ reinst_expand_map = reinst_flags_map.copy()
+ reinst_expand_map.pop("USE", None)
+ if reinst_expand_map and \
+ not set(reinst_expand_map).difference(
+ self.use_expand_hidden):
+ self.use_expand_hidden = \
+ set(self.use_expand_hidden).difference(
+ reinst_expand_map)
+
+ cur_iuse_map, iuse_forced = \
+ self.map_to_use_expand(self.cur_iuse, forced_flags=True)
+ cur_use_map = self.map_to_use_expand(self.cur_use)
+ old_iuse_map = self.map_to_use_expand(self.old_iuse)
+ old_use_map = self.map_to_use_expand(self.old_use)
+
+ use_expand = sorted(self.use_expand)
+ use_expand.insert(0, "USE")
+
+ for key in use_expand:
+ if key in self.use_expand_hidden:
+ continue
+ self.verboseadd += _create_use_string(self.conf, key.upper(),
+ cur_iuse_map[key], iuse_forced[key],
+ cur_use_map[key], old_iuse_map[key],
+ old_use_map[key], self.is_new,
+ reinst_flags_map.get(key))
+ return
+
+
+ @staticmethod
+ def pkgprint(pkg_str, pkg_info):
+ """Colorizes a string acording to pkg_info settings
+
+ @param pkg_str: string
+ @param pkg_info: dictionary
+ @rtype colorized string
+ """
+ if pkg_info.merge:
+ if pkg_info.built:
+ if pkg_info.system:
+ return colorize("PKG_BINARY_MERGE_SYSTEM", pkg_str)
+ elif pkg_info.world:
+ return colorize("PKG_BINARY_MERGE_WORLD", pkg_str)
+ else:
+ return colorize("PKG_BINARY_MERGE", pkg_str)
+ else:
+ if pkg_info.system:
+ return colorize("PKG_MERGE_SYSTEM", pkg_str)
+ elif pkg_info.world:
+ return colorize("PKG_MERGE_WORLD", pkg_str)
+ else:
+ return colorize("PKG_MERGE", pkg_str)
+ elif pkg_info.operation == "uninstall":
+ return colorize("PKG_UNINSTALL", pkg_str)
+ else:
+ if pkg_info.system:
+ return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
+ elif pkg_info.world:
+ return colorize("PKG_NOMERGE_WORLD", pkg_str)
+ else:
+ return colorize("PKG_NOMERGE", pkg_str)
+
+
+ def verbose_size(self, pkg, repoadd_set, pkg_info):
+ """Determines the size of the downloads required
+
+ @param pkg: _emerge.Package instance
+ @param repoadd_set: set of repos to add
+ @param pkg_info: dictionary
+ Modifies class globals: self.myfetchlist, self.counters.totalsize,
+ self.verboseadd, repoadd_set.
+ """
+ mysize = 0
+ if pkg.type_name == "ebuild" and pkg_info.merge:
+ try:
+ myfilesdict = self.portdb.getfetchsizes(pkg.cpv,
+ useflags=pkg_info.use, myrepo=pkg.repo)
+ except InvalidDependString as e:
+ # FIXME: validate SRC_URI earlier
+ depstr, = self.portdb.aux_get(pkg.cpv,
+ ["SRC_URI"], myrepo=pkg.repo)
+ show_invalid_depstring_notice(
+ pkg, depstr, str(e))
+ raise
+ if myfilesdict is None:
+ myfilesdict = "[empty/missing/bad digest]"
+ else:
+ for myfetchfile in myfilesdict:
+ if myfetchfile not in self.myfetchlist:
+ mysize += myfilesdict[myfetchfile]
+ self.myfetchlist.append(myfetchfile)
+ if pkg_info.ordered:
+ self.counters.totalsize += mysize
+ self.verboseadd += _format_size(mysize)
+
+ # overlay verbose
+ # assign index for a previous version in the same slot
+ slot_matches = self.vardb.match(pkg.slot_atom)
+ if slot_matches:
+ repo_name_prev = self.vardb.aux_get(slot_matches[0],
+ ["repository"])[0]
+ else:
+ repo_name_prev = None
+
+ # now use the data to generate output
+ if pkg.installed or not slot_matches:
+ self.repoadd = self.conf.repo_display.repoStr(
+ pkg_info.repo_path_real)
+ else:
+ repo_path_prev = None
+ if repo_name_prev:
+ repo_path_prev = self.portdb.getRepositoryPath(
+ repo_name_prev)
+ if repo_path_prev == pkg_info.repo_path_real:
+ self.repoadd = self.conf.repo_display.repoStr(
+ pkg_info.repo_path_real)
+ else:
+ self.repoadd = "%s=>%s" % (
+ self.conf.repo_display.repoStr(repo_path_prev),
+ self.conf.repo_display.repoStr(pkg_info.repo_path_real))
+ if self.repoadd:
+ repoadd_set.add(self.repoadd)
+
+
+ @staticmethod
+ def convert_myoldbest(myoldbest):
+ """converts and colorizes a version list to a string
+
+ @param myoldbest: list
+ @rtype string.
+ """
+ # Convert myoldbest from a list to a string.
+ myoldbest_str = ""
+ if myoldbest:
+ versions = []
+ for pos, pkg in enumerate(myoldbest):
+ key = catpkgsplit(pkg.cpv)[2] + \
+ "-" + catpkgsplit(pkg.cpv)[3]
+ if key[-3:] == "-r0":
+ key = key[:-3]
+ versions.append(key)
+ myoldbest_str = blue("["+", ".join(versions)+"]")
+ return myoldbest_str
+
+
+ def set_interactive(self, pkg, ordered, addl):
+ """Increments counters.interactive if the pkg is to
+ be merged and its metadata has interactive set True
+
+ @param pkg: _emerge.Package instance
+ @param ordered: boolean
+ @param addl: already defined string to add to
+ """
+ if 'interactive' in pkg.metadata.properties and \
+ pkg.operation == 'merge':
+ addl = colorize("WARN", "I") + addl[1:]
+ if ordered:
+ self.counters.interactive += 1
+ return addl
+
+ def _set_non_root_columns(self, addl, pkg_info, pkg):
+ """sets the indent level and formats the output
+
+ @param addl: already defined string to add to
+ @param pkg_info: dictionary
+ @param pkg: _emerge.Package instance
+ @rtype string
+ """
+ if self.conf.quiet:
+ myprint = addl + " " + self.indent + \
+ self.pkgprint(pkg_info.cp, pkg_info)
+ myprint = myprint+darkblue(" "+pkg_info.ver)+" "
+ myprint = myprint+pkg_info.oldbest
+ myprint = myprint+darkgreen("to "+pkg.root)
+ self.verboseadd = None
+ else:
+ if not pkg_info.merge:
+ myprint = "[%s] %s%s" % \
+ (self.pkgprint(pkg_info.operation.ljust(13), pkg_info),
+ self.indent, self.pkgprint(pkg.cp, pkg_info))
+ else:
+ myprint = "[%s %s] %s%s" % \
+ (self.pkgprint(pkg.type_name, pkg_info), addl,
+ self.indent, self.pkgprint(pkg.cp, pkg_info))
+ if (self.newlp-nc_len(myprint)) > 0:
+ myprint = myprint+(" "*(self.newlp-nc_len(myprint)))
+ myprint = myprint+"["+darkblue(pkg_info.ver)+"] "
+ if (self.oldlp-nc_len(myprint)) > 0:
+ myprint = myprint+" "*(self.oldlp-nc_len(myprint))
+ myprint = myprint+pkg_info.oldbest
+ myprint += darkgreen("to " + pkg.root)
+ return myprint
+
+
+ def _set_root_columns(self, addl, pkg_info, pkg):
+ """sets the indent level and formats the output
+
+ @param addl: already defined string to add to
+ @param pkg_info: dictionary
+ @param pkg: _emerge.Package instance
+ @rtype string
+ Modifies self.verboseadd
+ """
+ if self.conf.quiet:
+ myprint = addl + " " + self.indent + \
+ self.pkgprint(pkg_info.cp, pkg_info)
+ myprint = myprint+" "+green(pkg_info.ver)+" "
+ myprint = myprint+pkg_info.oldbest
+ self.verboseadd = None
+ else:
+ if not pkg_info.merge:
+ addl = self.empty_space_in_brackets()
+ myprint = "[%s%s] %s%s" % \
+ (self.pkgprint(pkg_info.operation.ljust(13), pkg_info),
+ addl, self.indent, self.pkgprint(pkg.cp, pkg_info))
+ else:
+ myprint = "[%s %s] %s%s" % \
+ (self.pkgprint(pkg.type_name, pkg_info), addl,
+ self.indent, self.pkgprint(pkg.cp, pkg_info))
+ if (self.newlp-nc_len(myprint)) > 0:
+ myprint = myprint+(" "*(self.newlp-nc_len(myprint)))
+ myprint = myprint+green(" ["+pkg_info.ver+"] ")
+ if (self.oldlp-nc_len(myprint)) > 0:
+ myprint = myprint+(" "*(self.oldlp-nc_len(myprint)))
+ myprint += pkg_info.oldbest
+ return myprint
+
+
+ def _set_no_columns(self, pkg, pkg_info, addl):
+ """prints pkg info without column indentation.
+
+ @param pkg: _emerge.Package instance
+ @param pkg_info: dictionary
+ @param addl: the current text to add for the next line to output
+ @rtype the updated addl
+ """
+ if not pkg_info.merge:
+ addl = self.empty_space_in_brackets()
+ myprint = "[%s%s] %s%s %s" % \
+ (self.pkgprint(pkg_info.operation.ljust(13),
+ pkg_info), addl,
+ self.indent, self.pkgprint(pkg.cpv, pkg_info),
+ pkg_info.oldbest)
+ else:
+ myprint = "[%s %s] %s%s %s" % \
+ (self.pkgprint(pkg.type_name, pkg_info),
+ addl, self.indent,
+ self.pkgprint(pkg.cpv, pkg_info), pkg_info.oldbest)
+ return myprint
+
+
+ def _insert_slot(self, pkg, pkg_info, myinslotlist):
+ """Adds slot info to the message
+
+ @returns addl: formatted slot info
+ Modifies self.counters.downgrades, self.counters.upgrades,
+ self.counters.binary
+ """
+ addl = " " + pkg_info.fetch_symbol
+ if not cpvequal(pkg.cpv,
+ best([pkg.cpv] + [x.cpv for x in myinslotlist])):
+ # Downgrade in slot
+ addl += turquoise("U")+blue("D")
+ if pkg_info.ordered:
+ self.counters.downgrades += 1
+ if pkg.type_name == "binary":
+ self.counters.binary += 1
+ else:
+ # Update in slot
+ addl += turquoise("U") + " "
+ if pkg_info.ordered:
+ self.counters.upgrades += 1
+ if pkg.type_name == "binary":
+ self.counters.binary += 1
+ return addl
+
+
+ def _new_slot(self, pkg, pkg_info):
+ """New slot, mark it new.
+
+ @returns addl: formatted slot info
+ Modifies self.counters.newslot, self.counters.binary
+ """
+ addl = " " + green("NS") + pkg_info.fetch_symbol + " "
+ if pkg_info.ordered:
+ self.counters.newslot += 1
+ if pkg.type_name == "binary":
+ self.counters.binary += 1
+ return addl
+
+
+ def print_messages(self, show_repos):
+ """Performs the actual output printing of the pre-formatted
+ messages
+
+ @param show_repos: bool.
+ """
+ for msg in self.print_msg:
+ if isinstance(msg, basestring):
+ writemsg_stdout("%s\n" % (msg,), noiselevel=-1)
+ continue
+ myprint, self.verboseadd, repoadd = msg
+ if self.verboseadd:
+ myprint += " " + self.verboseadd
+ if show_repos and repoadd:
+ myprint += " " + teal("[%s]" % repoadd)
+ writemsg_stdout("%s\n" % (myprint,), noiselevel=-1)
+ return
+
+
+ def print_blockers(self):
+ """Performs the actual output printing of the pre-formatted
+ blocker messages
+ """
+ for pkg in self.blockers:
+ writemsg_stdout("%s\n" % (pkg,), noiselevel=-1)
+ return
+
+
+ def print_verbose(self, show_repos):
+ """Prints the verbose output to std_out
+
+ @param show_repos: bool.
+ """
+ writemsg_stdout('\n%s\n' % (self.counters,), noiselevel=-1)
+ if show_repos:
+ # Use _unicode_decode() to force unicode format string so
+ # that RepoDisplay.__unicode__() is called in python2.
+ writemsg_stdout(_unicode_decode("%s") % (self.conf.repo_display,),
+ noiselevel=-1)
+ return
+
+
+ def print_changelog(self):
+ """Prints the changelog text to std_out
+ """
+ writemsg_stdout('\n', noiselevel=-1)
+ for revision, text in self.changelogs:
+ writemsg_stdout(bold('*'+revision) + '\n' + text,
+ noiselevel=-1)
+ return
+
+
+ def get_display_list(self, mylist):
+ """Determines the display list to process
+
+ @param mylist
+ @rtype list
+ Modifies self.counters.blocks, self.counters.blocks_satisfied,
+
+ """
+ unsatisfied_blockers = []
+ ordered_nodes = []
+ for pkg in mylist:
+ if isinstance(pkg, Blocker):
+ self.counters.blocks += 1
+ if pkg.satisfied:
+ ordered_nodes.append(pkg)
+ self.counters.blocks_satisfied += 1
+ else:
+ unsatisfied_blockers.append(pkg)
+ else:
+ ordered_nodes.append(pkg)
+ if self.conf.tree_display:
+ display_list = _tree_display(self.conf, ordered_nodes)
+ else:
+ display_list = [(pkg, 0, True) for pkg in ordered_nodes]
+ for pkg in unsatisfied_blockers:
+ display_list.append((pkg, 0, True))
+ return display_list
+
+
+ def set_pkg_info(self, pkg, ordered):
+ """Sets various pkg_info dictionary variables
+
+ @param pkg: _emerge.Package instance
+ @param ordered: bool
+ @rtype pkg_info dictionary
+ Modifies self.counters.restrict_fetch,
+ self.counters.restrict_fetch_satisfied
+ """
+ pkg_info = PkgInfo()
+ pkg_info.ordered = ordered
+ pkg_info.fetch_symbol = " "
+ pkg_info.operation = pkg.operation
+ pkg_info.merge = ordered and pkg_info.operation == "merge"
+ if not pkg_info.merge and pkg_info.operation == "merge":
+ pkg_info.operation = "nomerge"
+ pkg_info.built = pkg.type_name != "ebuild"
+ pkg_info.ebuild_path = None
+ pkg_info.repo_name = pkg.repo
+ if pkg.type_name == "ebuild":
+ pkg_info.ebuild_path = self.portdb.findname(
+ pkg.cpv, myrepo=pkg_info.repo_name)
+ if pkg_info.ebuild_path is None:
+ raise AssertionError(
+ "ebuild not found for '%s'" % pkg.cpv)
+ pkg_info.repo_path_real = os.path.dirname(os.path.dirname(
+ os.path.dirname(pkg_info.ebuild_path)))
+ else:
+ pkg_info.repo_path_real = \
+ self.portdb.getRepositoryPath(pkg.metadata["repository"])
+ pkg_info.use = list(self.conf.pkg_use_enabled(pkg))
+ if not pkg.built and pkg.operation == 'merge' and \
+ 'fetch' in pkg.metadata.restrict:
+ pkg_info.fetch_symbol = red("F")
+ if pkg_info.ordered:
+ self.counters.restrict_fetch += 1
+ if not self.portdb.getfetchsizes(pkg.cpv,
+ useflags=pkg_info.use, myrepo=pkg.repo):
+ pkg_info.fetch_symbol = green("f")
+ if pkg_info.ordered:
+ self.counters.restrict_fetch_satisfied += 1
+ return pkg_info
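+
+ # Illustrative note (not part of the original code): for a
+ # fetch-restricted ebuild whose distfiles are not yet present,
+ # pkg_info.fetch_symbol becomes red("F"); once getfetchsizes()
+ # reports nothing left to fetch, the restriction is satisfied and
+ # the symbol is relaxed to green("f").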
+
+
+ def do_changelog(self, pkg, pkg_info):
+ """Processes and adds the changelog text to the master text for output
+
+ @param pkg: _emerge.Package instance
+ @param pkg_info: PkgInfo instance
+ Modifies self.changelogs
+ """
+ inst_matches = self.vardb.match(pkg.slot_atom)
+ if inst_matches:
+ ebuild_path_cl = pkg_info.ebuild_path
+ if ebuild_path_cl is None:
+ # binary package
+ ebuild_path_cl = self.portdb.findname(pkg.cpv, myrepo=pkg.repo)
+ if ebuild_path_cl is not None:
+ self.changelogs.extend(_calc_changelog(
+ ebuild_path_cl, inst_matches[0], pkg.cpv))
+ return
+
+
+ def check_system_world(self, pkg):
+ """Checks for any occurances of the package in the system or world sets
+
+ @param pkg: _emerge.Package instance
+ @rtype system and world booleans
+ """
+ root_config = self.conf.roots[pkg.root]
+ system_set = root_config.sets["system"]
+ world_set = root_config.sets["selected"]
+ system = False
+ world = False
+ try:
+ system = system_set.findAtomForPackage(
+ pkg, modified_use=self.conf.pkg_use_enabled(pkg))
+ world = world_set.findAtomForPackage(
+ pkg, modified_use=self.conf.pkg_use_enabled(pkg))
+ if not (self.conf.oneshot or world) and \
+ pkg.root == self.conf.target_root and \
+ self.conf.favorites.findAtomForPackage(
+ pkg, modified_use=self.conf.pkg_use_enabled(pkg)
+ ):
+ # Maybe it will be added to world now.
+ if create_world_atom(pkg, self.conf.favorites, root_config):
+ world = True
+ except InvalidDependString:
+ # This is reported elsewhere if relevant.
+ pass
+ return system, world
+
+
+ @staticmethod
+ def get_ver_str(pkg):
+ """Obtains the version string
+ @param pkg: _emerge.Package instance
+ @rtype string
+ """
+ ver_str = list(catpkgsplit(pkg.cpv)[2:])
+ if ver_str[1] == "r0":
+ ver_str[1] = ""
+ else:
+ ver_str[1] = "-" + ver_str[1]
+ return ver_str[0]+ver_str[1]
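+
+ # Illustrative sketch (hypothetical cpvs, not from the original code):
+ # catpkgsplit("sys-apps/portage-2.1.10-r3") yields
+ # ('sys-apps', 'portage', '2.1.10', 'r3'), so get_ver_str() returns
+ # "2.1.10-r3", while an implicit "r0" revision is dropped and
+ # "cat/pkg-1.0" simply gives "1.0".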
+
+
+ def _get_installed_best(self, pkg, pkg_info):
+ """ we need to use "--emptrytree" testing here rather than
+ "empty" param testing because "empty"
+ param is used for -u, where you still *do* want to see when
+ something is being upgraded.
+
+ @param pkg: _emerge.Package instance
+ @param pkg_info: PkgInfo instance
+ @rtype addl, myoldbest: list, myinslotlist: list
+ Modifies self.counters.reinst, self.counters.binary, self.counters.new
+
+ """
+ myoldbest = []
+ myinslotlist = None
+ installed_versions = self.vardb.match_pkgs(pkg.cp)
+ if self.vardb.cpv_exists(pkg.cpv):
+ addl = " "+yellow("R")+pkg_info.fetch_symbol+" "
+ if pkg_info.ordered:
+ if pkg_info.merge:
+ self.counters.reinst += 1
+ if pkg.type_name == "binary":
+ self.counters.binary += 1
+ elif pkg_info.operation == "uninstall":
+ self.counters.uninst += 1
+ # filter out old-style virtual matches
+ elif installed_versions and \
+ installed_versions[0].cp == pkg.cp:
+ myinslotlist = self.vardb.match_pkgs(pkg.slot_atom)
+ # If this is the first install of a new-style virtual, we
+ # need to filter out old-style virtual matches.
+ if myinslotlist and \
+ myinslotlist[0].cp != pkg.cp:
+ myinslotlist = None
+ if myinslotlist:
+ myoldbest = myinslotlist[:]
+ addl = self._insert_slot(pkg, pkg_info, myinslotlist)
+ else:
+ myoldbest = installed_versions
+ addl = self._new_slot(pkg, pkg_info)
+ if self.conf.changelog:
+ self.do_changelog(pkg, pkg_info)
+ else:
+ addl = " " + green("N") + " " + pkg_info.fetch_symbol + " "
+ if pkg_info.ordered:
+ self.counters.new += 1
+ if pkg.type_name == "binary":
+ self.counters.binary += 1
+ return addl, myoldbest, myinslotlist
+
+
+ def __call__(self, depgraph, mylist, favorites=None, verbosity=None):
+ """The main operation to format and display the resolver output.
+
+ @param depgraph: dependency graph
+ @param mylist: list of packages being processed
+ @param favorites: list, defaults to []
+ @param verbosity: verbose level, defaults to None
+ Modifies self.conf, self.myfetchlist, self.portdb, self.vardb,
+ self.pkgsettings, self.verboseadd, self.oldlp, self.newlp,
+ self.print_msg
+ """
+ if favorites is None:
+ favorites = []
+ self.conf = _DisplayConfig(depgraph, mylist, favorites, verbosity)
+ mylist = self.get_display_list(self.conf.mylist)
+ # files to fetch list - avoids counting a same file twice
+ # in size display (verbose mode)
+ self.myfetchlist = []
+ # Use this set to detect when all the "repoadd" strings are "[0]"
+ # and disable the entire repo display in this case.
+ repoadd_set = set()
+
+ for mylist_index in range(len(mylist)):
+ pkg, depth, ordered = mylist[mylist_index]
+ self.portdb = self.conf.trees[pkg.root]["porttree"].dbapi
+ self.vardb = self.conf.trees[pkg.root]["vartree"].dbapi
+ self.pkgsettings = self.conf.pkgsettings[pkg.root]
+ self.indent = " " * depth
+
+ if isinstance(pkg, Blocker):
+ if self._blockers(pkg, fetch_symbol=" "):
+ continue
+ else:
+ pkg_info = self.set_pkg_info(pkg, ordered)
+ addl, pkg_info.oldbest, myinslotlist = \
+ self._get_installed_best(pkg, pkg_info)
+ self.verboseadd = ""
+ self.repoadd = None
+ self._display_use(pkg, pkg_info.oldbest, myinslotlist)
+ self.recheck_hidden(pkg)
+ if self.conf.verbosity == 3:
+ self.verbose_size(pkg, repoadd_set, pkg_info)
+
+ pkg_info.cp = pkg.cp
+ pkg_info.ver = self.get_ver_str(pkg)
+
+ self.oldlp = self.conf.columnwidth - 30
+ self.newlp = self.oldlp - 30
+ pkg_info.oldbest = self.convert_myoldbest(pkg_info.oldbest)
+ pkg_info.system, pkg_info.world = \
+ self.check_system_world(pkg)
+ addl = self.set_interactive(pkg, pkg_info.ordered, addl)
+
+ if self.include_mask_str():
+ addl += self.gen_mask_str(pkg)
+
+ if pkg.root != "/":
+ if pkg_info.oldbest:
+ pkg_info.oldbest += " "
+ if self.conf.columns:
+ myprint = self._set_non_root_columns(
+ addl, pkg_info, pkg)
+ else:
+ if not pkg_info.merge:
+ addl = self.empty_space_in_brackets()
+ myprint = "[%s%s] " % (
+ self.pkgprint(pkg_info.operation.ljust(13),
+ pkg_info), addl,
+ )
+ else:
+ myprint = "[%s %s] " % (
+ self.pkgprint(pkg.type_name, pkg_info), addl)
+ myprint += self.indent + \
+ self.pkgprint(pkg.cpv, pkg_info) + " " + \
+ pkg_info.oldbest + darkgreen("to " + pkg.root)
+ else:
+ if self.conf.columns:
+ myprint = self._set_root_columns(
+ addl, pkg_info, pkg)
+ else:
+ myprint = self._set_no_columns(
+ pkg, pkg_info, addl)
+
+ if self.conf.columns and pkg.operation == "uninstall":
+ continue
+ self.print_msg.append((myprint, self.verboseadd, self.repoadd))
+
+ if not self.conf.tree_display \
+ and not self.conf.no_restart \
+ and pkg.root == self.conf.running_root.root \
+ and match_from_list(PORTAGE_PACKAGE_ATOM, [pkg]) \
+ and not self.conf.quiet:
+
+ if not self.vardb.cpv_exists(pkg.cpv) or \
+ '9999' in pkg.cpv or \
+ 'git' in pkg.inherited or \
+ 'git-2' in pkg.inherited:
+ if mylist_index < len(mylist) - 1:
+ self.print_msg.append(
+ colorize(
+ "WARN", "*** Portage will stop merging "
+ "at this point and reload itself,"
+ )
+ )
+ self.print_msg.append(
+ colorize("WARN", " then resume the merge.")
+ )
+
+ show_repos = repoadd_set and repoadd_set != set(["0"])
+
+ # now finally print out the messages
+ self.print_messages(show_repos)
+ self.print_blockers()
+ if self.conf.verbosity == 3:
+ self.print_verbose(show_repos)
+ if self.conf.changelog:
+ self.print_changelog()
+
+ return os.EX_OK
diff --git a/portage_with_autodep/pym/_emerge/resolver/output_helpers.py b/portage_with_autodep/pym/_emerge/resolver/output_helpers.py
new file mode 100644
index 0000000..b7e7376
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/resolver/output_helpers.py
@@ -0,0 +1,576 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""Contains private support functions for the Display class
+in output.py
+"""
+__all__ = (
+ )
+
+import io
+import re
+import sys
+
+from portage import os
+from portage import _encodings, _unicode_encode
+from portage._sets.base import InternalPackageSet
+from portage.output import blue, colorize, create_color_func, green, red, \
+ teal, yellow
+bad = create_color_func("BAD")
+from portage.util import writemsg
+from portage.versions import catpkgsplit
+
+from _emerge.Blocker import Blocker
+from _emerge.Package import Package
+
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+
+class _RepoDisplay(object):
+ def __init__(self, roots):
+ self._shown_repos = {}
+ self._unknown_repo = False
+ repo_paths = set()
+ for root_config in roots.values():
+ portdir = root_config.settings.get("PORTDIR")
+ if portdir:
+ repo_paths.add(portdir)
+ overlays = root_config.settings.get("PORTDIR_OVERLAY")
+ if overlays:
+ repo_paths.update(overlays.split())
+ repo_paths = list(repo_paths)
+ self._repo_paths = repo_paths
+ self._repo_paths_real = [ os.path.realpath(repo_path) \
+ for repo_path in repo_paths ]
+
+ # pre-allocate index for PORTDIR so that it always has index 0.
+ for root_config in roots.values():
+ portdb = root_config.trees["porttree"].dbapi
+ portdir = portdb.porttree_root
+ if portdir:
+ self.repoStr(portdir)
+
+ def repoStr(self, repo_path_real):
+ real_index = -1
+ # Guard the lookup: list.index() raises ValueError for unknown
+ # paths, which would bypass the "?" branch below.
+ if repo_path_real and repo_path_real in self._repo_paths_real:
+ real_index = self._repo_paths_real.index(repo_path_real)
+ if real_index == -1:
+ s = "?"
+ self._unknown_repo = True
+ else:
+ shown_repos = self._shown_repos
+ repo_paths = self._repo_paths
+ repo_path = repo_paths[real_index]
+ index = shown_repos.get(repo_path)
+ if index is None:
+ index = len(shown_repos)
+ shown_repos[repo_path] = index
+ s = str(index)
+ return s
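+
+ # Illustrative sketch (assumed paths, not from the original code):
+ # with _repo_paths_real == ["/usr/portage"], repoStr("/usr/portage")
+ # returns "0" (PORTDIR is pre-allocated index 0 in __init__), while a
+ # path outside all known repositories returns "?" and sets
+ # _unknown_repo so that __str__ prints the matching legend.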
+
+ def __str__(self):
+ output = []
+ shown_repos = self._shown_repos
+ unknown_repo = self._unknown_repo
+ if shown_repos or self._unknown_repo:
+ output.append("Portage tree and overlays:\n")
+ show_repo_paths = list(shown_repos)
+ for repo_path, repo_index in shown_repos.items():
+ show_repo_paths[repo_index] = repo_path
+ if show_repo_paths:
+ for index, repo_path in enumerate(show_repo_paths):
+ output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
+ if unknown_repo:
+ output.append(" "+teal("[?]") + \
+ " indicates that the source repository could not be determined\n")
+ return "".join(output)
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['content'])
+
+
+class _PackageCounters(object):
+
+ def __init__(self):
+ self.upgrades = 0
+ self.downgrades = 0
+ self.new = 0
+ self.newslot = 0
+ self.reinst = 0
+ self.uninst = 0
+ self.blocks = 0
+ self.blocks_satisfied = 0
+ self.totalsize = 0
+ self.restrict_fetch = 0
+ self.restrict_fetch_satisfied = 0
+ self.interactive = 0
+ self.binary = 0
+
+ def __str__(self):
+ total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
+ myoutput = []
+ details = []
+ myoutput.append("Total: %s package" % total_installs)
+ if total_installs != 1:
+ myoutput.append("s")
+ if total_installs != 0:
+ myoutput.append(" (")
+ if self.upgrades > 0:
+ details.append("%s upgrade" % self.upgrades)
+ if self.upgrades > 1:
+ details[-1] += "s"
+ if self.downgrades > 0:
+ details.append("%s downgrade" % self.downgrades)
+ if self.downgrades > 1:
+ details[-1] += "s"
+ if self.new > 0:
+ details.append("%s new" % self.new)
+ if self.newslot > 0:
+ details.append("%s in new slot" % self.newslot)
+ if self.newslot > 1:
+ details[-1] += "s"
+ if self.reinst > 0:
+ details.append("%s reinstall" % self.reinst)
+ if self.reinst > 1:
+ details[-1] += "s"
+ if self.binary > 0:
+ details.append("%s binary" % self.binary)
+ if self.binary > 1:
+ details[-1] = details[-1][:-1] + "ies"
+ if self.uninst > 0:
+ details.append("%s uninstall" % self.uninst)
+ if self.uninst > 1:
+ details[-1] += "s"
+ if self.interactive > 0:
+ details.append("%s %s" % (self.interactive,
+ colorize("WARN", "interactive")))
+ myoutput.append(", ".join(details))
+ if total_installs != 0:
+ myoutput.append(")")
+ myoutput.append(", Size of downloads: %s" % _format_size(self.totalsize))
+ if self.restrict_fetch:
+ myoutput.append("\nFetch Restriction: %s package" % \
+ self.restrict_fetch)
+ if self.restrict_fetch > 1:
+ myoutput.append("s")
+ if self.restrict_fetch_satisfied < self.restrict_fetch:
+ myoutput.append(bad(" (%s unsatisfied)") % \
+ (self.restrict_fetch - self.restrict_fetch_satisfied))
+ if self.blocks > 0:
+ myoutput.append("\nConflict: %s block" % \
+ self.blocks)
+ if self.blocks > 1:
+ myoutput.append("s")
+ if self.blocks_satisfied < self.blocks:
+ myoutput.append(bad(" (%s unsatisfied)") % \
+ (self.blocks - self.blocks_satisfied))
+ return "".join(myoutput)
+
+
+class _DisplayConfig(object):
+
+ def __init__(self, depgraph, mylist, favorites, verbosity):
+ frozen_config = depgraph._frozen_config
+ dynamic_config = depgraph._dynamic_config
+
+ self.mylist = mylist
+ self.favorites = InternalPackageSet(favorites, allow_repo=True)
+ self.verbosity = verbosity
+
+ if self.verbosity is None:
+ self.verbosity = ("--quiet" in frozen_config.myopts and 1 or \
+ "--verbose" in frozen_config.myopts and 3 or 2)
+
+ self.oneshot = "--oneshot" in frozen_config.myopts or \
+ "--onlydeps" in frozen_config.myopts
+ self.columns = "--columns" in frozen_config.myopts
+ self.tree_display = "--tree" in frozen_config.myopts
+ self.alphabetical = "--alphabetical" in frozen_config.myopts
+ self.quiet = "--quiet" in frozen_config.myopts
+ self.all_flags = self.verbosity == 3 or self.quiet
+ self.print_use_string = self.verbosity != 1 or "--verbose" in frozen_config.myopts
+ self.changelog = "--changelog" in frozen_config.myopts
+ self.edebug = frozen_config.edebug
+ self.no_restart = frozen_config._opts_no_restart.intersection(frozen_config.myopts)
+ self.unordered_display = "--unordered-display" in frozen_config.myopts
+
+ mywidth = 130
+ if "COLUMNWIDTH" in frozen_config.settings:
+ try:
+ mywidth = int(frozen_config.settings["COLUMNWIDTH"])
+ except ValueError as e:
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ writemsg("!!! Unable to parse COLUMNWIDTH='%s'\n" % \
+ frozen_config.settings["COLUMNWIDTH"], noiselevel=-1)
+ del e
+ self.columnwidth = mywidth
+
+ self.repo_display = _RepoDisplay(frozen_config.roots)
+ self.trees = frozen_config.trees
+ self.pkgsettings = frozen_config.pkgsettings
+ self.target_root = frozen_config.target_root
+ self.running_root = frozen_config._running_root
+ self.roots = frozen_config.roots
+
+ self.blocker_parents = dynamic_config._blocker_parents
+ self.reinstall_nodes = dynamic_config._reinstall_nodes
+ self.digraph = dynamic_config.digraph
+ self.blocker_uninstalls = dynamic_config._blocker_uninstalls
+ self.slot_pkg_map = dynamic_config._slot_pkg_map
+ self.set_nodes = dynamic_config._set_nodes
+
+ self.pkg_use_enabled = depgraph._pkg_use_enabled
+ self.pkg = depgraph._pkg
+
+
+# formats a size given in bytes nicely
+def _format_size(mysize):
+ if isinstance(mysize, basestring):
+ return mysize
+ if 0 != mysize % 1024:
+ # Always round up to the next kB so that it doesn't show 0 kB when
+ # some small file still needs to be fetched.
+ mysize += 1024 - mysize % 1024
+ mystr=str(mysize//1024)
+ mycount=len(mystr)
+ while (mycount > 3):
+ mycount-=3
+ mystr=mystr[:mycount]+","+mystr[mycount:]
+ return mystr+" kB"
+
+
+def _create_use_string(conf, name, cur_iuse, iuse_forced, cur_use,
+ old_iuse, old_use,
+ is_new, reinst_flags):
+
+ if not conf.print_use_string:
+ return ""
+
+ enabled = []
+ if conf.alphabetical:
+ disabled = enabled
+ removed = enabled
+ else:
+ disabled = []
+ removed = []
+ cur_iuse = set(cur_iuse)
+ enabled_flags = cur_iuse.intersection(cur_use)
+ removed_iuse = set(old_iuse).difference(cur_iuse)
+ any_iuse = cur_iuse.union(old_iuse)
+ any_iuse = list(any_iuse)
+ any_iuse.sort()
+ for flag in any_iuse:
+ flag_str = None
+ isEnabled = False
+ reinst_flag = reinst_flags and flag in reinst_flags
+ if flag in enabled_flags:
+ isEnabled = True
+ if is_new or flag in old_use and \
+ (conf.all_flags or reinst_flag):
+ flag_str = red(flag)
+ elif flag not in old_iuse:
+ flag_str = yellow(flag) + "%*"
+ elif flag not in old_use:
+ flag_str = green(flag) + "*"
+ elif flag in removed_iuse:
+ if conf.all_flags or reinst_flag:
+ flag_str = yellow("-" + flag) + "%"
+ if flag in old_use:
+ flag_str += "*"
+ flag_str = "(" + flag_str + ")"
+ removed.append(flag_str)
+ continue
+ else:
+ if is_new or flag in old_iuse and \
+ flag not in old_use and \
+ (conf.all_flags or reinst_flag):
+ flag_str = blue("-" + flag)
+ elif flag not in old_iuse:
+ flag_str = yellow("-" + flag)
+ if flag not in iuse_forced:
+ flag_str += "%"
+ elif flag in old_use:
+ flag_str = green("-" + flag) + "*"
+ if flag_str:
+ if flag in iuse_forced:
+ flag_str = "(" + flag_str + ")"
+ if isEnabled:
+ enabled.append(flag_str)
+ else:
+ disabled.append(flag_str)
+
+ if conf.alphabetical:
+ ret = " ".join(enabled)
+ else:
+ ret = " ".join(enabled + disabled + removed)
+ if ret:
+ ret = '%s="%s" ' % (name, ret)
+ return ret
+
+
+def _tree_display(conf, mylist):
+
+ # If there are any Uninstall instances, add the
+ # corresponding blockers to the digraph.
+ mygraph = conf.digraph.copy()
+
+ executed_uninstalls = set(node for node in mylist \
+ if isinstance(node, Package) and node.operation == "unmerge")
+
+ for uninstall in conf.blocker_uninstalls.leaf_nodes():
+ uninstall_parents = \
+ conf.blocker_uninstalls.parent_nodes(uninstall)
+ if not uninstall_parents:
+ continue
+
+ # Remove the corresponding "nomerge" node and substitute
+ # the Uninstall node.
+ inst_pkg = conf.pkg(uninstall.cpv, "installed",
+ uninstall.root_config, installed=True)
+
+ try:
+ mygraph.remove(inst_pkg)
+ except KeyError:
+ pass
+
+ try:
+ inst_pkg_blockers = conf.blocker_parents.child_nodes(inst_pkg)
+ except KeyError:
+ inst_pkg_blockers = []
+
+ # Break the Package -> Uninstall edges.
+ mygraph.remove(uninstall)
+
+ # Resolution of a package's blockers
+ # depends on its own uninstallation.
+ for blocker in inst_pkg_blockers:
+ mygraph.add(uninstall, blocker)
+
+ # Expand Package -> Uninstall edges into
+ # Package -> Blocker -> Uninstall edges.
+ for blocker in uninstall_parents:
+ mygraph.add(uninstall, blocker)
+ for parent in conf.blocker_parents.parent_nodes(blocker):
+ if parent != inst_pkg:
+ mygraph.add(blocker, parent)
+
+ # If the uninstall task did not need to be executed because
+ # of an upgrade, display Blocker -> Upgrade edges since the
+ # corresponding Blocker -> Uninstall edges will not be shown.
+ upgrade_node = \
+ conf.slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
+ if upgrade_node is not None and \
+ uninstall not in executed_uninstalls:
+ for blocker in uninstall_parents:
+ mygraph.add(upgrade_node, blocker)
+
+ if conf.unordered_display:
+ display_list = _unordered_tree_display(mygraph, mylist)
+ else:
+ display_list = _ordered_tree_display(conf, mygraph, mylist)
+
+ _prune_tree_display(display_list)
+
+ return display_list
+
+
+def _unordered_tree_display(mygraph, mylist):
+ display_list = []
+ seen_nodes = set()
+
+ def print_node(node, depth):
+
+ if node in seen_nodes:
+ pass
+ else:
+ seen_nodes.add(node)
+
+ if isinstance(node, (Blocker, Package)):
+ display_list.append((node, depth, True))
+ else:
+ depth = -1
+
+ for child_node in mygraph.child_nodes(node):
+ print_node(child_node, depth + 1)
+
+ for root_node in mygraph.root_nodes():
+ print_node(root_node, 0)
+
+ return display_list
+
+
+def _ordered_tree_display(conf, mygraph, mylist):
+ depth = 0
+ shown_edges = set()
+ tree_nodes = []
+ display_list = []
+
+ for x in mylist:
+ depth = len(tree_nodes)
+ while depth and x not in \
+ mygraph.child_nodes(tree_nodes[depth-1]):
+ depth -= 1
+ if depth:
+ tree_nodes = tree_nodes[:depth]
+ tree_nodes.append(x)
+ display_list.append((x, depth, True))
+ shown_edges.add((x, tree_nodes[depth-1]))
+ else:
+ traversed_nodes = set() # prevent endless cycles
+ traversed_nodes.add(x)
+ def add_parents(current_node, ordered):
+ parent_nodes = None
+ # Do not traverse to parents if this node is
+ # an argument or a direct member of a set that has
+ # been specified as an argument (system or world).
+ if current_node not in conf.set_nodes:
+ parent_nodes = mygraph.parent_nodes(current_node)
+ if parent_nodes:
+ child_nodes = set(mygraph.child_nodes(current_node))
+ selected_parent = None
+ # First, try to avoid a direct cycle.
+ for node in parent_nodes:
+ if not isinstance(node, (Blocker, Package)):
+ continue
+ if node not in traversed_nodes and \
+ node not in child_nodes:
+ edge = (current_node, node)
+ if edge in shown_edges:
+ continue
+ selected_parent = node
+ break
+ if not selected_parent:
+ # A direct cycle is unavoidable.
+ for node in parent_nodes:
+ if not isinstance(node, (Blocker, Package)):
+ continue
+ if node not in traversed_nodes:
+ edge = (current_node, node)
+ if edge in shown_edges:
+ continue
+ selected_parent = node
+ break
+ if selected_parent:
+ shown_edges.add((current_node, selected_parent))
+ traversed_nodes.add(selected_parent)
+ add_parents(selected_parent, False)
+ display_list.append((current_node,
+ len(tree_nodes), ordered))
+ tree_nodes.append(current_node)
+ tree_nodes = []
+ add_parents(x, True)
+
+ return display_list
+
+
+def _prune_tree_display(display_list):
+ last_merge_depth = 0
+ for i in range(len(display_list) - 1, -1, -1):
+ node, depth, ordered = display_list[i]
+ if not ordered and depth == 0 and i > 0 \
+ and node == display_list[i-1][0] and \
+ display_list[i-1][1] == 0:
+ # An ordered node got a consecutive duplicate
+ # when the tree was being filled in.
+ del display_list[i]
+ continue
+ if ordered and isinstance(node, Package) \
+ and node.operation in ('merge', 'uninstall'):
+ last_merge_depth = depth
+ continue
+ if depth >= last_merge_depth or \
+ i < len(display_list) - 1 and \
+ depth >= display_list[i+1][1]:
+ del display_list[i]
+
+
+def _calc_changelog(ebuildpath,current,next):
+ if ebuildpath is None or not os.path.exists(ebuildpath):
+ return []
+ current = '-'.join(catpkgsplit(current)[1:])
+ if current.endswith('-r0'):
+ current = current[:-3]
+ next = '-'.join(catpkgsplit(next)[1:])
+ if next.endswith('-r0'):
+ next = next[:-3]
+ changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
+ try:
+ changelog = io.open(_unicode_encode(changelogpath,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'], errors='replace'
+ ).read()
+ except SystemExit:
+ raise # re-raise so the bare except below can't swallow SystemExit
+ except:
+ return []
+ divisions = _find_changelog_tags(changelog)
+ #print 'XX from',current,'to',next
+ #for div,text in divisions: print 'XX',div
+ # skip entries for all revisions above the one we are about to emerge
+ for i in range(len(divisions)):
+ if divisions[i][0]==next:
+ divisions = divisions[i:]
+ break
+ # find out how many entries we are going to display
+ for i in range(len(divisions)):
+ if divisions[i][0]==current:
+ divisions = divisions[:i]
+ break
+ else:
+ # couldn't find the current revision in the list; display nothing
+ return []
+ return divisions
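+
+ # Illustrative sketch (hypothetical versions, not from the original
+ # code): for a ChangeLog with entries tagged "pkg-1.2", "pkg-1.1" and
+ # "pkg-1.0", _calc_changelog(path, "cat/pkg-1.0", "cat/pkg-1.2")
+ # returns the (release, text) divisions for "pkg-1.2" and "pkg-1.1",
+ # i.e. everything newer than the installed revision up to and
+ # including the one being emerged.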
+
+
+def _find_changelog_tags(changelog):
+ divs = []
+ release = None
+ while True:
+ match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
+ if match is None:
+ if release is not None:
+ divs.append((release,changelog))
+ return divs
+ if release is not None:
+ divs.append((release,changelog[:match.start()]))
+ changelog = changelog[match.end():]
+ release = match.group(1)
+ if release.endswith('.ebuild'):
+ release = release[:-7]
+ if release.endswith('-r0'):
+ release = release[:-3]
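+
+ # Illustrative sketch (hypothetical ChangeLog text, not from the
+ # original code): given
+ # "*pkg-1.1 (01 Jan 2011)\ntext A\n*pkg-1.0 (01 Jan 2010)\ntext B"
+ # the multiline regex splits on the "*" headers and the function
+ # returns [("pkg-1.1", "text A\n"), ("pkg-1.0", "text B")].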
+
+
+class PkgInfo(object):
+ """Simple class to hold instance attributes for current
+ information about the pkg being printed.
+ """
+
+ __slots__ = ("ordered", "fetch_symbol", "operation", "merge",
+ "built", "cp", "ebuild_path", "repo_name", "repo_path_real",
+ "world", "system", "use", "oldbest", "ver"
+ )
+
+
+ def __init__(self):
+ self.built = False
+ self.cp = ''
+ self.ebuild_path = ''
+ self.fetch_symbol = ''
+ self.merge = ''
+ self.oldbest = ''
+ self.operation = ''
+ self.ordered = False
+ self.repo_path_real = ''
+ self.repo_name = ''
+ self.system = False
+ self.use = ''
+ self.ver = ''
+ self.world = False
diff --git a/portage_with_autodep/pym/_emerge/resolver/slot_collision.py b/portage_with_autodep/pym/_emerge/resolver/slot_collision.py
new file mode 100644
index 0000000..0df8f20
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/resolver/slot_collision.py
@@ -0,0 +1,978 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import sys
+
+from _emerge.AtomArg import AtomArg
+from _emerge.Package import Package
+from _emerge.PackageArg import PackageArg
+from portage.dep import check_required_use
+from portage.output import colorize
+from portage._sets.base import InternalPackageSet
+from portage.util import writemsg
+from portage.versions import cpv_getversion, vercmp
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+class slot_conflict_handler(object):
+ """This class keeps track of all slot conflicts and provides
+ an interface to get possible solutions.
+
+ How it works:
+ If two packages have been pulled into a slot, one needs to
+ go away. This class focuses on cases where this can be achieved
+ with a change in USE settings.
+
+ 1) Find out what causes a given slot conflict. There are
+ three possibilities:
+
+ a) One parent needs foo-1:0 and another one needs foo-2:0,
+ nothing we can do about this. This is called a 'version
+ based conflict'.
+
+ b) All parents of one of the conflict packages could use
+ another conflict package. This is called an 'unspecific
+ conflict'. This should be caught by the backtracking logic.
+ Ask the user to enable -uN (if not already enabled). If -uN is
+ enabled, this case is treated in the same way as c).
+
+ c) Neither a 'version based conflict' nor an 'unspecific
+ conflict'. Ignoring use deps would result in an
+ 'unspecific conflict'. This is called a 'specific conflict'.
+ This is the only conflict we try to find suggestions for.
+
+ 2) Computing suggestions.
+
+ Def.: "configuration": A list of packages, containing exactly one
+ package from each slot conflict.
+
+ We try to find USE changes such that all parents of conflict packages
+ can work with a package in the configuration we're looking at. This
+ is done for all possible configurations, except if the 'all-ebuild'
+ configuration has a suggestion. In this case we immediately abort the
+ search.
+ For the current configuration, all use flags that are part of violated
+ use deps are computed. This is done for every slot conflict on its own.
+
+ Def.: "solution (candidate)": An assignment of "enabled" / "disabled"
+ values for the use flags that are part of violated use deps.
+
+ Now all involved use flags for the current configuration are known. For
+ now they have an undetermined value. Fix their value in the
+ following cases:
+ * The use dep in the parent atom is unconditional.
+ * The parent package is 'installed'.
+ * The conflict package is 'installed'.
+
+ USE of 'installed' packages can't be changed. This always requires a
+ non-installed package.
+
+ During this procedure, contradictions may occur. In this case the
+ configuration has no solution.
+
+ Now generate all possible solution candidates with fixed values. Check
+ if they don't introduce new conflicts.
+
+ We have found a valid assignment for all involved use flags. Compute
+ the needed USE changes and prepare the message for the user.
+ """
+
+ def __init__(self, depgraph):
+ self.depgraph = depgraph
+ self.myopts = depgraph._frozen_config.myopts
+ self.debug = "--debug" in self.myopts
+ if self.debug:
+ writemsg("Starting slot conflict handler\n", noiselevel=-1)
+ #slot_collision_info is a dict mapping (slot atom, root) to set
+ #of packages. The packages in the set all belong to the same
+ #slot.
+ self.slot_collision_info = depgraph._dynamic_config._slot_collision_info
+
+ #A dict mapping packages to pairs of parent package
+ #and parent atom
+ self.all_parents = depgraph._dynamic_config._parent_atoms
+
+ #set containing all nodes that are part of a slot conflict
+ conflict_nodes = set()
+
+ #a list containing list of packages that form a slot conflict
+ conflict_pkgs = []
+
+ #a list containing sets of (parent, atom) pairs that have pulled packages
+ #into the same slot
+ all_conflict_atoms_by_slotatom = []
+
+ #fill conflict_pkgs, all_conflict_atoms_by_slotatom
+ for (atom, root), pkgs \
+ in self.slot_collision_info.items():
+ conflict_pkgs.append(list(pkgs))
+ all_conflict_atoms_by_slotatom.append(set())
+
+ for pkg in pkgs:
+ conflict_nodes.add(pkg)
+ for ppkg, atom in self.all_parents.get(pkg):
+ all_conflict_atoms_by_slotatom[-1].add((ppkg, atom))
+
+ #Variable that holds the non-explanation part of the message.
+ self.conflict_msg = []
+ #If any conflict package was pulled in only by unspecific atoms, then
+ #the user forgot to enable --newuse and/or --update.
+ self.conflict_is_unspecific = False
+
+ #Indicate if the conflict is caused by incompatible version requirements
+ #cat/pkg-2 pulled in, but a parent requires <cat/pkg-2
+ self.is_a_version_conflict = False
+
+ self._prepare_conflict_msg_and_check_for_specificity()
+
+ #a list of dicts that hold the needed USE values to solve all conflicts
+ self.solutions = []
+
+ #a list of dicts that hold the needed USE changes to solve all conflicts
+ self.changes = []
+
+ #configuration = a list of packages with exactly one package from every
+ #single slot conflict
+ config_gen = _configuration_generator(conflict_pkgs)
+ first_config = True
+
+ #go through all configurations and collect solutions
+ while True:
+ config = config_gen.get_configuration()
+ if not config:
+ break
+
+ if self.debug:
+ writemsg("\nNew configuration:\n", noiselevel=-1)
+ for pkg in config:
+ writemsg(" " + str(pkg) + "\n", noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+
+ new_solutions = self._check_configuration(config, all_conflict_atoms_by_slotatom, conflict_nodes)
+
+ if new_solutions:
+ self.solutions.extend(new_solutions)
+
+ if first_config:
+ #If the "all ebuild"-config gives a solution, use it.
+ #Otherwise enumerate all other solutions.
+ if self.debug:
+ writemsg("All-ebuild configuration has a solution. Aborting search.\n", noiselevel=-1)
+ break
+ first_config = False
+
+ if len(conflict_pkgs) > 4:
+ # The number of configurations to check grows exponentially in the number of conflict_pkgs.
+ # To prevent excessive running times, only check the "all-ebuild" configuration
+ # when the number of conflict packages is too large.
+ if self.debug:
+ writemsg("\nAborting search due to excessive number of configurations.\n", noiselevel=-1)
+ break
+
+ for solution in self.solutions:
+ self._add_change(self._get_change(solution))
+
+
+ def get_conflict(self):
+ return "".join(self.conflict_msg)
+
+ def _is_subset(self, change1, change2):
+ """
+ Checks if a set of changes 'change1' is a subset of the changes 'change2'.
+ """
+ #All pkgs of change1 have to be in change2.
+ #For every package in change1, the changes have to be a subset of
+ #the corresponding changes in change2.
+ for pkg in change1:
+ if pkg not in change2:
+ return False
+
+ for flag in change1[pkg]:
+ if flag not in change2[pkg]:
+ return False
+ if change1[pkg][flag] != change2[pkg][flag]:
+ return False
+ return True
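+
+ # Illustrative sketch (hypothetical packages and flags, not from the
+ # original code): with pkg denoting some Package instance,
+ # change1 = {pkg: {"foo": True}}
+ # change2 = {pkg: {"foo": True, "bar": False}}
+ # _is_subset(change1, change2) is True while the reverse is False,
+ # which is what lets _add_change() keep only minimal USE changes.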
+
+ def _add_change(self, new_change):
+ """
+ Make sure to keep only minimal changes. If "+foo" does the job, discard "+foo -bar".
+ """
+ changes = self.changes
+ #Make sure there is no other solution that is a subset of the new solution.
+ ignore = False
+ to_be_removed = []
+ for change in changes:
+ if self._is_subset(change, new_change):
+ ignore = True
+ break
+ elif self._is_subset(new_change, change):
+ to_be_removed.append(change)
+
+ if not ignore:
+ #Discard all existing changes that are a superset of the new change.
+ for obsolete_change in to_be_removed:
+ changes.remove(obsolete_change)
+ changes.append(new_change)
+
+ def _get_change(self, solution):
+ _pkg_use_enabled = self.depgraph._pkg_use_enabled
+ new_change = {}
+ for pkg in solution:
+ for flag, state in solution[pkg].items():
+ if state == "enabled" and flag not in _pkg_use_enabled(pkg):
+ new_change.setdefault(pkg, {})[flag] = True
+ elif state == "disabled" and flag in _pkg_use_enabled(pkg):
+ new_change.setdefault(pkg, {})[flag] = False
+ return new_change
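+
+ # Illustrative sketch (hypothetical flag, not from the original
+ # code): if a solution demands {"ssl": "enabled"} for a package that
+ # currently has ssl disabled, _get_change() records
+ # {pkg: {"ssl": True}}; states that already match the current USE
+ # produce no entry at all.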
+
+ def _prepare_conflict_msg_and_check_for_specificity(self):
+ """
+ Print all slot conflicts in a human readable way.
+ """
+ _pkg_use_enabled = self.depgraph._pkg_use_enabled
+ msg = self.conflict_msg
+ indent = " "
+ msg.append("\n!!! Multiple package instances within a single " + \
+ "package slot have been pulled\n")
+ msg.append("!!! into the dependency graph, resulting" + \
+ " in a slot conflict:\n\n")
+
+ for (slot_atom, root), pkgs \
+ in self.slot_collision_info.items():
+ msg.append(str(slot_atom))
+ if root != '/':
+ msg.append(" for %s" % (root,))
+ msg.append("\n\n")
+
+ for pkg in pkgs:
+ msg.append(indent)
+ msg.append(str(pkg))
+ parent_atoms = self.all_parents.get(pkg)
+ if parent_atoms:
+ #Create a list of collision reasons and map them to sets
+ #of atoms.
+ #Possible reasons:
+ # ("version", "ge") for operator >=, >
+ # ("version", "eq") for operator =, ~
+ # ("version", "le") for operator <=, <
+ # ("use", "<some use flag>") for unmet use conditionals
+ collision_reasons = {}
+ num_all_specific_atoms = 0
+
+ for ppkg, atom in parent_atoms:
+ atom_set = InternalPackageSet(initial_atoms=(atom,))
+ atom_without_use_set = InternalPackageSet(initial_atoms=(atom.without_use,))
+
+ for other_pkg in pkgs:
+ if other_pkg == pkg:
+ continue
+
+ if not atom_without_use_set.findAtomForPackage(other_pkg, \
+ modified_use=_pkg_use_enabled(other_pkg)):
+ #The version range does not match.
+ sub_type = None
+ if atom.operator in (">=", ">"):
+ sub_type = "ge"
+ elif atom.operator in ("=", "~"):
+ sub_type = "eq"
+ elif atom.operator in ("<=", "<"):
+ sub_type = "le"
+
+ atoms = collision_reasons.get(("version", sub_type), set())
+ atoms.add((ppkg, atom, other_pkg))
+ num_all_specific_atoms += 1
+ collision_reasons[("version", sub_type)] = atoms
+ elif not atom_set.findAtomForPackage(other_pkg, \
+ modified_use=_pkg_use_enabled(other_pkg)):
+ missing_iuse = other_pkg.iuse.get_missing_iuse(
+ atom.unevaluated_atom.use.required)
+ if missing_iuse:
+ for flag in missing_iuse:
+ atoms = collision_reasons.get(("use", flag), set())
+ atoms.add((ppkg, atom, other_pkg))
+ collision_reasons[("use", flag)] = atoms
+ num_all_specific_atoms += 1
+ else:
+ #Use conditionals not met.
+ violated_atom = atom.violated_conditionals(_pkg_use_enabled(other_pkg), \
+ other_pkg.iuse.is_valid_flag)
+ for flag in violated_atom.use.enabled.union(violated_atom.use.disabled):
+ atoms = collision_reasons.get(("use", flag), set())
+ atoms.add((ppkg, atom, other_pkg))
+ collision_reasons[("use", flag)] = atoms
+ num_all_specific_atoms += 1
+
+ msg.append(" pulled in by\n")
+
+ selected_for_display = set()
+ unconditional_use_deps = set()
+
+ for (type, sub_type), parents in collision_reasons.items():
+ #From each (type, sub_type) pair select at least one atom.
+ #Try to select as few atoms as possible
+
+ if type == "version":
+ #Find the atom with version that is as far away as possible.
+ best_matches = {}
+ for ppkg, atom, other_pkg in parents:
+ if atom.cp in best_matches:
+ cmp = vercmp( \
+ cpv_getversion(atom.cpv), \
+ cpv_getversion(best_matches[atom.cp][1].cpv))
+
+ if (sub_type == "ge" and cmp > 0) \
+ or (sub_type == "le" and cmp < 0) \
+ or (sub_type == "eq" and cmp > 0):
+ best_matches[atom.cp] = (ppkg, atom)
+ else:
+ best_matches[atom.cp] = (ppkg, atom)
+ selected_for_display.update(best_matches.values())
+ elif type == "use":
+ #Prefer atoms with unconditional use deps over conditional ones, because it's
+ #not possible to change them on the parent, which means there
+ #are fewer possible solutions.
+ use = sub_type
+ for ppkg, atom, other_pkg in parents:
+ missing_iuse = other_pkg.iuse.get_missing_iuse(
+ atom.unevaluated_atom.use.required)
+ if missing_iuse:
+ unconditional_use_deps.add((ppkg, atom))
+ else:
+ parent_use = None
+ if isinstance(ppkg, Package):
+ parent_use = _pkg_use_enabled(ppkg)
+ violated_atom = atom.unevaluated_atom.violated_conditionals(
+ _pkg_use_enabled(other_pkg),
+ other_pkg.iuse.is_valid_flag,
+ parent_use=parent_use)
+ # It's possible for autounmask to change
+ # parent_use such that the unevaluated form
+ # of the atom now matches, even though the
+ # earlier evaluated form (from before
+ # autounmask changed parent_use) does not.
+ # In this case (see bug #374423), it's
+ # expected that violated_atom.use is None.
+ # Since the atom now matches, we don't want
+ # to display it in the slot conflict
+ # message, so we simply ignore it and rely
+ # on the autounmask display to communicate
+ # the necessary USE change to the user.
+ if violated_atom.use is None:
+ continue
+ if use in violated_atom.use.enabled or \
+ use in violated_atom.use.disabled:
+ unconditional_use_deps.add((ppkg, atom))
+ # When USE flags are removed, it can be
+ # essential to see all broken reverse
+ # dependencies here, so don't omit any.
+ # If the list is long, people can simply
+ # use a pager.
+ selected_for_display.add((ppkg, atom))
+
+ def highlight_violations(atom, version, use=[]):
+ """Colorize parts of an atom"""
+ atom_str = str(atom)
+ if version:
+ op = atom.operator
+ ver = None
+ if atom.cp != atom.cpv:
+ ver = cpv_getversion(atom.cpv)
+ slot = atom.slot
+
+ if op == "=*":
+ op = "="
+ ver += "*"
+
+ if op is not None:
+ atom_str = atom_str.replace(op, colorize("BAD", op), 1)
+
+ if ver is not None:
+ start = atom_str.rfind(ver)
+ end = start + len(ver)
+ atom_str = atom_str[:start] + \
+ colorize("BAD", ver) + \
+ atom_str[end:]
+ if slot:
+ atom_str = atom_str.replace(":" + slot, colorize("BAD", ":" + slot))
+
+ if use and atom.use.tokens:
+ use_part_start = atom_str.find("[")
+ use_part_end = atom_str.find("]")
+
+ new_tokens = []
+ for token in atom.use.tokens:
+ if token.lstrip("-!").rstrip("=?") in use:
+ new_tokens.append(colorize("BAD", token))
+ else:
+ new_tokens.append(token)
+
+ atom_str = atom_str[:use_part_start] \
+ + "[%s]" % (",".join(new_tokens),) + \
+ atom_str[use_part_end+1:]
+
+ return atom_str
+
+ # Show unconditional use deps first, since those
+ # are more problematic than the conditional kind.
+ ordered_list = list(unconditional_use_deps)
+ if len(selected_for_display) > len(unconditional_use_deps):
+ for parent_atom in selected_for_display:
+ if parent_atom not in unconditional_use_deps:
+ ordered_list.append(parent_atom)
+ for parent_atom in ordered_list:
+ parent, atom = parent_atom
+ msg.append(2*indent)
+ if isinstance(parent,
+ (PackageArg, AtomArg)):
+ # For PackageArg and AtomArg types, it's
+ # redundant to display the atom attribute.
+ msg.append(str(parent))
+ else:
+ # Display the specific atom from SetArg or
+ # Package types.
+ version_violated = False
+ use = []
+ for (type, sub_type), parents in collision_reasons.items():
+ for x in parents:
+ if parent == x[0] and atom == x[1]:
+ if type == "version":
+ version_violated = True
+ elif type == "use":
+ use.append(sub_type)
+ break
+
+ atom_str = highlight_violations(atom.unevaluated_atom, version_violated, use)
+
+ if version_violated:
+ self.is_a_version_conflict = True
+
+ msg.append("%s required by %s" % (atom_str, parent))
+ msg.append("\n")
+
+ if not selected_for_display:
+ msg.append(2*indent)
+ msg.append("(no parents that aren't satisfied by other packages in this slot)\n")
+ self.conflict_is_unspecific = True
+
+ omitted_parents = num_all_specific_atoms - len(selected_for_display)
+ if omitted_parents:
+ msg.append(2*indent)
+ if len(selected_for_display) > 1:
+ msg.append("(and %d more with the same problems)\n" % omitted_parents)
+ else:
+ msg.append("(and %d more with the same problem)\n" % omitted_parents)
+ else:
+ msg.append(" (no parents)\n")
+ msg.append("\n")
+ msg.append("\n")
+
+ def get_explanation(self):
+ msg = ""
+ _pkg_use_enabled = self.depgraph._pkg_use_enabled
+
+ if self.is_a_version_conflict:
+ return None
+
+ if self.conflict_is_unspecific and \
+ not ("--newuse" in self.myopts and "--update" in self.myopts):
+ msg += "!!! Enabling --newuse and --update might solve this conflict.\n"
+ msg += "!!! If not, it might help emerge to give a more specific suggestion.\n\n"
+ return msg
+
+ solutions = self.solutions
+ if not solutions:
+ return None
+
+ if len(solutions)==1:
+ if len(self.slot_collision_info)==1:
+ msg += "It might be possible to solve this slot collision\n"
+ else:
+ msg += "It might be possible to solve these slot collisions\n"
+ msg += "by applying all of the following changes:\n"
+ else:
+ if len(self.slot_collision_info)==1:
+ msg += "It might be possible to solve this slot collision\n"
+ else:
+ msg += "It might be possible to solve these slot collisions\n"
+ msg += "by applying one of the following solutions:\n"
+
+ def print_change(change, indent=""):
+ mymsg = ""
+ for pkg in change:
+ changes = []
+ for flag, state in change[pkg].items():
+ if state:
+ changes.append(colorize("red", "+" + flag))
+ else:
+ changes.append(colorize("blue", "-" + flag))
+ mymsg += indent + "- " + pkg.cpv + " (Change USE: %s" % " ".join(changes) + ")\n"
+ mymsg += "\n"
+ return mymsg
+
+
+ if len(self.changes) == 1:
+ msg += print_change(self.changes[0], " ")
+ else:
+ for change in self.changes:
+ msg += " Solution: Apply all of:\n"
+ msg += print_change(change, " ")
+
+ return msg
+
+ def _check_configuration(self, config, all_conflict_atoms_by_slotatom, conflict_nodes):
+ """
+ Given a configuration, required USE changes are computed and checked to
+ make sure that no new conflict is introduced. Returns a solution or None.
+ """
+ _pkg_use_enabled = self.depgraph._pkg_use_enabled
+ #An installed package can only be part of a valid configuration if it has no
+ #pending USE changes. Otherwise the ebuild will be pulled in again.
+ for pkg in config:
+ if not pkg.installed:
+ continue
+
+ for (atom, root), pkgs \
+ in self.slot_collision_info.items():
+ if pkg not in pkgs:
+ continue
+ for other_pkg in pkgs:
+ if other_pkg == pkg:
+ continue
+ if pkg.iuse.all.symmetric_difference(other_pkg.iuse.all) \
+ or _pkg_use_enabled(pkg).symmetric_difference(_pkg_use_enabled(other_pkg)):
+ if self.debug:
+ writemsg(str(pkg) + " has pending USE changes. Rejecting configuration.\n", noiselevel=-1)
+ return False
+
+ #A list of dicts. Keeps one dict per slot conflict. [ { flag1: "enabled" }, { flag2: "disabled" } ]
+ all_involved_flags = []
+
+ #Go through all slot conflicts
+ for id, pkg in enumerate(config):
+ involved_flags = {}
+ for ppkg, atom in all_conflict_atoms_by_slotatom[id]:
+ if ppkg in conflict_nodes and ppkg not in config:
+ #The parent is part of a slot conflict itself and is
+ #not part of the current config.
+ continue
+
+ i = InternalPackageSet(initial_atoms=(atom,))
+ if i.findAtomForPackage(pkg, modified_use=_pkg_use_enabled(pkg)):
+ continue
+
+ i = InternalPackageSet(initial_atoms=(atom.without_use,))
+ if not i.findAtomForPackage(pkg, modified_use=_pkg_use_enabled(pkg)):
+ #Version range does not match.
+ if self.debug:
+ writemsg(str(pkg) + " does not satify all version requirements." + \
+ " Rejecting configuration.\n", noiselevel=-1)
+ return False
+
+ if not pkg.iuse.is_valid_flag(atom.unevaluated_atom.use.required):
+ #Missing IUSE.
+ #FIXME: This needs to support use dep defaults.
+ if self.debug:
+ writemsg(str(pkg) + " misses needed flags from IUSE." + \
+ " Rejecting configuration.\n", noiselevel=-1)
+ return False
+
+ if not isinstance(ppkg, Package) or ppkg.installed:
+ #We cannot assume that it's possible to reinstall the package. Do not
+ #check if some of its atom has use.conditional
+ violated_atom = atom.violated_conditionals(_pkg_use_enabled(pkg), \
+ pkg.iuse.is_valid_flag)
+ else:
+ violated_atom = atom.unevaluated_atom.violated_conditionals(_pkg_use_enabled(pkg), \
+ pkg.iuse.is_valid_flag, parent_use=_pkg_use_enabled(ppkg))
+ if violated_atom.use is None:
+ # It's possible for autounmask to change
+ # parent_use such that the unevaluated form
+ # of the atom now matches, even though the
+ # earlier evaluated form (from before
+ # autounmask changed parent_use) does not.
+ # In this case (see bug #374423), it's
+ # expected that violated_atom.use is None.
+ continue
+
+ if pkg.installed and (violated_atom.use.enabled or violated_atom.use.disabled):
+ #We can't change USE of an installed package (only of an ebuild, but that is already
+ #part of the conflict, isn't it?)
+ if self.debug:
+ writemsg(str(pkg) + ": installed package would need USE changes." + \
+ " Rejecting configuration.\n", noiselevel=-1)
+ return False
+
+ #Compute the required USE changes. A flag can be forced to "enabled" or "disabled",
+ #it can be in the conditional state "cond" that allows both values or in the
+ #"contradiction" state, which means that some atoms insist on differnt values
+ #for this flag and those kill this configuration.
+ for flag in violated_atom.use.required:
+ state = involved_flags.get(flag, "")
+
+ if flag in violated_atom.use.enabled:
+ if state in ("", "cond", "enabled"):
+ state = "enabled"
+ else:
+ state = "contradiction"
+ elif flag in violated_atom.use.disabled:
+ if state in ("", "cond", "disabled"):
+ state = "disabled"
+ else:
+ state = "contradiction"
+ else:
+ if state == "":
+ state = "cond"
+
+ involved_flags[flag] = state
+
+ if pkg.installed:
+ #We don't change the installed pkg's USE. Force all involved flags
+ #to the same value as the installed package has it.
+ for flag in involved_flags:
+ if involved_flags[flag] == "enabled":
+ if flag not in _pkg_use_enabled(pkg):
+ involved_flags[flag] = "contradiction"
+ elif involved_flags[flag] == "disabled":
+ if flag in _pkg_use_enabled(pkg):
+ involved_flags[flag] = "contradiction"
+ elif involved_flags[flag] == "cond":
+ if flag in _pkg_use_enabled(pkg):
+ involved_flags[flag] = "enabled"
+ else:
+ involved_flags[flag] = "disabled"
+
+ for flag, state in involved_flags.items():
+ if state == "contradiction":
+ if self.debug:
+ writemsg("Contradicting requirements found for flag " + \
+ flag + ". Rejecting configuration.\n", noiselevel=-1)
+ return False
+
+ all_involved_flags.append(involved_flags)
+
+ if self.debug:
+ writemsg("All involved flags:\n", noiselevel=-1)
+ for id, involved_flags in enumerate(all_involved_flags):
+ writemsg(" " + str(config[id]) + "\n", noiselevel=-1)
+ for flag, state in involved_flags.items():
+ writemsg(" " + flag + ": " + state + "\n", noiselevel=-1)
+
+ solutions = []
+ sol_gen = _solution_candidate_generator(all_involved_flags)
+ while True:
+ candidate = sol_gen.get_candidate()
+ if not candidate:
+ break
+ solution = self._check_solution(config, candidate, all_conflict_atoms_by_slotatom)
+ if solution:
+ solutions.append(solution)
+
+ if self.debug:
+ if not solutions:
+ writemsg("No viable solutions. Rejecting configuration.\n", noiselevel=-1)
+ return solutions
+
+
+ def _force_flag_for_package(self, required_changes, pkg, flag, state):
+ """
+ Adds a USE change to required_changes. Sets the target state to
+ "contradiction" if a flag is forced to conflicting values.
+ """
+ _pkg_use_enabled = self.depgraph._pkg_use_enabled
+
+ if state == "disabled":
+ changes = required_changes.get(pkg, {})
+ flag_change = changes.get(flag, "")
+ if flag_change == "enabled":
+ flag_change = "contradiction"
+ elif flag in _pkg_use_enabled(pkg):
+ flag_change = "disabled"
+
+ changes[flag] = flag_change
+ required_changes[pkg] = changes
+ elif state == "enabled":
+ changes = required_changes.get(pkg, {})
+ flag_change = changes.get(flag, "")
+ if flag_change == "disabled":
+ flag_change = "contradiction"
+ else:
+ flag_change = "enabled"
+
+ changes[flag] = flag_change
+ required_changes[pkg] = changes
+
+ def _check_solution(self, config, all_involved_flags, all_conflict_atoms_by_slotatom):
+ """
+ Given a configuration and all involved flags, all possible settings for the involved
+ flags are checked if they solve the slot conflict.
+ """
+ _pkg_use_enabled = self.depgraph._pkg_use_enabled
+
+ if self.debug:
+ #The code is a bit verbose, because the states might not
+ #be a string, but a _value_helper.
+ msg = "Solution candidate: "
+ msg += "["
+ first = True
+ for involved_flags in all_involved_flags:
+ if first:
+ first = False
+ else:
+ msg += ", "
+ msg += "{"
+ inner_first = True
+ for flag, state in involved_flags.items():
+ if inner_first:
+ inner_first = False
+ else:
+ msg += ", "
+ msg += flag + ": " + str(state)
+ msg += "}"
+ msg += "]\n"
+ writemsg(msg, noiselevel=-1)
+
+ required_changes = {}
+ for id, pkg in enumerate(config):
+ if not pkg.installed:
+ #We can't change the USE of installed packages.
+ for flag in all_involved_flags[id]:
+ if not pkg.iuse.is_valid_flag(flag):
+ continue
+ state = all_involved_flags[id][flag]
+ self._force_flag_for_package(required_changes, pkg, flag, state)
+
+ #Go through all (parent, atom) pairs for the current slot conflict.
+ for ppkg, atom in all_conflict_atoms_by_slotatom[id]:
+ use = atom.unevaluated_atom.use
+ if not use:
+ #No need to force something for an atom without USE conditionals.
+ #These atoms are already satisfied.
+ continue
+ for flag in all_involved_flags[id]:
+ state = all_involved_flags[id][flag]
+
+ if flag not in use.required or not use.conditional:
+ continue
+ if flag in use.conditional.enabled:
+ #[flag?]
+ if state == "enabled":
+ #no need to change anything, the atom won't
+ #force -flag on pkg
+ pass
+ elif state == "disabled":
+ #if flag is enabled we get [flag] -> it must be disabled
+ self._force_flag_for_package(required_changes, ppkg, flag, "disabled")
+ elif flag in use.conditional.disabled:
+ #[!flag?]
+ if state == "enabled":
+ #if flag is enabled we get [-flag] -> it must be disabled
+ self._force_flag_for_package(required_changes, ppkg, flag, "disabled")
+ elif state == "disabled":
+ #no need to change anything, the atom won't
+ #force +flag on pkg
+ pass
+ elif flag in use.conditional.equal:
+ #[flag=]
+ if state == "enabled":
+ #if flag is disabled we get [-flag] -> it must be enabled
+ self._force_flag_for_package(required_changes, ppkg, flag, "enabled")
+ elif state == "disabled":
+ #if flag is enabled we get [flag] -> it must be disabled
+ self._force_flag_for_package(required_changes, ppkg, flag, "disabled")
+ elif flag in use.conditional.not_equal:
+ #[!flag=]
+ if state == "enabled":
+ #if flag is enabled we get [-flag] -> it must be disabled
+ self._force_flag_for_package(required_changes, ppkg, flag, "disabled")
+ elif state == "disabled":
+ #if flag is disabled we get [flag] -> it must be enabled
+ self._force_flag_for_package(required_changes, ppkg, flag, "enabled")
+
+ is_valid_solution = True
+ for pkg in required_changes:
+ for state in required_changes[pkg].values():
+ if not state in ("enabled", "disabled"):
+ is_valid_solution = False
+
+ if not is_valid_solution:
+ return None
+
+ #Check if all atoms are satisfied after the changes are applied.
+ for id, pkg in enumerate(config):
+ new_use = _pkg_use_enabled(pkg)
+ if pkg in required_changes:
+ old_use = pkg.use.enabled
+ new_use = set(new_use)
+ for flag, state in required_changes[pkg].items():
+ if state == "enabled":
+ new_use.add(flag)
+ elif state == "disabled":
+ new_use.discard(flag)
+ if not new_use.symmetric_difference(old_use):
+ #avoid copying the package in findAtomForPackage if possible
+ new_use = old_use
+
+ for ppkg, atom in all_conflict_atoms_by_slotatom[id]:
+ if not hasattr(ppkg, "use"):
+ #It's a SetArg or something like that.
+ continue
+ ppkg_new_use = set(_pkg_use_enabled(ppkg))
+ if ppkg in required_changes:
+ for flag, state in required_changes[ppkg].items():
+ if state == "enabled":
+ ppkg_new_use.add(flag)
+ elif state == "disabled":
+ ppkg_new_use.discard(flag)
+
+ new_atom = atom.unevaluated_atom.evaluate_conditionals(ppkg_new_use)
+ i = InternalPackageSet(initial_atoms=(new_atom,))
+ if not i.findAtomForPackage(pkg, new_use):
+ #We managed to create a new problem with our changes.
+ is_valid_solution = False
+ if self.debug:
+ writemsg("new conflict introduced: " + str(pkg) + \
+ " does not match " + new_atom + " from " + str(ppkg) + "\n", noiselevel=-1)
+ break
+
+ if not is_valid_solution:
+ break
+
+ #Make sure the changes don't violate REQUIRED_USE
+ for pkg in required_changes:
+ required_use = pkg.metadata["REQUIRED_USE"]
+ if not required_use:
+ continue
+
+ use = set(_pkg_use_enabled(pkg))
+ for flag, state in required_changes[pkg].items():
+ if state == "enabled":
+ use.add(flag)
+ else:
+ use.discard(flag)
+
+ if not check_required_use(required_use, use, pkg.iuse.is_valid_flag):
+ is_valid_solution = False
+ break
+
+ if is_valid_solution and required_changes:
+ return required_changes
+ else:
+ return None
+
+class _configuration_generator(object):
+ def __init__(self, conflict_pkgs):
+ #reorder packages such that installed packages come last
+ self.conflict_pkgs = []
+ for pkgs in conflict_pkgs:
+ new_pkgs = []
+ for pkg in pkgs:
+ if not pkg.installed:
+ new_pkgs.append(pkg)
+ for pkg in pkgs:
+ if pkg.installed:
+ new_pkgs.append(pkg)
+ self.conflict_pkgs.append(new_pkgs)
+
+ self.solution_ids = []
+ for pkgs in self.conflict_pkgs:
+ self.solution_ids.append(0)
+ self._is_first_solution = True
+
+ def get_configuration(self):
+ if self._is_first_solution:
+ self._is_first_solution = False
+ else:
+ if not self._next():
+ return None
+
+ solution = []
+ for id, pkgs in enumerate(self.conflict_pkgs):
+ solution.append(pkgs[self.solution_ids[id]])
+ return solution
+
+ def _next(self, id=None):
+ solution_ids = self.solution_ids
+ conflict_pkgs = self.conflict_pkgs
+
+ if id is None:
+ id = len(solution_ids)-1
+
+ if solution_ids[id] == len(conflict_pkgs[id])-1:
+ if id > 0:
+ return self._next(id=id-1)
+ else:
+ return False
+ else:
+ solution_ids[id] += 1
+ for other_id in range(id+1, len(solution_ids)):
+ solution_ids[other_id] = 0
+ return True
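+
+ # Illustrative sketch (not from the original code): _next() advances
+ # solution_ids like an odometer. For two conflicts with two candidate
+ # packages each, successive calls to get_configuration() walk the ids
+ # [0, 0] -> [0, 1] -> [1, 0] -> [1, 1] and then return None.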
+
+class _solution_candidate_generator(object):
+ class _value_helper(object):
+ def __init__(self, value=None):
+ self.value = value
+ def __eq__(self, other):
+ if isinstance(other, basestring):
+ return self.value == other
+ else:
+ return self.value == other.value
+ def __str__(self):
+ return str(self.value)
+
+ def __init__(self, all_involved_flags):
+ #A copy of all_involved_flags with all "cond" values
+ #replaced by a _value_helper object.
+ self.all_involved_flags = []
+
+ #A list tracking references to all used _value_helper
+ #objects.
+ self.conditional_values = []
+
+ for involved_flags in all_involved_flags:
+ new_involved_flags = {}
+ for flag, state in involved_flags.items():
+ if state in ("enabled", "disabled"):
+ new_involved_flags[flag] = state
+ else:
+ v = self._value_helper("disabled")
+ new_involved_flags[flag] = v
+ self.conditional_values.append(v)
+ self.all_involved_flags.append(new_involved_flags)
+
+ self._is_first_solution = True
+
+ def get_candidate(self):
+ if self._is_first_solution:
+ self._is_first_solution = False
+ else:
+ if not self._next():
+ return None
+
+ return self.all_involved_flags
+
+ def _next(self, id=None):
+ values = self.conditional_values
+
+ if not values:
+ return False
+
+ if id is None:
+ id = len(values)-1
+
+ if values[id].value == "enabled":
+ if id > 0:
+ return self._next(id=id-1)
+ else:
+ return False
+ else:
+ values[id].value = "enabled"
+ for other_id in range(id+1, len(values)):
+ values[other_id].value = "disabled"
+ return True
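+
+ # Illustrative sketch (not from the original code): every "cond" flag
+ # becomes a _value_helper starting at "disabled", and _next() counts
+ # through all enabled/disabled combinations in the same odometer
+ # fashion as _configuration_generator above.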
+
+
diff --git a/portage_with_autodep/pym/_emerge/search.py b/portage_with_autodep/pym/_emerge/search.py
new file mode 100644
index 0000000..35f0412
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/search.py
@@ -0,0 +1,385 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import re
+import portage
+from portage import os
+from portage.output import bold, bold as white, darkgreen, green, red
+from portage.util import writemsg_stdout
+
+from _emerge.Package import Package
+
+class search(object):
+
+ #
+ # class constants
+ #
+ VERSION_SHORT=1
+ VERSION_RELEASE=2
+
+ #
+ # public interface
+ #
+ def __init__(self, root_config, spinner, searchdesc,
+ verbose, usepkg, usepkgonly):
+ """Searches the available and installed packages for the supplied search key.
+ The list of available and installed packages is created at object instantiation.
+ This makes successive searches faster."""
+ self.settings = root_config.settings
+ self.vartree = root_config.trees["vartree"]
+ self.spinner = spinner
+ self.verbose = verbose
+ self.searchdesc = searchdesc
+ self.root_config = root_config
+ self.setconfig = root_config.setconfig
+ self.matches = {"pkg" : []}
+ self.mlen = 0
+
+ self._dbs = []
+
+ portdb = root_config.trees["porttree"].dbapi
+ bindb = root_config.trees["bintree"].dbapi
+ vardb = root_config.trees["vartree"].dbapi
+
+ if not usepkgonly and portdb._have_root_eclass_dir:
+ self._dbs.append(portdb)
+
+ if (usepkg or usepkgonly) and bindb.cp_all():
+ self._dbs.append(bindb)
+
+ self._dbs.append(vardb)
+ self._portdb = portdb
+
+ def _spinner_update(self):
+ if self.spinner:
+ self.spinner.update()
+
+ def _cp_all(self):
+ cp_all = set()
+ for db in self._dbs:
+ cp_all.update(db.cp_all())
+ return list(sorted(cp_all))
+
+ def _aux_get(self, *args, **kwargs):
+ for db in self._dbs:
+ try:
+ return db.aux_get(*args, **kwargs)
+ except KeyError:
+ pass
+ raise
+
+ def _findname(self, *args, **kwargs):
+ for db in self._dbs:
+ if db is not self._portdb:
+ # We don't want findname to return anything
+ # unless it's an ebuild in a portage tree.
+ # Otherwise, it's already built and we don't
+ # care about it.
+ continue
+ func = getattr(db, "findname", None)
+ if func:
+ value = func(*args, **kwargs)
+ if value:
+ return value
+ return None
+
+ def _getFetchMap(self, *args, **kwargs):
+ for db in self._dbs:
+ func = getattr(db, "getFetchMap", None)
+ if func:
+ value = func(*args, **kwargs)
+ if value:
+ return value
+ return {}
+
+ def _visible(self, db, cpv, metadata):
+ installed = db is self.vartree.dbapi
+ built = installed or db is not self._portdb
+ pkg_type = "ebuild"
+ if installed:
+ pkg_type = "installed"
+ elif built:
+ pkg_type = "binary"
+ return Package(type_name=pkg_type,
+ root_config=self.root_config,
+ cpv=cpv, built=built, installed=installed,
+ metadata=metadata).visible
+
+ def _xmatch(self, level, atom):
+ """
+ This method does not expand old-style virtuals because it
+ is restricted to returning matches for a single ${CATEGORY}/${PN}
+	and old-style virtual matches are unreliable for that when
+	querying multiple package databases. If necessary, old-style
+	virtual expansion can be performed on atoms prior to calling
+	this method.
+ """
+ cp = portage.dep_getkey(atom)
+ if level == "match-all":
+ matches = set()
+ for db in self._dbs:
+ if hasattr(db, "xmatch"):
+ matches.update(db.xmatch(level, atom))
+ else:
+ matches.update(db.match(atom))
+ result = list(x for x in matches if portage.cpv_getkey(x) == cp)
+ db._cpv_sort_ascending(result)
+ elif level == "match-visible":
+ matches = set()
+ for db in self._dbs:
+ if hasattr(db, "xmatch"):
+ matches.update(db.xmatch(level, atom))
+ else:
+ db_keys = list(db._aux_cache_keys)
+ for cpv in db.match(atom):
+ metadata = zip(db_keys,
+ db.aux_get(cpv, db_keys))
+ if not self._visible(db, cpv, metadata):
+ continue
+ matches.add(cpv)
+ result = list(x for x in matches if portage.cpv_getkey(x) == cp)
+ db._cpv_sort_ascending(result)
+ elif level == "bestmatch-visible":
+ result = None
+ for db in self._dbs:
+ if hasattr(db, "xmatch"):
+ cpv = db.xmatch("bestmatch-visible", atom)
+ if not cpv or portage.cpv_getkey(cpv) != cp:
+ continue
+ if not result or cpv == portage.best([cpv, result]):
+ result = cpv
+ else:
+ db_keys = Package.metadata_keys
+				# break out of this loop with the highest
+				# visible match, checked in descending order
+ for cpv in reversed(db.match(atom)):
+ if portage.cpv_getkey(cpv) != cp:
+ continue
+ metadata = zip(db_keys,
+ db.aux_get(cpv, db_keys))
+ if not self._visible(db, cpv, metadata):
+ continue
+ if not result or cpv == portage.best([cpv, result]):
+ result = cpv
+ break
+ else:
+ raise NotImplementedError(level)
+ return result
+
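+	# Illustrative sketch of the supported levels (example atom assumed;
+	# root_config must already be in scope):
+	#
+	#     >>> s = search(root_config, spinner=None, searchdesc=False,
+	#     ...     verbose=False, usepkg=True, usepkgonly=False)
+	#     >>> s._xmatch("match-all", "app-shells/bash")       # every cpv
+	#     >>> s._xmatch("match-visible", "app-shells/bash")   # unmasked cpvs
+	#     >>> s._xmatch("bestmatch-visible", "app-shells/bash")  # one best cpv
+	#
+	# Any other level raises NotImplementedError.
+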
+ def execute(self,searchkey):
+ """Performs the search for the supplied search key"""
+ match_category = 0
+ self.searchkey=searchkey
+ self.packagematches = []
+ if self.searchdesc:
+ self.searchdesc=1
+ self.matches = {"pkg":[], "desc":[], "set":[]}
+ else:
+ self.searchdesc=0
+ self.matches = {"pkg":[], "set":[]}
+ print("Searching... ", end=' ')
+
+ regexsearch = False
+ if self.searchkey.startswith('%'):
+ regexsearch = True
+ self.searchkey = self.searchkey[1:]
+ if self.searchkey.startswith('@'):
+ match_category = 1
+ self.searchkey = self.searchkey[1:]
+ if regexsearch:
+ self.searchre=re.compile(self.searchkey,re.I)
+ else:
+ self.searchre=re.compile(re.escape(self.searchkey), re.I)
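+		# For reference (behavior of the code above): a leading '%' switches
+		# to regular-expression matching, and a leading '@' matches against
+		# the full category/package name, e.g.
+		#
+		#     emerge --search "%^python$"         # regex on package names
+		#     emerge --search "@dev-lang/python"  # match including category
+		#
+		# Both prefixes may be combined, '%' first ("%@^dev-java/.*jdk").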
+
+ for package in self._cp_all():
+ self._spinner_update()
+
+ if match_category:
+ match_string = package[:]
+ else:
+ match_string = package.split("/")[-1]
+
+ masked=0
+ if self.searchre.search(match_string):
+ if not self._xmatch("match-visible", package):
+ masked=1
+ self.matches["pkg"].append([package,masked])
+ elif self.searchdesc: # DESCRIPTION searching
+ full_package = self._xmatch("bestmatch-visible", package)
+ if not full_package:
+ #no match found; we don't want to query description
+ full_package = portage.best(
+ self._xmatch("match-all", package))
+ if not full_package:
+ continue
+ else:
+ masked=1
+ try:
+ full_desc = self._aux_get(
+ full_package, ["DESCRIPTION"])[0]
+ except KeyError:
+ print("emerge: search: aux_get() failed, skipping")
+ continue
+ if self.searchre.search(full_desc):
+ self.matches["desc"].append([full_package,masked])
+
+ self.sdict = self.setconfig.getSets()
+ for setname in self.sdict:
+ self._spinner_update()
+ if match_category:
+ match_string = setname
+ else:
+ match_string = setname.split("/")[-1]
+
+ if self.searchre.search(match_string):
+ self.matches["set"].append([setname, False])
+ elif self.searchdesc:
+ if self.searchre.search(
+ self.sdict[setname].getMetadata("DESCRIPTION")):
+ self.matches["set"].append([setname, False])
+
+ self.mlen=0
+ for mtype in self.matches:
+ self.matches[mtype].sort()
+ self.mlen += len(self.matches[mtype])
+
+ def addCP(self, cp):
+ if not self._xmatch("match-all", cp):
+ return
+ masked = 0
+ if not self._xmatch("bestmatch-visible", cp):
+ masked = 1
+ self.matches["pkg"].append([cp, masked])
+ self.mlen += 1
+
+ def output(self):
+ """Outputs the results of the search."""
+ msg = []
+ msg.append("\b\b \n[ Results for search key : " + \
+ bold(self.searchkey) + " ]\n")
+ msg.append("[ Applications found : " + \
+ bold(str(self.mlen)) + " ]\n\n")
+ vardb = self.vartree.dbapi
+ for mtype in self.matches:
+ for match,masked in self.matches[mtype]:
+ full_package = None
+ if mtype == "pkg":
+ catpack = match
+ full_package = self._xmatch(
+ "bestmatch-visible", match)
+ if not full_package:
+ #no match found; we don't want to query description
+ masked=1
+ full_package = portage.best(
+ self._xmatch("match-all",match))
+ elif mtype == "desc":
+ full_package = match
+ match = portage.cpv_getkey(match)
+ elif mtype == "set":
+ msg.append(green("*") + " " + bold(match) + "\n")
+ if self.verbose:
+ msg.append(" " + darkgreen("Description:") + \
+ " " + \
+ self.sdict[match].getMetadata("DESCRIPTION") \
+ + "\n\n")
+ if full_package:
+ try:
+ desc, homepage, license = self._aux_get(
+ full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
+ except KeyError:
+ msg.append("emerge: search: aux_get() failed, skipping\n")
+ continue
+ if masked:
+ msg.append(green("*") + " " + \
+ white(match) + " " + red("[ Masked ]") + "\n")
+ else:
+ msg.append(green("*") + " " + bold(match) + "\n")
+ myversion = self.getVersion(full_package, search.VERSION_RELEASE)
+
+ mysum = [0,0]
+ file_size_str = None
+ mycat = match.split("/")[0]
+ mypkg = match.split("/")[1]
+ mycpv = match + "-" + myversion
+ myebuild = self._findname(mycpv)
+ if myebuild:
+ pkgdir = os.path.dirname(myebuild)
+ from portage import manifest
+ mf = manifest.Manifest(
+ pkgdir, self.settings["DISTDIR"])
+ try:
+ uri_map = self._getFetchMap(mycpv)
+ except portage.exception.InvalidDependString as e:
+ file_size_str = "Unknown (%s)" % (e,)
+ del e
+ else:
+ try:
+ mysum[0] = mf.getDistfilesSize(uri_map)
+ except KeyError as e:
+ file_size_str = "Unknown (missing " + \
+ "digest for %s)" % (e,)
+ del e
+
+ available = False
+ for db in self._dbs:
+ if db is not vardb and \
+ db.cpv_exists(mycpv):
+ available = True
+ if not myebuild and hasattr(db, "bintree"):
+ myebuild = db.bintree.getname(mycpv)
+ try:
+ mysum[0] = os.stat(myebuild).st_size
+ except OSError:
+ myebuild = None
+ break
+
+ if myebuild and file_size_str is None:
+ mystr = str(mysum[0] // 1024)
+ mycount = len(mystr)
+ while (mycount > 3):
+ mycount -= 3
+ mystr = mystr[:mycount] + "," + mystr[mycount:]
+ file_size_str = mystr + " kB"
+
+ if self.verbose:
+ if available:
+ msg.append(" %s %s\n" % \
+ (darkgreen("Latest version available:"),
+ myversion))
+ msg.append(" %s\n" % \
+ self.getInstallationStatus(mycat+'/'+mypkg))
+ if myebuild:
+ msg.append(" %s %s\n" % \
+ (darkgreen("Size of files:"), file_size_str))
+ msg.append(" " + darkgreen("Homepage:") + \
+ " " + homepage + "\n")
+ msg.append(" " + darkgreen("Description:") \
+ + " " + desc + "\n")
+ msg.append(" " + darkgreen("License:") + \
+ " " + license + "\n\n")
+ writemsg_stdout(''.join(msg), noiselevel=-1)
+ #
+ # private interface
+ #
+ def getInstallationStatus(self,package):
+ installed_package = self.vartree.dep_bestmatch(package)
+ result = ""
+ version = self.getVersion(installed_package,search.VERSION_RELEASE)
+ if len(version) > 0:
+ result = darkgreen("Latest version installed:")+" "+version
+ else:
+ result = darkgreen("Latest version installed:")+" [ Not Installed ]"
+ return result
+
+ def getVersion(self,full_package,detail):
+ if len(full_package) > 1:
+ package_parts = portage.catpkgsplit(full_package)
+ if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
+ result = package_parts[2]+ "-" + package_parts[3]
+ else:
+ result = package_parts[2]
+ else:
+ result = ""
+ return result
+
diff --git a/portage_with_autodep/pym/_emerge/show_invalid_depstring_notice.py b/portage_with_autodep/pym/_emerge/show_invalid_depstring_notice.py
new file mode 100644
index 0000000..a230b31
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/show_invalid_depstring_notice.py
@@ -0,0 +1,35 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+import textwrap
+import portage
+from portage import os
+from portage.util import writemsg_level
+
+def show_invalid_depstring_notice(parent_node, depstring, error_msg):
+
+ msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
+ "\n\n%s\n\n%s\n\n" % (error_msg, parent_node)
+ p_key = parent_node.cpv
+ p_status = parent_node.operation
+ msg = []
+ if p_status == "nomerge":
+ category, pf = portage.catsplit(p_key)
+ pkg_location = os.path.join(parent_node.root_config.settings['EROOT'], portage.VDB_PATH, category, pf)
+ msg.append("Portage is unable to process the dependencies of the ")
+ msg.append("'%s' package. " % p_key)
+ msg.append("In order to correct this problem, the package ")
+ msg.append("should be uninstalled, reinstalled, or upgraded. ")
+ msg.append("As a temporary workaround, the --nodeps option can ")
+ msg.append("be used to ignore all dependencies. For reference, ")
+ msg.append("the problematic dependencies can be found in the ")
+ msg.append("*DEPEND files located in '%s/'." % pkg_location)
+ else:
+ msg.append("This package can not be installed. ")
+ msg.append("Please notify the '%s' package maintainer " % p_key)
+ msg.append("about this problem.")
+
+ msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
+ writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
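+
+# Illustrative example of the wrap-and-join pattern used above:
+#
+#     >>> import textwrap
+#     >>> "".join("%s\n" % l for l in textwrap.wrap("a b c", 3))
+#     'a b\nc\n'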
+
diff --git a/portage_with_autodep/pym/_emerge/stdout_spinner.py b/portage_with_autodep/pym/_emerge/stdout_spinner.py
new file mode 100644
index 0000000..5ad31f0
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/stdout_spinner.py
@@ -0,0 +1,83 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import platform
+import sys
+import time
+
+from portage.output import darkgreen, green
+
+class stdout_spinner(object):
+ scroll_msgs = [
+ "Gentoo Rocks ("+platform.system()+")",
+ "Thank you for using Gentoo. :)",
+ "Are you actually trying to read this?",
+ "How many times have you stared at this?",
+ "We are generating the cache right now",
+ "You are paying too much attention.",
+ "A theory is better than its explanation.",
+ "Phasers locked on target, Captain.",
+ "Thrashing is just virtual crashing.",
+ "To be is to program.",
+ "Real Users hate Real Programmers.",
+ "When all else fails, read the instructions.",
+ "Functionality breeds Contempt.",
+ "The future lies ahead.",
+ "3.1415926535897932384626433832795028841971694",
+ "Sometimes insanity is the only alternative.",
+ "Inaccuracy saves a world of explanation.",
+ ]
+
+ twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
+
+ def __init__(self):
+ self.spinpos = 0
+ self.update = self.update_twirl
+ self.scroll_sequence = self.scroll_msgs[
+ int(time.time() * 100) % len(self.scroll_msgs)]
+ self.last_update = 0
+ self.min_display_latency = 0.05
+
+ def _return_early(self):
+ """
+		Flushing output to the tty too frequently wastes CPU time. Therefore,
+ each update* method should return without doing any output when this
+ method returns True.
+ """
+ cur_time = time.time()
+ if cur_time - self.last_update < self.min_display_latency:
+ return True
+ self.last_update = cur_time
+ return False
+
+ def update_basic(self):
+ self.spinpos = (self.spinpos + 1) % 500
+ if self._return_early():
+ return
+ if (self.spinpos % 100) == 0:
+ if self.spinpos == 0:
+ sys.stdout.write(". ")
+ else:
+ sys.stdout.write(".")
+ sys.stdout.flush()
+
+ def update_scroll(self):
+ if self._return_early():
+ return
+		if self.spinpos >= len(self.scroll_sequence):
+ sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
+ len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
+ else:
+ sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
+ sys.stdout.flush()
+ self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
+
+ def update_twirl(self):
+ self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
+ if self._return_early():
+ return
+ sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
+ sys.stdout.flush()
+
+ def update_quiet(self):
+ return
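+
+# Minimal usage sketch (illustrative only): callers pick one of the
+# update_* methods by rebinding self.update, then call spinner.update()
+# from their work loop; _return_early() throttles redraws to one per 50 ms.
+#
+#     >>> spinner = stdout_spinner()
+#     >>> spinner.update = spinner.update_scroll
+#     >>> for _ in range(1000):
+#     ...     spinner.update()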
diff --git a/portage_with_autodep/pym/_emerge/sync/__init__.py b/portage_with_autodep/pym/_emerge/sync/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/sync/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/portage_with_autodep/pym/_emerge/sync/getaddrinfo_validate.py b/portage_with_autodep/pym/_emerge/sync/getaddrinfo_validate.py
new file mode 100644
index 0000000..5e6009c
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/sync/getaddrinfo_validate.py
@@ -0,0 +1,29 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+def getaddrinfo_validate(addrinfos):
+ """
+ Validate structures returned from getaddrinfo(),
+ since they may be corrupt, especially when python
+ has IPv6 support disabled (bug #340899).
+ """
+ valid_addrinfos = []
+ for addrinfo in addrinfos:
+ try:
+ if len(addrinfo) != 5:
+ continue
+ if len(addrinfo[4]) < 2:
+ continue
+ if not isinstance(addrinfo[4][0], basestring):
+ continue
+ except TypeError:
+ continue
+
+ valid_addrinfos.append(addrinfo)
+
+ return valid_addrinfos
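+
+# Illustrative sketch: a well-formed addrinfo tuple is
+# (family, socktype, proto, canonname, sockaddr), with sockaddr of the
+# form (host, port, ...). Anything shorter, or with a non-string host,
+# is silently dropped:
+#
+#     >>> import socket
+#     >>> good = (socket.AF_INET, socket.SOCK_STREAM, 6, '',
+#     ...     ('127.0.0.1', 873))
+#     >>> getaddrinfo_validate([good, (1, 2), None]) == [good]
+#     True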
diff --git a/portage_with_autodep/pym/_emerge/sync/old_tree_timestamp.py b/portage_with_autodep/pym/_emerge/sync/old_tree_timestamp.py
new file mode 100644
index 0000000..9b35aed
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/sync/old_tree_timestamp.py
@@ -0,0 +1,98 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import locale
+import logging
+import time
+
+from portage import os
+from portage.exception import PortageException
+from portage.localization import _
+from portage.output import EOutput
+from portage.util import grabfile, writemsg_level
+
+def have_english_locale():
+ lang, enc = locale.getdefaultlocale()
+ if lang is not None:
+ lang = lang.lower()
+ lang = lang.split('_', 1)[0]
+ return lang is None or lang in ('c', 'en')
+
+def whenago(seconds):
+ sec = int(seconds)
+ mins = 0
+ days = 0
+ hrs = 0
+ years = 0
+ out = []
+
+	if sec > 60:
+		# floor division keeps the arithmetic integral on Python 3 as well
+		mins = sec // 60
+		sec = sec % 60
+	if mins > 60:
+		hrs = mins // 60
+		mins = mins % 60
+	if hrs > 24:
+		days = hrs // 24
+		hrs = hrs % 24
+	if days > 365:
+		years = days // 365
+		days = days % 365
+
+ if years:
+ out.append("%dy " % years)
+ if days:
+ out.append("%dd " % days)
+ if hrs:
+ out.append("%dh " % hrs)
+ if mins:
+ out.append("%dm " % mins)
+ if sec:
+ out.append("%ds " % sec)
+
+ return "".join(out).strip()
+
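+# Illustrative example: whenago() renders an age largest-unit first,
+# omitting empty fields, e.g.
+#
+#     >>> whenago(90061)   # 1 day, 1 hour, 1 minute, 1 second
+#     '1d 1h 1m 1s'
+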
+def old_tree_timestamp_warn(portdir, settings):
+ unixtime = time.time()
+ default_warnsync = 30
+
+ timestamp_file = os.path.join(portdir, "metadata/timestamp.x")
+ try:
+ lastsync = grabfile(timestamp_file)
+ except PortageException:
+ return False
+
+ if not lastsync:
+ return False
+
+ lastsync = lastsync[0].split()
+ if not lastsync:
+ return False
+
+ try:
+ lastsync = int(lastsync[0])
+ except ValueError:
+ return False
+
+ var_name = 'PORTAGE_SYNC_STALE'
+ try:
+ warnsync = float(settings.get(var_name, default_warnsync))
+ except ValueError:
+ writemsg_level("!!! %s contains non-numeric value: %s\n" % \
+ (var_name, settings[var_name]),
+ level=logging.ERROR, noiselevel=-1)
+ return False
+
+ if warnsync <= 0:
+ return False
+
+ if (unixtime - 86400 * warnsync) > lastsync:
+ out = EOutput()
+ if have_english_locale():
+ out.ewarn("Last emerge --sync was %s ago." % \
+ whenago(unixtime - lastsync))
+ else:
+ out.ewarn(_("Last emerge --sync was %s.") % \
+ time.strftime('%c', time.localtime(lastsync)))
+ return True
+ return False
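+
+# Behavior sketch: with the default PORTAGE_SYNC_STALE of 30 (days), the
+# warning fires once metadata/timestamp.x is older than 30 * 86400 seconds;
+# a value of 0 or less disables the check. E.g. in make.conf:
+#
+#     PORTAGE_SYNC_STALE="7"   # warn after one week without --sync
+#     PORTAGE_SYNC_STALE="0"   # never warn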
diff --git a/portage_with_autodep/pym/_emerge/unmerge.py b/portage_with_autodep/pym/_emerge/unmerge.py
new file mode 100644
index 0000000..3db3a8b
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/unmerge.py
@@ -0,0 +1,578 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import logging
+import sys
+import textwrap
+import portage
+from portage import os
+from portage.dbapi._expand_new_virt import expand_new_virt
+from portage.output import bold, colorize, darkgreen, green
+from portage._sets import SETPREFIX
+from portage._sets.base import EditablePackageSet
+from portage.util import cmp_sort_key
+
+from _emerge.emergelog import emergelog
+from _emerge.Package import Package
+from _emerge.UninstallFailure import UninstallFailure
+from _emerge.userquery import userquery
+from _emerge.countdown import countdown
+
+def _unmerge_display(root_config, myopts, unmerge_action,
+ unmerge_files, clean_delay=1, ordered=0,
+ writemsg_level=portage.util.writemsg_level):
+ """
+ Returns a tuple of (returncode, pkgmap) where returncode is
+ os.EX_OK if no errors occur, and 1 otherwise.
+ """
+
+ quiet = "--quiet" in myopts
+ settings = root_config.settings
+ sets = root_config.sets
+ vartree = root_config.trees["vartree"]
+ candidate_catpkgs=[]
+ global_unmerge=0
+ out = portage.output.EOutput()
+ pkg_cache = {}
+ db_keys = list(vartree.dbapi._aux_cache_keys)
+
+ def _pkg(cpv):
+ pkg = pkg_cache.get(cpv)
+ if pkg is None:
+ pkg = Package(built=True, cpv=cpv, installed=True,
+ metadata=zip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
+ operation="uninstall", root_config=root_config,
+ type_name="installed")
+ pkg_cache[cpv] = pkg
+ return pkg
+
+ vdb_path = os.path.join(settings["EROOT"], portage.VDB_PATH)
+ try:
+ # At least the parent needs to exist for the lock file.
+ portage.util.ensure_dirs(vdb_path)
+ except portage.exception.PortageException:
+ pass
+ vdb_lock = None
+ try:
+ if os.access(vdb_path, os.W_OK):
+ vartree.dbapi.lock()
+ vdb_lock = True
+
+ realsyslist = []
+ sys_virt_map = {}
+ for x in sets["system"].getAtoms():
+ for atom in expand_new_virt(vartree.dbapi, x):
+ if not atom.blocker:
+ realsyslist.append(atom)
+ if atom.cp != x.cp:
+ sys_virt_map[atom.cp] = x.cp
+
+ syslist = []
+ for x in realsyslist:
+ mycp = x.cp
+ # Since Gentoo stopped using old-style virtuals in
+ # 2011, typically it's possible to avoid getvirtuals()
+ # calls entirely. It will not be triggered here by
+ # new-style virtuals since those are expanded to
+ # non-virtual atoms above by expand_new_virt().
+ if mycp.startswith("virtual/") and \
+ mycp in settings.getvirtuals():
+ providers = []
+ for provider in settings.getvirtuals()[mycp]:
+ if vartree.dbapi.match(provider):
+ providers.append(provider)
+ if len(providers) == 1:
+ syslist.extend(providers)
+ else:
+ syslist.append(mycp)
+ syslist = frozenset(syslist)
+
+ if not unmerge_files:
+ if unmerge_action == "unmerge":
+ print()
+ print(bold("emerge unmerge") + " can only be used with specific package names")
+ print()
+ return 1, {}
+ else:
+ global_unmerge = 1
+
+ localtree = vartree
+ # process all arguments and add all
+ # valid db entries to candidate_catpkgs
+ if global_unmerge:
+ if not unmerge_files:
+ candidate_catpkgs.extend(vartree.dbapi.cp_all())
+ else:
+ #we've got command-line arguments
+ if not unmerge_files:
+ print("\nNo packages to unmerge have been provided.\n")
+ return 1, {}
+ for x in unmerge_files:
+ arg_parts = x.split('/')
+ if x[0] not in [".","/"] and \
+ arg_parts[-1][-7:] != ".ebuild":
+ #possible cat/pkg or dep; treat as such
+ candidate_catpkgs.append(x)
+ elif unmerge_action in ["prune","clean"]:
+ print("\n!!! Prune and clean do not accept individual" + \
+ " ebuilds as arguments;\n skipping.\n")
+ continue
+ else:
+ # it appears that the user is specifying an installed
+ # ebuild and we're in "unmerge" mode, so it's ok.
+ if not os.path.exists(x):
+ print("\n!!! The path '"+x+"' doesn't exist.\n")
+ return 1, {}
+
+ absx = os.path.abspath(x)
+ sp_absx = absx.split("/")
+ if sp_absx[-1][-7:] == ".ebuild":
+ del sp_absx[-1]
+ absx = "/".join(sp_absx)
+
+ sp_absx_len = len(sp_absx)
+
+ vdb_path = os.path.join(settings["EROOT"], portage.VDB_PATH)
+
+ sp_vdb = vdb_path.split("/")
+ sp_vdb_len = len(sp_vdb)
+
+ if not os.path.exists(absx+"/CONTENTS"):
+ print("!!! Not a valid db dir: "+str(absx))
+ return 1, {}
+
+ if sp_absx_len <= sp_vdb_len:
+ # The Path is shorter... so it can't be inside the vdb.
+ print(sp_absx)
+ print(absx)
+ print("\n!!!",x,"cannot be inside "+ \
+ vdb_path+"; aborting.\n")
+ return 1, {}
+
+ for idx in range(0,sp_vdb_len):
+ if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
+ print(sp_absx)
+ print(absx)
+ print("\n!!!", x, "is not inside "+\
+ vdb_path+"; aborting.\n")
+ return 1, {}
+
+ print("="+"/".join(sp_absx[sp_vdb_len:]))
+ candidate_catpkgs.append(
+ "="+"/".join(sp_absx[sp_vdb_len:]))
+
+ newline=""
+ if (not "--quiet" in myopts):
+ newline="\n"
+ if settings["ROOT"] != "/":
+ writemsg_level(darkgreen(newline+ \
+ ">>> Using system located in ROOT tree %s\n" % \
+ settings["ROOT"]))
+
+ if (("--pretend" in myopts) or ("--ask" in myopts)) and \
+ not ("--quiet" in myopts):
+ writemsg_level(darkgreen(newline+\
+ ">>> These are the packages that would be unmerged:\n"))
+
+ # Preservation of order is required for --depclean and --prune so
+ # that dependencies are respected. Use all_selected to eliminate
+ # duplicate packages since the same package may be selected by
+ # multiple atoms.
+ pkgmap = []
+ all_selected = set()
+ for x in candidate_catpkgs:
+ # cycle through all our candidate deps and determine
+ # what will and will not get unmerged
+ try:
+ mymatch = vartree.dbapi.match(x)
+ except portage.exception.AmbiguousPackageName as errpkgs:
+ print("\n\n!!! The short ebuild name \"" + \
+ x + "\" is ambiguous. Please specify")
+ print("!!! one of the following fully-qualified " + \
+ "ebuild names instead:\n")
+ for i in errpkgs[0]:
+ print(" " + green(i))
+ print()
+ sys.exit(1)
+
+ if not mymatch and x[0] not in "<>=~":
+ mymatch = localtree.dep_match(x)
+ if not mymatch:
+ portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
+ (x.replace("null/", ""), unmerge_action), noiselevel=-1)
+ continue
+
+ pkgmap.append(
+ {"protected": set(), "selected": set(), "omitted": set()})
+ mykey = len(pkgmap) - 1
+ if unmerge_action=="unmerge":
+ for y in mymatch:
+ if y not in all_selected:
+ pkgmap[mykey]["selected"].add(y)
+ all_selected.add(y)
+ elif unmerge_action == "prune":
+ if len(mymatch) == 1:
+ continue
+ best_version = mymatch[0]
+ best_slot = vartree.getslot(best_version)
+ best_counter = vartree.dbapi.cpv_counter(best_version)
+ for mypkg in mymatch[1:]:
+ myslot = vartree.getslot(mypkg)
+ mycounter = vartree.dbapi.cpv_counter(mypkg)
+ if (myslot == best_slot and mycounter > best_counter) or \
+ mypkg == portage.best([mypkg, best_version]):
+ if myslot == best_slot:
+ if mycounter < best_counter:
+ # On slot collision, keep the one with the
+ # highest counter since it is the most
+ # recently installed.
+ continue
+ best_version = mypkg
+ best_slot = myslot
+ best_counter = mycounter
+ pkgmap[mykey]["protected"].add(best_version)
+ pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
+ if mypkg != best_version and mypkg not in all_selected)
+ all_selected.update(pkgmap[mykey]["selected"])
+ else:
+ # unmerge_action == "clean"
+ slotmap={}
+ for mypkg in mymatch:
+ if unmerge_action == "clean":
+ myslot = localtree.getslot(mypkg)
+ else:
+ # since we're pruning, we don't care about slots
+ # and put all the pkgs in together
+ myslot = 0
+ if myslot not in slotmap:
+ slotmap[myslot] = {}
+ slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
+
+ for mypkg in vartree.dbapi.cp_list(
+ portage.cpv_getkey(mymatch[0])):
+ myslot = vartree.getslot(mypkg)
+ if myslot not in slotmap:
+ slotmap[myslot] = {}
+ slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
+
+ for myslot in slotmap:
+ counterkeys = list(slotmap[myslot])
+ if not counterkeys:
+ continue
+ counterkeys.sort()
+ pkgmap[mykey]["protected"].add(
+ slotmap[myslot][counterkeys[-1]])
+ del counterkeys[-1]
+
+ for counter in counterkeys[:]:
+ mypkg = slotmap[myslot][counter]
+ if mypkg not in mymatch:
+ counterkeys.remove(counter)
+ pkgmap[mykey]["protected"].add(
+ slotmap[myslot][counter])
+
+ #be pretty and get them in order of merge:
+ for ckey in counterkeys:
+ mypkg = slotmap[myslot][ckey]
+ if mypkg not in all_selected:
+ pkgmap[mykey]["selected"].add(mypkg)
+ all_selected.add(mypkg)
+ # ok, now the last-merged package
+ # is protected, and the rest are selected
+ numselected = len(all_selected)
+ if global_unmerge and not numselected:
+ portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
+ return 1, {}
+
+ if not numselected:
+ portage.writemsg_stdout(
+ "\n>>> No packages selected for removal by " + \
+ unmerge_action + "\n")
+ return 1, {}
+ finally:
+ if vdb_lock:
+ vartree.dbapi.flush_cache()
+ vartree.dbapi.unlock()
+
+ # generate a list of package sets that are directly or indirectly listed in "selected",
+ # as there is no persistent list of "installed" sets
+ installed_sets = ["selected"]
+ stop = False
+ pos = 0
+ while not stop:
+ stop = True
+ pos = len(installed_sets)
+ for s in installed_sets[pos - 1:]:
+ if s not in sets:
+ continue
+ candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
+ if candidates:
+ stop = False
+ installed_sets += candidates
+ installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
+ del stop, pos
+
+	# We don't want to unmerge packages that are still listed in user-editable
+	# package sets reachable from "world", since they would be re-merged on the
+	# next update of "world" or of the relevant package sets.
+ unknown_sets = set()
+ for cp in range(len(pkgmap)):
+ for cpv in pkgmap[cp]["selected"].copy():
+ try:
+ pkg = _pkg(cpv)
+ except KeyError:
+ # It could have been uninstalled
+ # by a concurrent process.
+ continue
+
+ if unmerge_action != "clean" and root_config.root == "/":
+ skip_pkg = False
+ if portage.match_from_list(portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
+ msg = ("Not unmerging package %s since there is no valid reason "
+ "for Portage to unmerge itself.") % (pkg.cpv,)
+ skip_pkg = True
+ elif vartree.dbapi._dblink(cpv).isowner(portage._python_interpreter):
+ msg = ("Not unmerging package %s since there is no valid reason "
+ "for Portage to unmerge currently used Python interpreter.") % (pkg.cpv,)
+ skip_pkg = True
+ if skip_pkg:
+ for line in textwrap.wrap(msg, 75):
+ out.eerror(line)
+ # adjust pkgmap so the display output is correct
+ pkgmap[cp]["selected"].remove(cpv)
+ all_selected.remove(cpv)
+ pkgmap[cp]["protected"].add(cpv)
+ continue
+
+ parents = []
+ for s in installed_sets:
+				# skip sets that the user requested to unmerge, and skip the
+				# world "selected" set, since the package will be removed from
+				# that set later on.
+ if s in root_config.setconfig.active or s == "selected":
+ continue
+
+ if s not in sets:
+ if s in unknown_sets:
+ continue
+ unknown_sets.add(s)
+ out = portage.output.EOutput()
+ out.eerror(("Unknown set '@%s' in %s%s") % \
+ (s, root_config.settings['EROOT'], portage.const.WORLD_SETS_FILE))
+ continue
+
+ # only check instances of EditablePackageSet as other classes are generally used for
+ # special purposes and can be ignored here (and are usually generated dynamically, so the
+ # user can't do much about them anyway)
+ if isinstance(sets[s], EditablePackageSet):
+
+ # This is derived from a snippet of code in the
+ # depgraph._iter_atoms_for_pkg() method.
+ for atom in sets[s].iterAtomsForPackage(pkg):
+ inst_matches = vartree.dbapi.match(atom)
+ inst_matches.reverse() # descending order
+ higher_slot = None
+ for inst_cpv in inst_matches:
+ try:
+ inst_pkg = _pkg(inst_cpv)
+ except KeyError:
+ # It could have been uninstalled
+ # by a concurrent process.
+ continue
+
+ if inst_pkg.cp != atom.cp:
+ continue
+ if pkg >= inst_pkg:
+ # This is descending order, and we're not
+ # interested in any versions <= pkg given.
+ break
+ if pkg.slot_atom != inst_pkg.slot_atom:
+ higher_slot = inst_pkg
+ break
+ if higher_slot is None:
+ parents.append(s)
+ break
+ if parents:
+ print(colorize("WARN", "Package %s is going to be unmerged," % cpv))
+ print(colorize("WARN", "but still listed in the following package sets:"))
+ print(" %s\n" % ", ".join(parents))
+
+ del installed_sets
+
+ numselected = len(all_selected)
+ if not numselected:
+ writemsg_level(
+ "\n>>> No packages selected for removal by " + \
+ unmerge_action + "\n")
+ return 1, {}
+
+ # Unmerge order only matters in some cases
+ if not ordered:
+ unordered = {}
+ for d in pkgmap:
+ selected = d["selected"]
+ if not selected:
+ continue
+ cp = portage.cpv_getkey(next(iter(selected)))
+ cp_dict = unordered.get(cp)
+ if cp_dict is None:
+ cp_dict = {}
+ unordered[cp] = cp_dict
+ for k in d:
+ cp_dict[k] = set()
+ for k, v in d.items():
+ cp_dict[k].update(v)
+ pkgmap = [unordered[cp] for cp in sorted(unordered)]
+
+ for x in range(len(pkgmap)):
+ selected = pkgmap[x]["selected"]
+ if not selected:
+ continue
+ for mytype, mylist in pkgmap[x].items():
+ if mytype == "selected":
+ continue
+ mylist.difference_update(all_selected)
+ cp = portage.cpv_getkey(next(iter(selected)))
+ for y in localtree.dep_match(cp):
+ if y not in pkgmap[x]["omitted"] and \
+ y not in pkgmap[x]["selected"] and \
+ y not in pkgmap[x]["protected"] and \
+ y not in all_selected:
+ pkgmap[x]["omitted"].add(y)
+ if global_unmerge and not pkgmap[x]["selected"]:
+ #avoid cluttering the preview printout with stuff that isn't getting unmerged
+ continue
+ if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
+ virt_cp = sys_virt_map.get(cp)
+ if virt_cp is None:
+ cp_info = "'%s'" % (cp,)
+ else:
+ cp_info = "'%s' (%s)" % (cp, virt_cp)
+ writemsg_level(colorize("BAD","\n\n!!! " + \
+ "%s is part of your system profile.\n" % (cp_info,)),
+ level=logging.WARNING, noiselevel=-1)
+ writemsg_level(colorize("WARN","!!! Unmerging it may " + \
+ "be damaging to your system.\n\n"),
+ level=logging.WARNING, noiselevel=-1)
+ if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
+ countdown(int(settings["EMERGE_WARNING_DELAY"]),
+ colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
+ if not quiet:
+ writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
+ else:
+ writemsg_level(bold(cp) + ": ", noiselevel=-1)
+ for mytype in ["selected","protected","omitted"]:
+ if not quiet:
+ writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
+ if pkgmap[x][mytype]:
+ sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
+ sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
+ for pn, ver, rev in sorted_pkgs:
+ if rev == "r0":
+ myversion = ver
+ else:
+ myversion = ver + "-" + rev
+ if mytype == "selected":
+ writemsg_level(
+ colorize("UNMERGE_WARN", myversion + " "),
+ noiselevel=-1)
+ else:
+ writemsg_level(
+ colorize("GOOD", myversion + " "), noiselevel=-1)
+ else:
+ writemsg_level("none ", noiselevel=-1)
+ if not quiet:
+ writemsg_level("\n", noiselevel=-1)
+ if quiet:
+ writemsg_level("\n", noiselevel=-1)
+
+ writemsg_level("\nAll selected packages: %s\n" % " ".join(all_selected), noiselevel=-1)
+
+ writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
+ " packages are slated for removal.\n")
+ writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
+ " and " + colorize("GOOD", "'omitted'") + \
+ " packages will not be removed.\n\n")
+
+ return os.EX_OK, pkgmap
+
+def unmerge(root_config, myopts, unmerge_action,
+ unmerge_files, ldpath_mtimes, autoclean=0,
+ clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
+ scheduler=None, writemsg_level=portage.util.writemsg_level):
+ """
+ Returns 1 if successful, otherwise 0.
+ """
+
+ if clean_world:
+ clean_world = myopts.get('--deselect') != 'n'
+
+ rval, pkgmap = _unmerge_display(root_config, myopts,
+ unmerge_action, unmerge_files,
+ clean_delay=clean_delay, ordered=ordered,
+ writemsg_level=writemsg_level)
+
+ if rval != os.EX_OK:
+ return 0
+
+ enter_invalid = '--ask-enter-invalid' in myopts
+ vartree = root_config.trees["vartree"]
+ sets = root_config.sets
+ settings = root_config.settings
+ mysettings = portage.config(clone=settings)
+ xterm_titles = "notitles" not in settings.features
+
+ if "--pretend" in myopts:
+ #we're done... return
+ return 0
+ if "--ask" in myopts:
+ if userquery("Would you like to unmerge these packages?",
+ enter_invalid) == "No":
+ # enter pretend mode for correct formatting of results
+ myopts["--pretend"] = True
+ print()
+ print("Quitting.")
+ print()
+ return 0
+ #the real unmerging begins, after a short delay....
+ if clean_delay and not autoclean:
+ countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
+
+ for x in range(len(pkgmap)):
+ for y in pkgmap[x]["selected"]:
+ writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
+ emergelog(xterm_titles, "=== Unmerging... ("+y+")")
+ mysplit = y.split("/")
+ #unmerge...
+ retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
+ mysettings, unmerge_action not in ["clean","prune"],
+ vartree=vartree, ldpath_mtimes=ldpath_mtimes,
+ scheduler=scheduler)
+
+ if retval != os.EX_OK:
+ emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
+ if raise_on_error:
+ raise UninstallFailure(retval)
+ sys.exit(retval)
+ else:
+ if clean_world and hasattr(sets["selected"], "cleanPackage")\
+ and hasattr(sets["selected"], "lock"):
+ sets["selected"].lock()
+ if hasattr(sets["selected"], "load"):
+ sets["selected"].load()
+ sets["selected"].cleanPackage(vartree.dbapi, y)
+ sets["selected"].unlock()
+ emergelog(xterm_titles, " >>> unmerge success: "+y)
+
+ if clean_world and hasattr(sets["selected"], "remove")\
+ and hasattr(sets["selected"], "lock"):
+ sets["selected"].lock()
+ # load is called inside remove()
+ for s in root_config.setconfig.active:
+ sets["selected"].remove(SETPREFIX + s)
+ sets["selected"].unlock()
+
+ return 1
+
diff --git a/portage_with_autodep/pym/_emerge/userquery.py b/portage_with_autodep/pym/_emerge/userquery.py
new file mode 100644
index 0000000..e7ed400
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/userquery.py
@@ -0,0 +1,55 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import sys
+
+from portage.output import bold, create_color_func
+
+def userquery(prompt, enter_invalid, responses=None, colours=None):
+ """Displays a prompt and a set of responses, then waits for a response
+ which is checked against the responses and the first to match is
+ returned. An empty response will match the first value in responses,
+ unless enter_invalid is True. The input buffer is *not* cleared prior
+ to the prompt!
+
+ prompt: a String.
+ responses: a List of Strings.
+ colours: a List of Functions taking and returning a String, used to
+ process the responses for display. Typically these will be functions
+ like red() but could be e.g. lambda x: "DisplayString".
+ If responses is omitted, defaults to ["Yes", "No"], [green, red].
+ If only colours is omitted, defaults to [bold, ...].
+
+ Returns a member of the List responses. (If called without optional
+ arguments, returns "Yes" or "No".)
+ KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
+ printed."""
+ if responses is None:
+ responses = ["Yes", "No"]
+ colours = [
+ create_color_func("PROMPT_CHOICE_DEFAULT"),
+ create_color_func("PROMPT_CHOICE_OTHER")
+ ]
+ elif colours is None:
+ colours=[bold]
+ colours=(colours*len(responses))[:len(responses)]
+ print(bold(prompt), end=' ')
+ try:
+ while True:
+ if sys.hexversion >= 0x3000000:
+ response=input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
+ else:
+ response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
+ if response or not enter_invalid:
+ for key in responses:
+ # An empty response will match the
+ # first value in responses.
+ if response.upper()==key[:len(response)].upper():
+ return key
+ print("Sorry, response '%s' not understood." % response, end=' ')
+ except (EOFError, KeyboardInterrupt):
+ print("Interrupted.")
+ sys.exit(1)
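+
+# Illustrative usage (hypothetical call site):
+#
+#     >>> userquery("Proceed?", enter_invalid=False)   # doctest: +SKIP
+#     Proceed? [Yes/No] y
+#     'Yes'
+#
+# An empty response selects "Yes" here because enter_invalid is False;
+# prefixes are matched case-insensitively, so "n" returns "No".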
+