author     Markus Ullmann <jokey@gentoo.org>  2007-09-30 15:47:56 +0200
committer  Markus Ullmann <jokey@gentoo.org>  2007-09-30 15:47:56 +0200
commit     1c31dadf01898b21afb0404a3ce0594411a27405 (patch)
tree       236c6fa233edd0de268cad12c8703192f29217fd
parent     Minor requirements fix (diff)
Add pkgcore and snakeoil snapshots so parts can be used
-rw-r--r--  README | 11
-rw-r--r--  pkgcore/__init__.py | 3
-rwxr-xr-x  pkgcore/bin/ebuild-env/ebuild-daemon.lib | 62
-rwxr-xr-x  pkgcore/bin/ebuild-env/ebuild-daemon.sh | 266
-rwxr-xr-x  pkgcore/bin/ebuild-env/ebuild-default-functions.sh | 903
-rwxr-xr-x  pkgcore/bin/ebuild-env/ebuild-functions.sh | 339
-rwxr-xr-x  pkgcore/bin/ebuild-env/ebuild.sh | 771
-rwxr-xr-x  pkgcore/bin/ebuild-env/filter-env | 9
-rw-r--r--  pkgcore/bin/ebuild-env/isolated-functions.sh | 187
-rwxr-xr-x  pkgcore/bin/ebuild-env/portageq_emulation | 178
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/dobin | 27
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/doconfd | 14
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/dodir | 7
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/dodoc | 26
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/doenvd | 14
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/doexe | 33
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/dohard | 13
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/dohtml | 172
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/doinfo | 21
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/doinitd | 14
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/doins | 59
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/dolib | 41
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/dolib.a | 7
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/dolib.so | 7
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/doman | 58
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/domo | 26
l---------  pkgcore/bin/ebuild-helpers/donewins | 1
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/dosbin | 27
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/dosed | 22
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/dosym | 14
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/emake | 14
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/fowners | 7
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/fperms | 7
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/newbin | 13
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/newconfd | 13
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/newdoc | 13
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/newenvd | 13
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/newexe | 13
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/newinitd | 13
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/newins | 13
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/newlib.a | 13
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/newlib.so | 13
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/newman | 13
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/newsbin | 13
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/prepall | 86
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/prepalldocs | 57
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/prepallinfo | 8
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/prepallman | 9
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/prepallstrip | 10
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/prepinfo | 58
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/preplib | 25
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/preplib.so | 10
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/prepman | 61
-rwxr-xr-x  pkgcore/bin/ebuild-helpers/prepstrip | 48
-rw-r--r--  pkgcore/binpkg/__init__.py | 3
-rw-r--r--  pkgcore/binpkg/repo_ops.py | 99
-rw-r--r--  pkgcore/binpkg/repository.py | 298
-rw-r--r--  pkgcore/binpkg/xpak.py | 263
-rw-r--r--  pkgcore/cache/__init__.py | 6
-rw-r--r--  pkgcore/cache/anydbm.py | 79
-rw-r--r--  pkgcore/cache/cdb.py | 109
-rw-r--r--  pkgcore/cache/errors.py | 42
-rw-r--r--  pkgcore/cache/flat_hash.py | 136
-rw-r--r--  pkgcore/cache/fs_template.py | 80
-rw-r--r--  pkgcore/cache/metadata.py | 201
-rw-r--r--  pkgcore/cache/sql_template.py | 327
-rw-r--r--  pkgcore/cache/sqlite.py | 80
-rw-r--r--  pkgcore/cache/template.py | 236
-rw-r--r--  pkgcore/cache/util.py | 118
-rw-r--r--  pkgcore/chksum/__init__.py | 116
-rw-r--r--  pkgcore/chksum/defaults.py | 288
-rw-r--r--  pkgcore/chksum/errors.py | 22
-rw-r--r--  pkgcore/chksum/gpg.py | 39
-rw-r--r--  pkgcore/config/__init__.py | 80
-rw-r--r--  pkgcore/config/basics.py | 536
-rw-r--r--  pkgcore/config/central.py | 490
-rw-r--r--  pkgcore/config/cparser.py | 29
-rw-r--r--  pkgcore/config/dhcpformat.py | 169
-rw-r--r--  pkgcore/config/domain.py | 30
-rw-r--r--  pkgcore/config/errors.py | 104
-rw-r--r--  pkgcore/config/mke2fsformat.py | 77
-rw-r--r--  pkgcore/const.py | 65
-rw-r--r--  pkgcore/ebuild/__init__.py | 6
-rwxr-xr-x  pkgcore/ebuild/_atom.so | bin 0 -> 28265 bytes
-rwxr-xr-x  pkgcore/ebuild/_cpv.so | bin 0 -> 22898 bytes
-rwxr-xr-x  pkgcore/ebuild/_depset.so | bin 0 -> 17213 bytes
-rwxr-xr-x  pkgcore/ebuild/_filter_env.so | bin 0 -> 18337 bytes
-rw-r--r--  pkgcore/ebuild/atom.py | 504
-rw-r--r--  pkgcore/ebuild/atom_restricts.py | 116
-rw-r--r--  pkgcore/ebuild/conditionals.py | 342
-rw-r--r--  pkgcore/ebuild/const.py | 22
-rw-r--r--  pkgcore/ebuild/cpv.py | 311
-rw-r--r--  pkgcore/ebuild/digest.py | 228
-rw-r--r--  pkgcore/ebuild/domain.py | 436
-rw-r--r--  pkgcore/ebuild/ebd.py | 666
-rw-r--r--  pkgcore/ebuild/ebuild_built.py | 224
-rw-r--r--  pkgcore/ebuild/ebuild_src.py | 367
-rw-r--r--  pkgcore/ebuild/eclass_cache.py | 135
-rw-r--r--  pkgcore/ebuild/errors.py | 53
-rw-r--r--  pkgcore/ebuild/filter_env.py | 418
-rw-r--r--  pkgcore/ebuild/formatter.py | 486
-rw-r--r--  pkgcore/ebuild/misc.py | 131
-rw-r--r--  pkgcore/ebuild/overlay_repository.py | 89
-rw-r--r--  pkgcore/ebuild/portage_conf.py | 491
-rw-r--r--  pkgcore/ebuild/processor.py | 703
-rw-r--r--  pkgcore/ebuild/profiles.py | 562
-rw-r--r--  pkgcore/ebuild/repo_objs.py | 165
-rw-r--r--  pkgcore/ebuild/repository.py | 316
-rw-r--r--  pkgcore/ebuild/resolver.py | 136
-rw-r--r--  pkgcore/ebuild/triggers.py | 458
-rw-r--r--  pkgcore/fetch/__init__.py | 126
-rw-r--r--  pkgcore/fetch/base.py | 80
-rw-r--r--  pkgcore/fetch/custom.py | 158
-rw-r--r--  pkgcore/fetch/errors.py | 35
-rw-r--r--  pkgcore/fs/__init__.py | 7
-rw-r--r--  pkgcore/fs/contents.py | 279
-rw-r--r--  pkgcore/fs/fs.py | 285
-rw-r--r--  pkgcore/fs/livefs.py | 154
-rw-r--r--  pkgcore/fs/ops.py | 323
-rw-r--r--  pkgcore/fs/tar.py | 127
-rw-r--r--  pkgcore/interfaces/__init__.py | 6
-rw-r--r--  pkgcore/interfaces/data_source.py | 86
-rw-r--r--  pkgcore/interfaces/format.py | 195
-rw-r--r--  pkgcore/interfaces/observer.py | 106
-rw-r--r--  pkgcore/interfaces/repo.py | 331
-rw-r--r--  pkgcore/log.py | 21
-rw-r--r--  pkgcore/merge/__init__.py | 6
-rw-r--r--  pkgcore/merge/const.py | 6
-rw-r--r--  pkgcore/merge/engine.py | 316
-rw-r--r--  pkgcore/merge/errors.py | 42
-rw-r--r--  pkgcore/merge/todo.txt | 40
-rw-r--r--  pkgcore/merge/triggers.py | 539
-rw-r--r--  pkgcore/os_data.py | 74
-rw-r--r--  pkgcore/package/__init__.py | 10
-rw-r--r--  pkgcore/package/base.py | 67
-rw-r--r--  pkgcore/package/conditionals.py | 249
-rw-r--r--  pkgcore/package/errors.py | 18
-rw-r--r--  pkgcore/package/metadata.py | 126
-rw-r--r--  pkgcore/package/mutated.py | 43
-rw-r--r--  pkgcore/package/virtual.py | 49
-rw-r--r--  pkgcore/pkgsets/__init__.py | 6
-rw-r--r--  pkgcore/pkgsets/filelist.py | 78
-rw-r--r--  pkgcore/pkgsets/glsa.py | 253
-rw-r--r--  pkgcore/pkgsets/installed.py | 38
-rw-r--r--  pkgcore/pkgsets/system.py | 13
-rw-r--r--  pkgcore/plugin.py | 224
-rw-r--r--  pkgcore/plugins/__init__.py | 42
-rw-r--r--  pkgcore/plugins/pkgcore_configurables.py | 50
-rw-r--r--  pkgcore/plugins/pkgcore_ebuild_built.py | 8
-rw-r--r--  pkgcore/plugins/pkgcore_ebuild_src.py | 8
-rw-r--r--  pkgcore/plugins/pkgcore_formatters.py | 26
-rw-r--r--  pkgcore/plugins/pkgcore_fsops_default.py | 12
-rw-r--r--  pkgcore/plugins/pkgcore_syncers.py | 16
-rw-r--r--  pkgcore/plugins/pkgcore_triggers.py | 18
-rw-r--r--  pkgcore/repository/__init__.py | 6
-rw-r--r--  pkgcore/repository/configured.py | 62
-rw-r--r--  pkgcore/repository/errors.py | 17
-rw-r--r--  pkgcore/repository/misc.py | 90
-rw-r--r--  pkgcore/repository/multiplex.py | 120
-rw-r--r--  pkgcore/repository/prototype.py | 521
-rw-r--r--  pkgcore/repository/syncable.py | 20
-rw-r--r--  pkgcore/repository/util.py | 41
-rw-r--r--  pkgcore/repository/virtual.py | 51
-rw-r--r--  pkgcore/repository/visibility.py | 64
-rw-r--r--  pkgcore/repository/wrapper.py | 36
-rw-r--r--  pkgcore/resolver/__init__.py | 0
-rw-r--r--  pkgcore/resolver/choice_point.py | 155
-rw-r--r--  pkgcore/resolver/pigeonholes.py | 82
-rw-r--r--  pkgcore/resolver/plan.py | 877
-rw-r--r--  pkgcore/resolver/state.py | 205
-rw-r--r--  pkgcore/resolver/util.py | 42
-rw-r--r--  pkgcore/restrictions/__init__.py | 4
-rwxr-xr-x  pkgcore/restrictions/_restrictions.so | bin 0 -> 15492 bytes
-rw-r--r--  pkgcore/restrictions/boolean.py | 490
-rw-r--r--  pkgcore/restrictions/delegated.py | 53
-rw-r--r--  pkgcore/restrictions/packages.py | 245
-rw-r--r--  pkgcore/restrictions/restriction.py | 200
-rw-r--r--  pkgcore/restrictions/util.py | 37
-rw-r--r--  pkgcore/restrictions/values.py | 685
-rw-r--r--  pkgcore/scripts/__init__.py | 16
-rw-r--r--  pkgcore/scripts/filter_env.py | 95
-rw-r--r--  pkgcore/scripts/pclone_cache.py | 73
-rw-r--r--  pkgcore/scripts/pconfig.py | 317
-rw-r--r--  pkgcore/scripts/pebuild.py | 57
-rw-r--r--  pkgcore/scripts/pmaint.py | 393
-rw-r--r--  pkgcore/scripts/pmerge.py | 561
-rw-r--r--  pkgcore/scripts/pplugincache.py | 40
-rw-r--r--  pkgcore/scripts/pquery.py | 882
-rw-r--r--  pkgcore/spawn.py | 532
-rw-r--r--  pkgcore/sync/__init__.py | 3
-rw-r--r--  pkgcore/sync/base.py | 175
-rw-r--r--  pkgcore/sync/bzr.py | 28
-rw-r--r--  pkgcore/sync/cvs.py | 57
-rw-r--r--  pkgcore/sync/darcs.py | 28
-rw-r--r--  pkgcore/sync/git.py | 35
-rw-r--r--  pkgcore/sync/hg.py | 28
-rw-r--r--  pkgcore/sync/rsync.py | 188
-rw-r--r--  pkgcore/sync/svn.py | 40
-rw-r--r--  pkgcore/util/__init__.py | 4
-rw-r--r--  pkgcore/util/bzip2.py | 69
-rw-r--r--  pkgcore/util/commandline.py | 425
-rw-r--r--  pkgcore/util/packages.py | 15
-rw-r--r--  pkgcore/util/parserestrict.py | 153
-rw-r--r--  pkgcore/util/repo_utils.py | 35
-rw-r--r--  pkgcore/vdb/__init__.py | 30
-rw-r--r--  pkgcore/vdb/contents.py | 180
-rw-r--r--  pkgcore/vdb/ondisk.py | 201
-rw-r--r--  pkgcore/vdb/repo_ops.py | 157
-rw-r--r--  pkgcore/vdb/virtuals.py | 182
-rw-r--r--  pkgcore/version.py | 57
-rw-r--r--  snakeoil/__init__.py | 4
-rw-r--r--  snakeoil/caching.py | 86
-rw-r--r--  snakeoil/compatibility.py | 32
-rw-r--r--  snakeoil/containers.py | 207
-rw-r--r--  snakeoil/currying.py | 129
-rwxr-xr-x  snakeoil/debug_imports.py | 99
-rw-r--r--  snakeoil/demandload.py | 226
-rw-r--r--  snakeoil/dependant_methods.py | 86
-rw-r--r--  snakeoil/descriptors.py | 28
-rw-r--r--  snakeoil/fileutils.py | 284
-rw-r--r--  snakeoil/fix_copy.py | 74
-rw-r--r--  snakeoil/formatters.py | 495
-rw-r--r--  snakeoil/iterables.py | 202
-rw-r--r--  snakeoil/klass.py | 95
-rw-r--r--  snakeoil/lists.py | 171
-rw-r--r--  snakeoil/mappings.py | 579
-rw-r--r--  snakeoil/modules.py | 53
-rw-r--r--  snakeoil/obj.py | 206
-rw-r--r--  snakeoil/osutils/__init__.py | 340
-rw-r--r--  snakeoil/osutils/native_readdir.py | 60
-rw-r--r--  snakeoil/pickling.py | 18
-rw-r--r--  snakeoil/tar.py | 35
-rw-r--r--  snakeoil/version.py | 39
-rw-r--r--  snakeoil/weakrefs.py | 12
-rw-r--r--  snakeoil/xml/__init__.py | 46
-rw-r--r--  snakeoil/xml/bundled_elementtree.py | 1254
236 files changed, 34073 insertions, 4 deletions
diff --git a/README b/README
index eb8c1e2..7a69d0b 100644
--- a/README
+++ b/README
@@ -2,8 +2,11 @@ DIRS
====
dbgenerator: stuff to generate a pgo.db file
-template: the initial template contributed by cla
-web: web-frontend to the pgo.db file
+template: the initial template contributed by cla
+web: web-frontend to the pgo.db file
+pkgcore: pkgcore version in case you don't have one set up
+ and use PYTHONPATH=.
+snakeoil: same as pkgcore, both are taken from post-0.3.1 HEAD
REQUIREMENTS
============
@@ -12,8 +15,8 @@ dbgenerator:
------------
- up-to-date portage tree
(non-unionfs'ed as it might give weird mtimes)
-- pkgcore 0.3.1 or later
- Python 2.5 with sqlite or Python 2.4 with pysqlite2
+- optional: pkgcore and snakeoil (both shipped)
generation time:
at the time of writing, on my box (3ghz p4, udma100 hdd, partly cluttered fs)
@@ -45,5 +48,5 @@ pick up the resulting pgo.db file
this should give you something like
"HTTP Serving HTTP on http://0.0.0.0:8080/"
-6) now point a webbrowser at http://localhost:080/
+6) now point a webbrowser at http://localhost:8080/
and you should see the packages.gentoo.org site
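A quick illustration of the PYTHONPATH setup the README hunk above describes: with the bundled pkgcore/ and snakeoil/ directories sitting next to dbgenerator/ and web/, running from the checkout root is enough for both tools to import them. The entry-point script names below are placeholders, not taken from this commit:

    cd /path/to/checkout                           # directory holding pkgcore/, snakeoil/, dbgenerator/, web/
    PYTHONPATH=. python dbgenerator/generate.py    # hypothetical script name; writes the pgo.db file
    PYTHONPATH=. python web/serve.py               # hypothetical script name; serves http://localhost:8080/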
diff --git a/pkgcore/__init__.py b/pkgcore/__init__.py
new file mode 100644
index 0000000..a9adf28
--- /dev/null
+++ b/pkgcore/__init__.py
@@ -0,0 +1,3 @@
+# Copyright: 2005 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
diff --git a/pkgcore/bin/ebuild-env/ebuild-daemon.lib b/pkgcore/bin/ebuild-env/ebuild-daemon.lib
new file mode 100755
index 0000000..9bc1b91
--- /dev/null
+++ b/pkgcore/bin/ebuild-env/ebuild-daemon.lib
@@ -0,0 +1,62 @@
+#!/bin/bash
+# ebuild-daemon.lib; daemon lib code.
+# Copyright 2005-2006 Brian Harring <ferringb@gmail.com>
+
+alias die='diefunc "$FUNCNAME" "$LINENO" "$?"'
+#alias listen='read -u 3 -t 10'
+alias assert='_pipestatus="${PIPESTATUS[*]}"; [[ "${_pipestatus// /}" -eq 0 ]] || diefunc "$FUNCNAME" "$LINENO" "$_pipestatus"'
+
+
+# ask the python side to display sandbox complaints.
+request_sandbox_summary() {
+ local line
+ speak "request_sandbox_summary ${SANDBOX_LOG}"
+ listen line
+ while [ "$line" != "end_sandbox_summary" ]; do
+ echo "$line"
+ listen line
+ done
+}
+
+internal_inherit() {
+ local line
+ if [ "$#" != "1" ]; then
+ die "internal_inherit accepts one arg, requested eclass location. $* is a bit much"
+ fi
+ speak "request_inherit $1"
+ listen line
+ if [ "$line" == "path" ]; then
+ listen line;
+		source "${line}" || die "failed sourcing inherit: ${line}"
+ elif [ "$line" == "transfer" ]; then
+ listen line;
+		eval "$line" || die "failed evaluating eclass $1 on an inherit transfer"
+	elif [ "$line" == "failed" ]; then
+		die "inherit for $1 failed"
+	else
+		die "unknown inherit command from pythonic side, '$line' for eclass $1"
+ fi
+}
+
+source_profiles() {
+ local line
+ speak request_profiles
+ listen line
+ while [ "$line" != end_request ]; do
+ if [ "$line" == "path" ]; then
+ listen line;
+ source "${line}"
+ elif [ "$line" == "transfer" ]; then
+ listen line;
+ eval "$line" || die "failed evaluating profile bashrc: ${line}"
+ else
+ speak "failed"
+ die "unknown profile bashrc transfer mode from pythonic side, '$line'"
+ fi
+ speak "next"
+ listen line
+ done
+}
+DONT_EXPORT_FUNCS="${DONT_EXPORT_FUNCS} $(declare -F | cut -s -d ' ' -f 3)"
+
+:
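Every helper in this lib follows the same request/response pattern over the daemon's pipes: speak a request, then listen for the python side's answer. A minimal sketch of that exchange, assuming the EBD_READ_FD/EBD_WRITE_FD plumbing from ebuild-daemon.sh is already in place and using "eutils" purely as an example eclass name:

    speak "request_inherit eutils"    # ask the python side for an eclass
    listen mode                       # reply is "path", "transfer", or "failed"
    if [ "$mode" == "path" ]; then
        listen eclass_path            # next line is a filesystem path to source
        source "$eclass_path"
    elif [ "$mode" == "transfer" ]; then
        listen body                   # next line is the eclass text to evaluate
        eval "$body"
    fi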
diff --git a/pkgcore/bin/ebuild-env/ebuild-daemon.sh b/pkgcore/bin/ebuild-env/ebuild-daemon.sh
new file mode 100755
index 0000000..f2ae35f
--- /dev/null
+++ b/pkgcore/bin/ebuild-env/ebuild-daemon.sh
@@ -0,0 +1,266 @@
+#!/bin/bash
+# ebuild-daemon.sh; core ebuild processor handling code
+# Copyright 2004-2006 Brian Harring <ferringb@gmail.com>
+
+alias die='diefunc "$FUNCNAME" "$LINENO" "$?"'
+#alias listen='read -u 3 -t 10'
+alias assert='_pipestatus="${PIPESTATUS[*]}"; [[ "${_pipestatus// /}" -eq 0 ]] || diefunc "$FUNCNAME" "$LINENO" "$_pipestatus"'
+
+# use listen/speak for talking to the running portage instance instead of echo'ing to the fd yourself.
+# this allows us to move the open fd's w/out issues down the line.
+listen() {
+ if ! read -u ${EBD_READ_FD} $1; then
+ echo "coms error, read failed: backing out of daemon."
+ exit 1
+ fi
+}
+
+speak() {
+ echo "$*" >&${EBD_WRITE_FD}
+}
+declare -rf speak
+declare -r EBD_WRITE_FD EBD_READ_FD
+# ensure the other side is still there. This is mostly so the python side can ensure
+# that loading up the intermediate funcs succeeded.
+listen com
+if [ "$com" != "dude?" ]; then
+ echo "serv init coms failed, received $com when expecting 'dude?'"
+ exit 1
+fi
+speak "dude!"
+listen PKGCORE_BIN_PATH
+[ -z "$PKGCORE_BIN_PATH" ] && die "PKGCORE_BIN_PATH=$PKGCORE_BIN_PATH , bailing"
+declare -rx PKGCORE_BIN_PATH
+listen PKGCORE_PYTHON
+[ -z "$PKGCORE_PYTHON" ] && die "empty PKGCORE_PYTHON, bailing"
+declare -rx PKGCORE_PYTHON
+listen PKGCORE_PYTHONPATH
+[ -z "$PKGCORE_PYTHONPATH" ] && die "empty PKGCORE_PYTHONPATH, bailing"
+declare -rx PKGCORE_PYTHONPATH
+
+if ! source "${PKGCORE_BIN_PATH}/ebuild.sh" daemonize; then
+ speak "failed"
+ die "failed sourcing ${PKGCORE_BIN_PATH}/ebuild.sh"
+fi
+
+if [ -n "$SANDBOX_LOG" ]; then
+ listen com
+ if [ "$com" != "sandbox_log?" ]; then
+ echo "unknown com '$com'"
+ exit 1
+ fi
+ speak "$SANDBOX_LOG"
+ declare -rx SANDBOX_LOG="$SANDBOX_LOG" # #="/tmp/sandbox-${P}-${PORTAGE_SANDBOX_PID}.log"
+ addwrite $SANDBOX_LOG
+fi
+
+alive='1'
+re="$(readonly | cut -s -d '=' -f 1 | cut -s -d ' ' -f 3)"
+for x in $re; do
+ if ! hasq $x "$DONT_EXPORT_VARS"; then
+ DONT_EXPORT_VARS="${DONT_EXPORT_VARS} $x"
+ fi
+done
+speak $re
+unset x re
+
+
+if ! source "${PKGCORE_BIN_PATH}/ebuild-daemon.lib"; then
+ speak failed
+	die "failed sourcing ${PKGCORE_BIN_PATH}/ebuild-daemon.lib"
+fi
+
+DONT_EXPORT_FUNCS="$(declare -F | cut -s -d ' ' -f 3)"
+DONT_EXPORT_VARS="${DONT_EXPORT_VARS} alive com PORTAGE_LOGFILE cont"
+
+# speed-up for depend: turn on qa interceptors by default, instead of flipping them on for each
+# depend call.
+export QA_CONTROLLED_EXTERNALLY="yes"
+enable_qa_interceptors
+
+if ! source "${PKGCORE_BIN_PATH}/ebuild-functions.sh"; then
+ speak failed
+	die "failed sourcing ${PKGCORE_BIN_PATH}/ebuild-functions.sh"
+fi
+
+export PORTAGE_PRELOADED_ECLASSES=''
+unset_colors
+
+
+sigint_handler() {
+ EBD_DISABLE_DIEFUNC="asdf"
+ exec 2>/dev/null
+ exec 1>/dev/null
+ kill -2 $PPID
+ speak "killed"
+	# this relies on the python side to *not* discard the "killed" message
+ #exit 2
+}
+trap sigint_handler SIGINT
+
+sigkill_handler() {
+ EBD_DISABLE_DIEFUNC="asdf"
+ exec 2>/dev/null
+ exec 1>/dev/null
+	kill -9 $PPID
+ speak "killed"
+ exit 9
+}
+
+trap sigkill_handler SIGKILL
+
+while [ "$alive" == "1" ]; do
+ com=''
+ listen com
+ case $com in
+ process_ebuild*)
+ # cleanse whitespace.
+ phases="$(echo ${com#process_ebuild})"
+ PORTAGE_SANDBOX_PID="$PPID"
+	# note the '(' forks a subshell; this prevents the initialized ebd env from being polluted by ebuild calls.
+ (
+ if [ "${phases/depend/}" == "$phases" ]; then
+ disable_qa_interceptors
+ fi
+ line=''
+ cont=0
+
+ while [ "$cont" == 0 ]; do
+ line=''
+ listen line
+ if [ "$line" == "start_receiving_env" ]; then
+				while listen line && [ "$line" != "end_receiving_env" ]; do
+ save_IFS
+ IFS=$'\0'
+ eval ${line};
+ val=$?;
+ restore_IFS
+ if [ $val != "0" ]; then
+						echo "err, env receiving threw an error for '$line': $val" >&2
+ speak "env_receiving_failed"
+ cont=1
+ break
+ fi
+ if [ "${on:-unset}" != "unset" ]; then
+ echo "sudo = ${SUDO_COMMAND}" >&2
+						declare | grep -i sudo_command >&2
+ echo "disabling" >&2
+ unset on
+ fi
+ done
+ if [ "$cont" == "0" ]; then
+ speak "env_received"
+ fi
+ elif [ "${line:0:7}" == "logging" ]; then
+ PORTAGE_LOGFILE="$(echo ${line#logging})"
+ speak "logging_ack"
+ elif [ "${line:0:17}" == "set_sandbox_state" ]; then
+ if [ $((${line:18})) -eq 0 ]; then
+ export SANDBOX_DISABLED=1
+ else
+ export SANDBOX_DISABLED=0
+ export SANDBOX_VERBOSE="no"
+ fi
+ elif [ "${line}" == "start_processing" ]; then
+ cont=2
+ else
+ echo "received unknown com: $line" >&2
+ fi
+ done
+ if [ "$cont" != 2 ]; then
+ exit $cont
+ else
+ reset_sandbox
+ if [ -n "$SANDBOX_LOG" ]; then
+ addwrite $SANDBOX_LOG
+ if [ -n "$PORTAGE_LOGFILE" ]; then
+ addwrite "$PORTAGE_LOGFILE"
+ fi
+ fi
+ if [ -z $RC_NOCOLOR ]; then
+ set_colors
+ fi
+
+ DONT_EXPORT_FUNCS="${DONT_EXPORT_FUNCS} ${PORTAGE_PRELOADED_ECLASSES}"
+ for x in $DONT_EXPORT_FUNCS; do
+ declare -fr $x &> /dev/null
+ done
+ for e in $phases; do
+ umask 0022
+ if [ -z $PORTAGE_LOGFILE ]; then
+ execute_phases ${e}
+ ret=$?
+ else
+					# why do it this way rather than the old '[ -f ${T}/.succesfull ]'?
+					# simple. this allows the actual exit code to be used, rather than just stating no .success == 1 || 0
+					# note this was
+					# execute_phases ${e} &> >(umask 0002; tee -i -a $PORTAGE_LOGFILE)
+					# less than bash v3 however hates it. And I hate less than v3.
+					# circle of hate, you see.
+ execute_phases ${e} 2>&1 | {
+ # this applies to the subshell only.
+ umask 0002
+ tee -i -a $PORTAGE_LOGFILE
+ }
+ ret=${PIPESTATUS[0]}
+ fi
+ # if sandbox log exists, then there were complaints from it.
+ # tell python to display the errors, then dump relevant vars for debugging.
+ if [ -n "$SANDBOX_LOG" ] && [ -e "$SANDBOX_LOG" ]; then
+ ret=1
+ echo "sandbox exists- $SANDBOX_LOG"
+ request_sandbox_summary
+ echo "SANDBOX_ON:=${SANDBOX_ON:-unset}" >&2
+ echo "SANDBOX_DISABLED:=${SANDBOX_DISABLED:-unset}" >&2
+ echo "SANDBOX_READ:=${SANDBOX_READ:-unset}" >&2
+ echo "SANDBOX_WRITE:=${SANDBOX_WRITE:-unset}" >&2
+ echo "SANDBOX_PREDICT:=${SANDBOX_PREDICT:-unset}" >&2
+ echo "SANDBOX_DEBUG:=${SANDBOX_DEBUG:-unset}" >&2
+ echo "SANDBOX_DEBUG_LOG:=${SANDBOX_DEBUG_LOG:-unset}" >&2
+ echo "SANDBOX_LOG:=${SANDBOX_LOG:-unset}" >&2
+ echo "SANDBOX_ARMED:=${SANDBOX_ARMED:-unset}" >&2
+ fi
+ if [ "$ret" != "0" ]; then
+ exit $(($ret))
+ fi
+ done
+ fi
+ )
+ # post fork. tell python if it succeeded or not.
+ if [ $? != 0 ]; then
+ echo "phases failed"
+ speak "phases failed"
+ else
+ speak "phases succeeded"
+ fi
+ ;;
+ shutdown_daemon)
+ alive="0"
+ ;;
+ preload_eclass*)
+ echo "preloading eclasses into funcs." >&2
+ disable_qa_interceptors
+ success="succeeded"
+ com="${com#preload_eclass }"
+ for e in ${com}; do
+ x="${e##*/}"
+ x="${x%.eclass}"
+ echo "preloading eclass $x" >&2
+ if ! bash -n "$e"; then
+ echo "errors detected in '$e'" >&2
+ success='failed'
+ break
+ fi
+ y="$( < $e)"
+ eval "eclass_${x}_inherit() {
+ $y
+ }"
+ done
+ speak "preload_eclass ${success}"
+ unset e x y success
+ enable_qa_interceptors
+ export PORTAGE_PRELOADED_ECLASSES="$PORTAGE_PRELOADED_ECLASSES ${com}"
+ ;;
+ esac
+done
+exit 0
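The loop above is driven entirely by one-line commands read from the python side. A rough trace of a single build, with example phase names and an example log path (illustrative values only, not part of this patch):

    # python side sends:                            daemon reaction:
    #   process_ebuild setup compile install        fork a subshell for the listed phases
    #   start_receiving_env ... end_receiving_env   eval each env line, then answer "env_received"
    #   logging /var/tmp/example.log                set PORTAGE_LOGFILE, answer "logging_ack"
    #   start_processing                            run each phase via execute_phases,
    #                                               then answer "phases succeeded" or "phases failed"
    #   shutdown_daemon                             set alive=0 and leave the loop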
diff --git a/pkgcore/bin/ebuild-env/ebuild-default-functions.sh b/pkgcore/bin/ebuild-env/ebuild-default-functions.sh
new file mode 100755
index 0000000..fc299bc
--- /dev/null
+++ b/pkgcore/bin/ebuild-env/ebuild-default-functions.sh
@@ -0,0 +1,903 @@
+#!/bin/bash
+# ebuild-default-functions.sh; default functions for ebuild env that aren't saved- specific to the portage instance.
+# Copyright 2005-2006 Brian Harring <ferringb@gmail.com>
+# Copyright 2004-2006 Gentoo Foundation
+
+portageq() {
+ if [[ $EBUILD_PHASE == depend ]]; then
+		die "portageq calls are disallowed in the depend phase"
+ fi
+ PYTHONPATH="$PKGCORE_PYTHONPATH" \
+ "${PKGCORE_PYTHON}" "${PKGCORE_BIN_PATH}/portageq_emulation" \
+ --domain "${PKGCORE_DOMAIN}" "$@"
+}
+
+has_version()
+{
+ portageq 'has_version' "${ROOT}" "$1"
+}
+
+best_version()
+{
+ portageq 'best_version' "${ROOT}" "$1"
+}
+
+check_KV()
+{
+ if [ -z "${KV}" ]; then
+ eerror ""
+ eerror "Could not determine your kernel version."
+		eerror "Make sure that you have a /usr/src/linux symlink."
+ eerror "And that said kernel has been configured."
+ eerror "You can also simply run the following command"
+ eerror "in the kernel referenced by /usr/src/linux:"
+ eerror " make include/linux/version.h"
+ eerror ""
+ die
+ fi
+}
+
+# adds ".keep" files so that dirs aren't auto-cleaned
+keepdir()
+{
+ dodir "$@"
+ local x
+ if [ "$1" == "-R" ] || [ "$1" == "-r" ]; then
+ shift
+		find "$@" -type d -printf "${D}/%p/.keep\n" | tr "\n" "\0" | $XARGS -0 -n100 touch || die "Failed to recursively create .keep files"
+ else
+ for x in "$@"; do
+ touch "${D}/${x}/.keep" || die "Failed to create .keep in ${D}/${x}"
+ done
+ fi
+}
+
+# sandbox support functions
+addread()
+{
+ export SANDBOX_READ="$SANDBOX_READ:$1"
+}
+
+addwrite()
+{
+ export SANDBOX_WRITE="$SANDBOX_WRITE:$1"
+}
+
+adddeny()
+{
+ export SANDBOX_DENY="$SANDBOX_DENY:$1"
+}
+
+addpredict()
+{
+ export SANDBOX_PREDICT="$SANDBOX_PREDICT:$1"
+}
+
+unpack()
+{
+ local x y myfail srcdir taropts tar_subdir
+ taropts='--no-same-owner'
+
+ [ -z "$*" ] && die "Nothing passed to the 'unpack' command"
+
+ for x in "$@"; do
+ echo ">>> Unpacking ${x} to ${PWD}"
+ myfail="failure unpacking ${x}"
+ y="${x%.*}"
+ y="${y##*.}"
+ if [ "${x:0:2}" == "./" ]; then
+ srcdir=''
+ else
+ srcdir="${DISTDIR}"
+ fi
+
+ [ ! -s "${srcdir}${x}" ] && die "$myfail: empty file"
+ [ "${x/${DISTDIR}}" != "${x}" ] && \
+ die "Arguments to unpack() should not begin with \${DISTDIR}."
+
+ case "${x}" in
+ *.tar)
+ tar xf "${srcdir}${x}" ${taropts} || die "$myfail"
+ ;;
+ *.tar.gz|*.tgz|*.tar.Z)
+ tar xzf "${srcdir}${x}" ${taropts} || die "$myfail"
+ ;;
+ *.tar.bz2|*.tbz2|*.tbz)
+ bzip2 -dc "${srcdir}${x}" | tar xf - ${taropts}
+ assert "$myfail"
+ ;;
+ *.ZIP|*.zip|*.jar)
+ unzip -qo "${srcdir}${x}" || die "$myfail"
+ ;;
+ *.gz|*.Z|*.z)
+ gzip -dc "${srcdir}${x}" > ${x%.*} || die "$myfail"
+ ;;
+ *.bz2|*.bz)
+ bzip2 -dc "${srcdir}${x}" > ${x%.*} || die "$myfail"
+ ;;
+ *.7Z|*.7z)
+ local my_output
+ my_output="$(7z x -y "${srcdir}/${x}")"
+ if [ $? -ne 0 ]; then
+ echo "${my_output}" >&2
+ die "$myfail"
+ fi
+ ;;
+ *.RAR|*.rar)
+ unrar x -idq -o+ "${srcdir}/${x}" || die "$myfail"
+ ;;
+ *.LHa|*.LHA|*.lha|*.lzh)
+ lha xfq "${srcdir}/${x}" || die "$myfail"
+ ;;
+ *.a|*.deb)
+ ar x "${srcdir}/${x}" || die "$myfail"
+ ;;
+ *)
+ echo "unpack ${x}: file format not recognized. Ignoring."
+ ;;
+ esac
+ done
+ find . -mindepth 1 -maxdepth 1 ! -type l -print0 | \
+ ${XARGS} -0 chmod -fR a+rX,u+w,g-w,o-w
+
+}
+
+dyn_setup()
+{
+ MUST_EXPORT_ENV="yes"
+ pkg_setup
+}
+
+dyn_unpack()
+{
+ local newstuff="no"
+ MUST_EXPORT_ENV="yes"
+ if [ -e "${WORKDIR}" ]; then
+ local x
+ local checkme
+ for x in ${AA}; do
+ echo ">>> Checking ${x}'s mtime..."
+ if [ "${DISTDIR}/${x}" -nt "${WORKDIR}" ]; then
+ echo ">>> ${x} has been updated; recreating WORKDIR..."
+ newstuff="yes"
+ rm -rf "${WORKDIR}"
+ break
+ fi
+ done
+ if [ "${EBUILD}" -nt "${WORKDIR}" ]; then
+ echo ">>> ${EBUILD} has been updated; recreating WORKDIR..."
+ newstuff="yes"
+ rm -rf "${WORKDIR}"
+ fi
+ fi
+
+ cd "${WORKDIR}"
+ src_unpack
+}
+
+abort_handler()
+{
+ local msg
+ if [ "$2" != "fail" ]; then
+ msg="${EBUILD}: ${1} aborted; exiting."
+ else
+ msg="${EBUILD}: ${1} failed; exiting."
+ fi
+ echo
+ echo "$msg"
+ echo
+ eval ${3}
+ #unset signal handler
+}
+
+abort_compile()
+{
+ abort_handler "src_compile" $1
+ exit 1
+}
+
+abort_unpack()
+{
+ abort_handler "src_unpack" $1
+ exit 1
+}
+
+abort_package()
+{
+ abort_handler "dyn_package" $1
+ rm -f "${PKGDIR}"/All/${PF}.t*
+ exit 1
+}
+
+abort_test()
+{
+ abort_handler "dyn_test" $1
+ exit 1
+}
+
+abort_install()
+{
+ abort_handler "src_install" $1
+ exit 1
+}
+
+dyn_compile()
+{
+ MUST_EXPORT_ENV="yes"
+ export DESTTREE=/usr
+ export INSDESTTREE=""
+ export EXEDESTTREE=""
+ export DOCDESTTREE=""
+ export INSOPTIONS="-m0644"
+ export EXEOPTIONS="-m0755"
+ export LIBOPTIONS="-m0644"
+ export DIROPTIONS="-m0755"
+ export MOPREFIX=${PN}
+
+ [ "${CFLAGS-unset}" != "unset" ] && export CFLAGS
+ [ "${CXXFLAGS-unset}" != "unset" ] && export CXXFLAGS
+ [ "${LIBCFLAGS-unset}" != "unset" ] && export LIBCFLAGS
+ [ "${LIBCXXFLAGS-unset}" != "unset" ] && export LIBCXXFLAGS
+ [ "${LDFLAGS-unset}" != "unset" ] && export LDFLAGS
+ [ "${ASFLAGS-unset}" != "unset" ] && export ASFLAGS
+
+ [ ! -z "${DISTCC_DIR}" ] && addwrite "${DISTCC_DIR}"
+
+ if [ -d "${S}" ]; then
+ cd "${S}"
+ else
+ # cd to some random dir that we at least control.
+ cd "${WORKDIR}"
+ fi
+ #our custom version of libtool uses $S and $D to fix
+ #invalid paths in .la files
+ export S D
+	#some packages use an alternative to $S to build in, causing
+	#our libtool to create problematic .la files
+ export PWORKDIR="$WORKDIR"
+ src_compile
+ #|| abort_compile "fail"
+ if hasq nostrip $FEATURES $RESTRICT; then
+ touch DEBUGBUILD
+ fi
+}
+
+
+dyn_test()
+{
+ echo ">>> Test phase [enabled]: ${CATEGORY}/${PF}"
+ MUST_EXPORT_ENV="yes"
+ if [ -d "${S}" ]; then
+ cd "${S}"
+ else
+ cd "${WORKDIR}"
+ fi
+ src_test
+}
+
+
+dyn_install()
+{
+ rm -rf "${D}"
+ mkdir "${D}"
+ if [ -d "${S}" ]; then
+ cd "${S}"
+ else
+ cd "$WORKDIR"
+ fi
+ echo
+ echo ">>> Install ${PF} into ${D} category ${CATEGORY}"
+ #our custom version of libtool uses $S and $D to fix
+ #invalid paths in .la files
+ export S D
+	#some packages use an alternative to $S to build in, causing
+	#our libtool to create problematic .la files
+ export PWORKDIR="$WORKDIR"
+ src_install
+ #|| abort_install "fail"
+ prepall
+ cd "${D}"
+
+ if type -p scanelf > /dev/null ; then
+ # Make sure we disallow insecure RUNPATH/RPATH's
+ # Don't want paths that point to the tree where the package was built
+ # (older, broken libtools would do this). Also check for null paths
+ # because the loader will search $PWD when it finds null paths.
+ f=$(scanelf -qyRF '%r %p' "${D}" | grep -E "(${WORKDIR}|${D}|: |::|^ )")
+ if [[ -n ${f} ]] ; then
+ echo -ne '\a\n'
+			echo "QA Notice: the following files contain insecure RUNPATHs"
+ echo " Please file a bug about this at http://bugs.gentoo.org/"
+ echo " For more information on this issue, kindly review:"
+ echo " http://bugs.gentoo.org/81745"
+ echo "${f}"
+ echo -ne '\a\n'
+ die "Insecure binaries detected"
+ fi
+
+		# Check for setid binaries that are not built with BIND_NOW
+ f=$(scanelf -qyRF '%b %p' "${D}")
+ if [[ -n ${f} ]] ; then
+ echo -ne '\a\n'
+ echo "QA Notice: the following files are setXid, dyn linked, and using lazy bindings"
+ echo " This combination is generally discouraged. Try re-emerging the package:"
+ echo " LDFLAGS='-Wl,-z,now' emerge ${PN}"
+ echo "${f}"
+ echo -ne '\a\n'
+ [[ ${FEATURES/stricter} != "${FEATURES}" ]] \
+ && die "Aborting due to lazy bindings"
+ sleep 1
+ fi
+
+ # TEXTREL's are baaaaaaaad
+ f=$(scanelf -qyRF '%t %p' "${D}")
+ if [[ -n ${f} ]] ; then
+ echo -ne '\a\n'
+ echo "QA Notice: the following files contain runtime text relocations"
+			echo " Text relocations require a lot of extra work to be performed by the"
+			echo " dynamic linker which will cause serious performance impact on IA-32"
+			echo " and might not function properly on other architectures, hppa for example."
+ echo " If you are a programmer please take a closer look at this package and"
+ echo " consider writing a patch which addresses this problem."
+ echo "${f}"
+ echo -ne '\a\n'
+ [[ ${FEATURES/stricter} != "${FEATURES}" ]] \
+ && die "Aborting due to textrels"
+ sleep 1
+ fi
+
+ # Check for files with executable stacks
+ f=$(scanelf -qyRF '%e %p' "${D}")
+ if [[ -n ${f} ]] ; then
+ echo -ne '\a\n'
+ echo "QA Notice: the following files contain executable stacks"
+ echo " Files with executable stacks will not work properly (or at all!)"
+ echo " on some architectures/operating systems. A bug should be filed"
+ echo " at http://bugs.gentoo.org/ to make sure the file is fixed."
+ echo "${f}"
+ echo -ne '\a\n'
+ [[ ${FEATURES/stricter} != "${FEATURES}" ]] \
+ && die "Aborting due to +x stack"
+ sleep 1
+ fi
+
+ # disabled by harring; we don't use it currently.
+ # Save NEEDED information
+ #scanelf -qyRF '%p %n' "${D}" | sed -e 's:^:/:' > "${T}/NEEDED"
+ fi
+
+ if hasq multilib-strict ${FEATURES} && [ -x /usr/bin/file -a -x /usr/bin/find -a \
+ -n "${MULTILIB_STRICT_DIRS}" -a -n "${MULTILIB_STRICT_DENY}" ]; then
+ MULTILIB_STRICT_EXEMPT=${MULTILIB_STRICT_EXEMPT:-"(perl5|gcc|gcc-lib)"}
+ for dir in ${MULTILIB_STRICT_DIRS}; do
+ [ -d "${D}/${dir}" ] || continue
+ for file in $(find ${D}/${dir} -type f | egrep -v "^${D}/${dir}/${MULTILIB_STRICT_EXEMPT}"); do
+ file ${file} | egrep -q "${MULTILIB_STRICT_DENY}" && die "File ${file} matches a file type that is not allowed in ${dir}"
+ done
+ done
+ fi
+
+ echo ">>> Completed installing ${PF} into ${D}"
+ echo
+ unset dir
+ MUST_EXPORT_ENV="yes"
+}
+
+dyn_postinst()
+{
+ pkg_postinst
+}
+
+dyn_preinst()
+{
+ # set IMAGE depending if this is a binary or compile merge
+ local IMAGE=${D}
+
+ # Make sure D is where the package expects it
+ D=${IMAGE} pkg_preinst
+
+ # Smart FileSystem Permissions
+ if hasq sfperms $FEATURES; then
+ for i in $(find "${IMAGE}"/ -type f -perm -4000); do
+ ebegin ">>> SetUID: [chmod go-r] $i "
+ chmod go-r "$i"
+ eend $?
+ done
+ for i in $(find "${IMAGE}"/ -type f -perm -2000); do
+ ebegin ">>> SetGID: [chmod o-r] $i "
+ chmod o-r "$i"
+ eend $?
+ done
+ fi
+
+ # total suid control.
+ if hasq suidctl $FEATURES > /dev/null ; then
+ sfconf=/etc/portage/suidctl.conf
+		echo ">>> Performing suid scan in ${IMAGE}"
+ for i in $(find "${IMAGE}"/ -type f \( -perm -4000 -o -perm -2000 \) ); do
+ if [ -s "${sfconf}" ]; then
+ suid=$(grep ^${i/${IMAGE}/}$ ${sfconf})
+ if [ "${suid}" = "${i/${IMAGE}/}" ]; then
+ echo "- ${i/${IMAGE}/} is an approved suid file"
+ else
+ echo ">>> Removing sbit on non registered ${i/${IMAGE}/}"
+ sleepbeep 6
+ chmod ugo-s "${i}"
+ grep ^#${i/${IMAGE}/}$ ${sfconf} > /dev/null || {
+ # sandbox prevents us from writing directly
+ # to files outside of the sandbox, but this
+					# can easily be bypassed using the addwrite() function
+ addwrite "${sfconf}"
+ echo ">>> Appending commented out entry to ${sfconf} for ${PF}"
+ ls_ret=`ls -ldh "${i}"`
+ echo "## ${ls_ret%${IMAGE}*}${ls_ret#*${IMAGE}}" >> ${sfconf}
+ echo "#${i/${IMAGE}/}" >> ${sfconf}
+ # no delwrite() eh?
+ # delwrite ${sconf}
+ }
+ fi
+ else
+ echo "suidctl feature set but you are lacking a ${sfconf}"
+ fi
+ done
+ fi
+
+ # SELinux file labeling (needs to always be last in dyn_preinst)
+ if hasq selinux $FEATURES || use selinux; then
+ # only attempt to label if setfiles is executable
+ # and 'context' is available on selinuxfs.
+ if [ -f /selinux/context -a -x /usr/sbin/setfiles ]; then
+ echo ">>> Setting SELinux security labels"
+ if [ -f ${POLICYDIR}/file_contexts/file_contexts ]; then
+ cp -f "${POLICYDIR}/file_contexts/file_contexts" "${T}"
+ else
+ make -C "${POLICYDIR}" FC=${T}/file_contexts "${T}/file_contexts"
+ fi
+
+ addwrite /selinux/context
+ /usr/sbin/setfiles -r "${IMAGE}" "${T}/file_contexts" "${IMAGE}" \
+ || die "Failed to set SELinux security labels."
+ else
+ # nonfatal, since merging can happen outside a SE kernel
+ # like during a recovery situation
+ echo "!!! Unable to set SELinux security labels"
+ fi
+ fi
+ MUST_EXPORT_ENV="yes"
+}
+
+
+# debug-print() gets called from many places with verbose status information useful
+# for tracking down problems. The output is in $T/eclass-debug.log.
+# You can set ECLASS_DEBUG_OUTPUT to redirect the output somewhere else as well.
+# The special "on" setting echoes the information, mixing it with the rest of the
+# emerge output.
+# You can override the setting by exporting a new one from the console, or you can
+# set a new default in make.*. Here the default is "" or unset.
+
+# in the future might use e* from /etc/init.d/functions.sh if i feel like it
+debug-print()
+{
+ if [ "$EBUILD_PHASE" == "depend" ] && [ -z "${PKGCORE_DEBUG}" ]; then
+ return
+ fi
+ # if $T isn't defined, we're in dep calculation mode and
+ # shouldn't do anything
+ [ -z "$T" ] && return 0
+
+ while [ "$1" ]; do
+
+ # extra user-configurable targets
+ if [ "$ECLASS_DEBUG_OUTPUT" == "on" ]; then
+ echo "debug: $1"
+ elif [ -n "$ECLASS_DEBUG_OUTPUT" ]; then
+ echo "debug: $1" >> $ECLASS_DEBUG_OUTPUT
+ fi
+
+ # default target
+ echo "$1" >> "${T}/eclass-debug.log"
+ # let the portage user own/write to this file
+ chmod g+w "${T}/eclass-debug.log" &>/dev/null
+
+ shift
+ done
+}
+
+# The following 2 functions are debug-print() wrappers
+
+debug-print-function()
+{
+ str="$1: entering function"
+ shift
+ debug-print "$str, parameters: $*"
+}
+
+debug-print-section()
+{
+ debug-print "now in section $*"
+}
+
+
+internal_inherit()
+{
+ # default, backwards compatible beast.
+ local location overlay
+ location="${ECLASSDIR}/${1}.eclass"
+
+ if [ -n "$PORTDIR_OVERLAY" ]; then
+ local overlay
+ for overlay in ${PORTDIR_OVERLAY}; do
+ if [ -e "${overlay}/eclass/${1}.eclass" ]; then
+ location="${overlay}/eclass/${1}.eclass"
+ debug-print " eclass exists: ${location}"
+ fi
+ done
+ fi
+ debug-print "inherit: $1 -> $location"
+ source "$location" || die "died sourcing $location in inherit()"
+ return 0
+}
+
+# Sources all eclasses in parameters
+declare -ix ECLASS_DEPTH=0
+inherit()
+{
+ local SAVED_INHERIT_COUNT=0 INHERITED_ALREADY=0
+
+ if [[ $ECLASS_DEPTH < 0 ]] && [ "${EBUILD_PHASE}" == "depend" ]; then
+ echo "QA Notice: ${CATEGORY}/${PF} makes multiple inherit calls: $1" >&2
+ SAVED_INHERIT_COUNT=$ECLASS_DEPTH
+ ECLASS_DEPTH=0
+ fi
+ if hasq $1 $INHERITED && [ "${EBUILD_PHASE}" == "depend" ]; then
+ #echo "QA notice: $1 is inherited multiple times: ${CATEGORY}/${PF}" >&2
+ INHERITED_ALREADY=1
+ fi
+ ECLASS_DEPTH=$(($ECLASS_DEPTH + 1))
+ if [[ $ECLASS_DEPTH > 1 ]]; then
+		debug-print "*** Multiple Inheritance (Level: ${ECLASS_DEPTH})"
+ fi
+
+ local location olocation
+ local PECLASS
+
+ local B_IUSE
+ local B_DEPEND
+ local B_RDEPEND
+ local B_CDEPEND
+ local B_PDEPEND
+ while [ -n "$1" ]; do
+
+ # PECLASS is used to restore the ECLASS var after recursion.
+ PECLASS="$ECLASS"
+ export ECLASS="$1"
+
+ if [ "$EBUILD_PHASE" != "depend" ]; then
+ if ! hasq $ECLASS $INHERITED; then
+ echo
+ echo "QA Notice: ECLASS '$ECLASS' illegal conditional inherit in $CATEGORY/$PF" >&2
+ echo
+ fi
+ fi
+
+ #We need to back up the value of DEPEND and RDEPEND to B_DEPEND and B_RDEPEND
+ #(if set).. and then restore them after the inherit call.
+
+ #turn off glob expansion
+ set -f
+
+ # Retain the old data and restore it later.
+ unset B_IUSE B_DEPEND B_RDEPEND B_CDEPEND B_PDEPEND
+ [ "${IUSE-unset}" != "unset" ] && B_IUSE="${IUSE}"
+ [ "${DEPEND-unset}" != "unset" ] && B_DEPEND="${DEPEND}"
+ [ "${RDEPEND-unset}" != "unset" ] && B_RDEPEND="${RDEPEND}"
+ [ "${CDEPEND-unset}" != "unset" ] && B_CDEPEND="${CDEPEND}"
+ [ "${PDEPEND-unset}" != "unset" ] && B_PDEPEND="${PDEPEND}"
+ unset IUSE DEPEND RDEPEND CDEPEND PDEPEND
+ #turn on glob expansion
+ set +f
+ if ! internal_inherit "$1"; then
+ die "failed sourcing $1 in inherit()"
+ fi
+
+ #turn off glob expansion
+ set -f
+
+ # If each var has a value, append it to the global variable E_* to
+ # be applied after everything is finished. New incremental behavior.
+ [ "${IUSE-unset}" != "unset" ] && export E_IUSE="${E_IUSE} ${IUSE}"
+ [ "${DEPEND-unset}" != "unset" ] && export E_DEPEND="${E_DEPEND} ${DEPEND}"
+ [ "${RDEPEND-unset}" != "unset" ] && export E_RDEPEND="${E_RDEPEND} ${RDEPEND}"
+ [ "${CDEPEND-unset}" != "unset" ] && export E_CDEPEND="${E_CDEPEND} ${CDEPEND}"
+ [ "${PDEPEND-unset}" != "unset" ] && export E_PDEPEND="${E_PDEPEND} ${PDEPEND}"
+
+ [ "${B_IUSE-unset}" != "unset" ] && IUSE="${B_IUSE}"
+ [ "${B_IUSE-unset}" != "unset" ] || unset IUSE
+
+ [ "${B_DEPEND-unset}" != "unset" ] && DEPEND="${B_DEPEND}"
+ [ "${B_DEPEND-unset}" != "unset" ] || unset DEPEND
+
+ [ "${B_RDEPEND-unset}" != "unset" ] && RDEPEND="${B_RDEPEND}"
+ [ "${B_RDEPEND-unset}" != "unset" ] || unset RDEPEND
+
+ [ "${B_CDEPEND-unset}" != "unset" ] && CDEPEND="${B_CDEPEND}"
+ [ "${B_CDEPEND-unset}" != "unset" ] || unset CDEPEND
+
+ [ "${B_PDEPEND-unset}" != "unset" ] && PDEPEND="${B_PDEPEND}"
+ [ "${B_PDEPEND-unset}" != "unset" ] || unset PDEPEND
+
+ #turn on glob expansion
+ set +f
+
+ if hasq $1 $INHERITED && [ $INHERITED_ALREADY == 0 ]; then
+#
+# enable this once eclasses no longer fool with eclass and inherited.
+# if [ "${EBUILD_PHASE}" == "depend" ]; then
+# echo "QA Notice: ${CATEGORY}/${PF}: eclass $1 is incorrectly setting \$INHERITED." >&2
+# fi
+ :
+ else
+ INHERITED="$INHERITED $ECLASS"
+ fi
+ export ECLASS="$PECLASS"
+
+ shift
+ done
+ ECLASS_DEPTH=$(($ECLASS_DEPTH - 1))
+ if [[ $ECLASS_DEPTH == 0 ]]; then
+ ECLASS_DEPTH=$(($SAVED_INHERIT_COUNT - 1))
+ fi
+}
+
+# Exports stub functions that call the eclass's functions, thereby making them default.
+# For example, if ECLASS="base" and you call "EXPORT_FUNCTIONS src_unpack", the following
+# code will be eval'd:
+# src_unpack() { base_src_unpack; }
+EXPORT_FUNCTIONS()
+{
+ if [ -z "$ECLASS" ]; then
+ echo "EXPORT_FUNCTIONS without a defined ECLASS" >&2
+ exit 1
+ fi
+ while [ "$1" ]; do
+ debug-print "EXPORT_FUNCTIONS: ${1} -> ${ECLASS}_${1}"
+ eval "$1() { ${ECLASS}_$1 "\$@" ; }" > /dev/null
+ shift
+ done
+}
+
+# adds all parameters to E_DEPEND and E_RDEPEND, which get added to DEPEND
+# and RDEPEND after the ebuild has been processed. This is important to
+# allow users to use DEPEND="foo" without frying dependencies added by an
+# earlier inherit. It also allows RDEPEND to work properly, since a lot
+# of ebuilds assume that an unset RDEPEND gets its value from DEPEND.
+# Without eclasses, this is true. But with them, the eclass may set
+# RDEPEND itself (or at least used to) which would prevent RDEPEND from
+# getting its value from DEPEND. This is a side-effect that made eclasses
+# have unreliable dependencies.
+
+newdepend()
+{
+ debug-print-function newdepend $*
+ debug-print "newdepend: E_DEPEND=$E_DEPEND E_RDEPEND=$E_RDEPEND"
+
+ while [ -n "$1" ]; do
+ case $1 in
+ "/autotools")
+ do_newdepend DEPEND sys-devel/autoconf sys-devel/automake sys-devel/make
+ ;;
+ "/c")
+ do_newdepend DEPEND sys-devel/gcc virtual/libc
+ do_newdepend RDEPEND virtual/libc
+ ;;
+ *)
+ do_newdepend DEPEND $1
+ ;;
+ esac
+ shift
+ done
+}
+
+newrdepend()
+{
+ debug-print-function newrdepend $*
+ do_newdepend RDEPEND $1
+}
+
+newcdepend()
+{
+ debug-print-function newcdepend $*
+ do_newdepend CDEPEND $1
+}
+
+newpdepend()
+{
+ debug-print-function newpdepend $*
+ do_newdepend PDEPEND $1
+}
+
+do_newdepend()
+{
+ # This function does a generic change determining whether we're in an
+ # eclass or not. If we are, we change the E_* variables for deps.
+ debug-print-function do_newdepend $*
+ [ -z "$1" ] && die "do_newdepend without arguments"
+
+ # Grab what we're affecting... Figure out if we're affecting eclasses.
+ [[ ${ECLASS_DEPTH} > 0 ]] && TARGET="E_$1"
+ [[ ${ECLASS_DEPTH} > 0 ]] || TARGET="$1"
+ shift # $1 was a variable name.
+
+ while [ -n "$1" ]; do
+ # This bit of evil takes TARGET and uses it to evaluate down to a
+		# variable. This is a sneaky way to make this infinitely expandable.
+ # The normal translation of this would look something like this:
+ # E_DEPEND="${E_DEPEND} $1" :::::: Cool, huh? :)
+ eval export ${TARGET}=\"\${${TARGET}} \$1\"
+ shift
+ done
+}
+
+# this is a function for removing any directory matching a passed in pattern from
+# PATH
+remove_path_entry()
+{
+ save_IFS
+ IFS=":"
+ stripped_path="${PATH}"
+ while [ -n "$1" ]; do
+ cur_path=""
+ for p in ${stripped_path}; do
+ if [ "${p/${1}}" == "${p}" ]; then
+ cur_path="${cur_path}:${p}"
+ fi
+ done
+ stripped_path="${cur_path#:*}"
+ shift
+ done
+ restore_IFS
+ PATH="${stripped_path}"
+}
+
+QA_INTERCEPTORS="javac java-config python python-config perl grep egrep fgrep sed gcc g++ cc bash awk nawk pkg-config"
+enable_qa_interceptors()
+{
+
+	# Turn off extended glob matching so that g++ doesn't get incorrectly matched.
+ shopt -u extglob
+
+ # QA INTERCEPTORS
+ local FUNC_SRC BIN BODY BIN_PATH
+ for BIN in ${QA_INTERCEPTORS}; do
+ BIN_PATH=$(type -pf ${BIN})
+ if [ "$?" != "0" ]; then
+ BODY="echo \"*** missing command: ${BIN}\" >&2; return 127"
+ else
+ BODY="${BIN_PATH} \"\$@\"; return \$?"
+ fi
+ FUNC_SRC="function ${BIN}() {
+ echo -n \"QA Notice: ${BIN} in global scope: \" >&2
+ if [ \$ECLASS_DEPTH -gt 0 ]; then
+ echo \"eclass \${ECLASS}\" >&2
+ else
+ echo \"\${CATEGORY}/\${PF}\" >&2
+ fi
+ ${BODY}
+ }";
+ eval "$FUNC_SRC" || echo "error creating QA interceptor ${BIN}" >&2
+ done
+}
+
+disable_qa_interceptors()
+{
+ for x in $QA_INTERCEPTORS; do
+ unset -f $x
+ done
+}
+
+useq()
+{
+ local u="${1}"
+ local neg=0
+ if [ "${u:0:1}" == "!" ]; then
+ u="${u:1}"
+ neg=1
+ fi
+ local x
+
+ # Make sure we have this USE flag in IUSE
+ # temp disable due to PORTAGE_ARCHLIST not being exported in
+ #if ! hasq "${u}" ${IUSE} ${E_IUSE} && ! hasq "${u}" ${PORTAGE_ARCHLIST} selinux; then
+ # echo "QA Notice: USE Flag '${u}' not in IUSE for ${CATEGORY}/${PF}" >&2
+ #fi
+
+ for x in ${USE}; do
+ if [ "${x}" == "${u}" ]; then
+ if [ ${neg} -eq 1 ]; then
+ return 1
+ else
+ return 0
+ fi
+ fi
+ done
+ if [ ${neg} -eq 1 ]; then
+ return 0
+ else
+ return 1
+ fi
+}
+
+usev()
+{
+ if useq ${1}; then
+ echo "${1}"
+ return 0
+ fi
+ return 1
+}
+
+# Used to generate the /lib/cpp and /usr/bin/cc wrappers
+gen_wrapper()
+{
+ cat > $1 << END
+#!/bin/sh
+
+$2 "\$@"
+END
+
+ chmod 0755 $1
+}
+
+insopts()
+{
+ INSOPTIONS=""
+ for x in $*; do
+ #if we have a debug build, let's not strip anything
+ if hasq nostrip $FEATURES $RESTRICT && [ "$x" == "-s" ]; then
+ continue
+ else
+ INSOPTIONS="$INSOPTIONS $x"
+ fi
+ done
+ export INSOPTIONS
+}
+
+diropts()
+{
+ DIROPTIONS=""
+ for x in $*; do
+ DIROPTIONS="${DIROPTIONS} $x"
+ done
+ export DIROPTIONS
+}
+
+exeopts()
+{
+ EXEOPTIONS=""
+ for x in $*; do
+ #if we have a debug build, let's not strip anything
+ if hasq nostrip $FEATURES $RESTRICT && [ "$x" == "-s" ]; then
+ continue
+ else
+ EXEOPTIONS="$EXEOPTIONS $x"
+ fi
+ done
+ export EXEOPTIONS
+}
+
+libopts()
+{
+ LIBOPTIONS=""
+ for x in $*; do
+ #if we have a debug build, let's not strip anything
+ if hasq nostrip $FEATURES $RESTRICT && [ "$x" == "-s" ]; then
+ continue
+ else
+ LIBOPTIONS="$LIBOPTIONS $x"
+ fi
+ done
+ export LIBOPTIONS
+}
+
+DONT_EXPORT_VARS="${DONT_EXPORT_VARS} ECLASS_DEPTH"
+true
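As a concrete illustration of the do_newdepend() indirection documented above (the package atoms are made-up examples): when it is called while ECLASS_DEPTH is greater than zero the E_* variables are targeted, otherwise the plain ones are.

    # from inside an eclass (ECLASS_DEPTH > 0):
    do_newdepend DEPEND dev-libs/foo
    #   TARGET=E_DEPEND, so the eval amounts to: export E_DEPEND="${E_DEPEND} dev-libs/foo"

    # from the ebuild itself (ECLASS_DEPTH == 0):
    do_newdepend RDEPEND dev-libs/bar
    #   export RDEPEND="${RDEPEND} dev-libs/bar"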
diff --git a/pkgcore/bin/ebuild-env/ebuild-functions.sh b/pkgcore/bin/ebuild-env/ebuild-functions.sh
new file mode 100755
index 0000000..da60995
--- /dev/null
+++ b/pkgcore/bin/ebuild-env/ebuild-functions.sh
@@ -0,0 +1,339 @@
+#!/bin/bash
+# ebuild-functions.sh; ebuild env functions, saved with the ebuild (not specific to the portage version).
+# Copyright 2004-2005 Gentoo Foundation
+
+use()
+{
+ if useq ${1}; then
+ return 0
+ fi
+ return 1
+}
+
+has()
+{
+ if hasq "$@"; then
+ return 0
+ fi
+ return 1
+}
+
+use_with()
+{
+ if [ -z "$1" ]; then
+ echo "!!! use_with() called without a parameter." >&2
+ echo "!!! use_with <USEFLAG> [<flagname> [value]]" >&2
+ return
+ fi
+
+ local UW_SUFFIX=""
+ if [ ! -z "${3}" ]; then
+ UW_SUFFIX="=${3}"
+ fi
+
+ local UWORD="$2"
+ if [ -z "${UWORD}" ]; then
+ UWORD="$1"
+ fi
+
+ if useq $1; then
+ echo "--with-${UWORD}${UW_SUFFIX}"
+ return 0
+ else
+ echo "--without-${UWORD}"
+ return 1
+ fi
+}
+
+use_enable()
+{
+ if [ -z "$1" ]; then
+ echo "!!! use_enable() called without a parameter." >&2
+ echo "!!! use_enable <USEFLAG> [<flagname> [value]]" >&2
+ return
+ fi
+
+ local UE_SUFFIX=""
+ if [ ! -z "${3}" ]; then
+ UE_SUFFIX="=${3}"
+ fi
+
+ local UWORD="$2"
+ if [ -z "${UWORD}" ]; then
+ UWORD="$1"
+ fi
+
+ if useq $1; then
+ echo "--enable-${UWORD}${UE_SUFFIX}"
+ return 0
+ else
+ echo "--disable-${UWORD}"
+ return 1
+ fi
+}
+
+econf()
+{
+ local ret
+ ECONF_SOURCE="${ECONF_SOURCE:-.}"
+ if [ ! -x "${ECONF_SOURCE}/configure" ]; then
+ [ -f "${ECONF_SOURCE}/configure" ] && die "configure script isn't executable"
+ die "no configure script found"
+ fi
+ if ! hasq autoconfig $RESTRICT; then
+ if [ -e /usr/share/gnuconfig/ ]; then
+ local x
+ for x in $(find ${WORKDIR} -type f '(' -name config.guess -o -name config.sub ')' ); do
+ echo " * econf: updating ${x/${WORKDIR}\/} with /usr/share/gnuconfig/${x##*/}"
+ cp -f "/usr/share/gnuconfig/${x##*/}" "${x}"
+ chmod a+x "${x}"
+ done
+ fi
+ fi
+ if [ ! -z "${CBUILD}" ]; then
+ EXTRA_ECONF="--build=${CBUILD} ${EXTRA_ECONF}"
+ fi
+
+ # if the profile defines a location to install libs to aside from default, pass it on.
+ # if the ebuild passes in --libdir, they're responsible for the conf_libdir fun.
+ LIBDIR_VAR="LIBDIR_${ABI}"
+ if [ -n "${ABI}" -a -n "${!LIBDIR_VAR}" ]; then
+ CONF_LIBDIR="${!LIBDIR_VAR}"
+ fi
+ unset LIBDIR_VAR
+ if [ -n "${CONF_LIBDIR}" ] && [ "${*/--libdir}" == "$*" ]; then
+ if [ "${*/--exec-prefix}" != "$*" ]; then
+ local args="$(echo $*)"
+ local -a pref=($(echo ${args/*--exec-prefix[= ]}))
+ CONF_PREFIX=${pref}
+ [ "${CONF_PREFIX:0:1}" != "/" ] && CONF_PREFIX="/${CONF_PREFIX}"
+ elif [ "${*/--prefix}" != "$*" ]; then
+ local args="$(echo $*)"
+ local -a pref=($(echo ${args/*--prefix[= ]}))
+ CONF_PREFIX=${pref}
+ [ "${CONF_PREFIX:0:1}" != "/" ] && CONF_PREFIX="/${CONF_PREFIX}"
+ else
+ CONF_PREFIX="/usr"
+ fi
+ export CONF_PREFIX
+ [ "${CONF_LIBDIR:0:1}" != "/" ] && CONF_LIBDIR="/${CONF_LIBDIR}"
+
+ CONF_LIBDIR_RESULT="${CONF_PREFIX}${CONF_LIBDIR}"
+ for X in 1 2 3; do
+ # The escaping is weird. It will break if you escape the last one.
+ CONF_LIBDIR_RESULT="${CONF_LIBDIR_RESULT//\/\///}"
+ done
+
+ EXTRA_ECONF="--libdir=${CONF_LIBDIR_RESULT} ${EXTRA_ECONF}"
+ fi
+ local EECONF_CACHE
+ echo ${ECONF_SOURCE}/configure \
+ --prefix=/usr \
+ --host=${CHOST} \
+ --mandir=/usr/share/man \
+ --infodir=/usr/share/info \
+ --datadir=/usr/share \
+ --sysconfdir=/etc \
+ --localstatedir=/var/lib \
+ ${EXTRA_ECONF} \
+ ${EECONF_CACHE} \
+ "$@"
+
+ if ! ${ECONF_SOURCE}/configure \
+ --prefix=/usr \
+ --host=${CHOST} \
+ --mandir=/usr/share/man \
+ --infodir=/usr/share/info \
+ --datadir=/usr/share \
+ --sysconfdir=/etc \
+ --localstatedir=/var/lib \
+ ${EXTRA_ECONF} \
+ ${EECONF_CACHE} \
+ "$@" ; then
+
+ if [ -s config.log ]; then
+ echo
+ echo "!!! Please attach the config.log to your bug report:"
+ echo "!!! ${PWD}/config.log"
+ fi
+ die "econf failed"
+ fi
+ return $?
+}
+
+strip_duplicate_slashes ()
+{
+ if [ -n "${1}" ]; then
+ local removed="${1/\/\///}"
+ [ "${removed}" != "${removed/\/\///}" ] && removed=$(strip_duplicate_slashes "${removed}")
+ echo ${removed}
+ fi
+}
+
+einstall()
+{
+ # CONF_PREFIX is only set if they didn't pass in libdir above
+ local LOCAL_EXTRA_EINSTALL="${EXTRA_EINSTALL}"
+ LIBDIR_VAR="LIBDIR_${ABI}"
+ if [ -n "${ABI}" -a -n "${!LIBDIR_VAR}" ]; then
+ CONF_LIBDIR="${!LIBDIR_VAR}"
+ fi
+ unset LIBDIR_VAR
+ if [ -n "${CONF_LIBDIR}" ] && [ "${CONF_PREFIX:-unset}" != "unset" ]; then
+ EI_DESTLIBDIR="${D}/${CONF_PREFIX}/${CONF_LIBDIR}"
+ EI_DESTLIBDIR="$(strip_duplicate_slashes ${EI_DESTLIBDIR})"
+ LOCAL_EXTRA_EINSTALL="${LOCAL_EXTRA_EINSTALL} libdir=${EI_DESTLIBDIR}"
+ unset EI_DESTLIBDIR
+ fi
+
+ if [ -f ./[mM]akefile -o -f ./GNUmakefile ] ; then
+ if [ ! -z "${PKGCORE_DEBUG}" ]; then
+ ${MAKE:-make} -n prefix=${D}/usr \
+ datadir=${D}/usr/share \
+ infodir=${D}/usr/share/info \
+ localstatedir=${D}/var/lib \
+ mandir=${D}/usr/share/man \
+ sysconfdir=${D}/etc \
+ ${LOCAL_EXTRA_EINSTALL} \
+ "$@" install
+ fi
+ ${MAKE:-make} prefix=${D}/usr \
+ datadir=${D}/usr/share \
+ infodir=${D}/usr/share/info \
+ localstatedir=${D}/var/lib \
+ mandir=${D}/usr/share/man \
+ sysconfdir=${D}/etc \
+ ${LOCAL_EXTRA_EINSTALL} \
+ "$@" install || die "einstall failed"
+ else
+ die "no Makefile found"
+ fi
+}
+
+pkg_setup()
+{
+ return
+}
+
+pkg_nofetch()
+{
+ [ -z "${SRC_URI}" ] && return
+
+ echo "!!! The following are listed in SRC_URI for ${PN}:"
+ for MYFILE in `echo ${SRC_URI}`; do
+ echo "!!! $MYFILE"
+ done
+}
+
+src_unpack()
+{
+ if [ "${A}" != "" ]; then
+ unpack ${A}
+ fi
+}
+
+src_compile()
+{
+ if [ -x ./configure ]; then
+ econf || die "econf failed"
+ fi
+ if [ -f Makefile ] || [ -f GNUmakefile ] || [ -f makefile ]; then
+ emake || die "emake failed"
+ fi
+}
+
+src_test()
+{
+ addpredict /
+ if make check -n &> /dev/null; then
+ echo ">>> Test phase [check]: ${CATEGORY}/${PF}"
+ if ! make check; then
+ hasq test $FEATURES && die "Make check failed. See above for details."
+ hasq test $FEATURES || eerror "Make check failed. See above for details."
+ fi
+ elif make test -n &> /dev/null; then
+ echo ">>> Test phase [test]: ${CATEGORY}/${PF}"
+ if ! make test; then
+ hasq test $FEATURES && die "Make test failed. See above for details."
+ hasq test $FEATURES || eerror "Make test failed. See above for details."
+ fi
+ else
+ echo ">>> Test phase [none]: ${CATEGORY}/${PF}"
+ fi
+ SANDBOX_PREDICT="${SANDBOX_PREDICT%:/}"
+}
+
+src_install()
+{
+ return
+}
+
+pkg_preinst()
+{
+ return
+}
+
+pkg_postinst()
+{
+ return
+}
+
+pkg_prerm()
+{
+ return
+}
+
+pkg_postrm()
+{
+ return
+}
+
+into()
+{
+	if [ "$1" == "/" ]; then
+ export DESTTREE=""
+ else
+ export DESTTREE=$1
+ if [ ! -d "${D}${DESTTREE}" ]; then
+ install -d "${D}${DESTTREE}"
+ fi
+ fi
+}
+
+insinto()
+{
+ if [ "$1" == "/" ]; then
+ export INSDESTTREE=""
+ else
+ export INSDESTTREE=$1
+ if [ ! -d "${D}${INSDESTTREE}" ]; then
+ install -d "${D}${INSDESTTREE}"
+ fi
+ fi
+}
+
+exeinto()
+{
+ if [ "$1" == "/" ]; then
+ export EXEDESTTREE=""
+ else
+ export EXEDESTTREE="$1"
+ if [ ! -d "${D}${EXEDESTTREE}" ]; then
+ install -d "${D}${EXEDESTTREE}"
+ fi
+ fi
+}
+
+docinto()
+{
+ if [ "$1" == "/" ]; then
+ export DOCDESTTREE=""
+ else
+ export DOCDESTTREE="$1"
+ if [ ! -d "${D}usr/share/doc/${PF}/${DOCDESTTREE}" ]; then
+ install -d "${D}usr/share/doc/${PF}/${DOCDESTTREE}"
+ fi
+ fi
+}
+
+true
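For reference, the configure-flag helpers above expand as follows; a sketch using a made-up "ssl" USE flag that is assumed set and a "debug" flag that is assumed unset:

    ./configure $(use_with ssl openssl) $(use_enable debug)
    #   with ssl in USE and debug not in USE this becomes:
    #   ./configure --with-openssl --disable-debug
    # a third argument becomes a value suffix: use_with ssl openssl 1.0 -> --with-openssl=1.0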
diff --git a/pkgcore/bin/ebuild-env/ebuild.sh b/pkgcore/bin/ebuild-env/ebuild.sh
new file mode 100755
index 0000000..029bf3e
--- /dev/null
+++ b/pkgcore/bin/ebuild-env/ebuild.sh
@@ -0,0 +1,771 @@
+#!/bin/bash
+# ebuild.sh; ebuild phase processing, env handling
+# Copyright 2005-2006 Brian Harring <ferringb@gmail.com>
+# Copyright 2004-2005 Gentoo Foundation
+
+# general phase execution path-
+# execute_phases is called, which sets EBUILD_PHASE, and then depending on the phase,
+# loads or initializes. Env is init'd for non src based stages if the env isn't found- otherwise
+# it loads the environ via a load_environ call. In cases where the env isn't found for phases setup -> merge,
+# it bails (there's no way the env should be missing- the exception is the setup phase).
+#
+# for env filtering for restoration and reloading, note the updates to DONT_EXPORT_(VARS|FUNCS).
+# those vars are basically used to track what shouldn't be saved/restored. Whitespace separated,
+# those vars can support posix (think egrep) regex. They should hold all vars/funcs that are internal
+# ebuild.sh vars. Basically, filter all vars/funcs that are specific to ebuild.sh, not the ebuild.
+#
+# after loading the env, user defined pre hooks are executed, dyn_${EBUILD_PHASE} is executed,
+# and the post hooks are executed. If the env needs to be flushed to disk, MUST_EXPORT_ENV is set to
+# "yes", and execute_phases will dump it to ${T}/environment.
+#
+# few notes on general env stuff- if it's not ebuild specific or a user option, it's typically marked
+# readonly. This limits users, but also helps to ensure that reloaded envs from older portages don't
+# overwrite an internal ebd.sh function that has since changed.
+
+ORIG_VARS=`declare | egrep '^[^[:space:]{}()]+=' | cut -s -d '=' -f 1`
+ORIG_FUNCS=`declare -F | cut -s -d ' ' -f 3`
+
+DONT_EXPORT_FUNCS='portageq speak'
+
+DONT_EXPORT_VARS="ORIG_VARS GROUPS ORIG_FUNCS FUNCNAME DAEMONIZED CCACHE.* DISTCC.* SYNC
+\(TMP\|\)DIR FEATURES CONFIG_PROTECT.* P\?WORKDIR RSYNC_.* GENTOO_MIRRORS
+\(DIST\|FILES\|RPM\|ECLASS\)DIR HOME MUST_EXPORT_ENV QA_CONTROLLED_EXTERNALLY COLORTERM HOSTNAME
+myarg SANDBOX_.* BASH.* EUID PPID SHELLOPTS UID ACCEPT_\(KEYWORDS\|LICENSE\) BUILD\(_PREFIX\|DIR\) T DIRSTACK
+DISPLAY \(EBUILD\)\?_PHASE PORTAGE_.* SUDO_.* LD_PRELOAD ret line phases D IMAGE
+PORT\(_LOGDIR\|DIR\(_OVERLAY\)\?\) ROOT TERM _ done e PROFILE_.* EBUILD ECLASS LINENO
+HILITE TMP HISTCMD OPTIND RANDOM OLDPWD PKGCORE_DOMAIN IFS"
+
+
+if [ -z "$PKGCORE_BIN_PATH" ]; then
+ echo "PKGCORE_BIN_PATH is unset!"
+ exit 1
+fi
+
+# knock the sandbox vars back to the pkgs defaults.
+reset_sandbox() {
+ export SANDBOX_ON="1"
+ export SANDBOX_PREDICT="${SANDBOX_PREDICT:+${SANDBOX_PREDICT}:}/proc/self/maps:/dev/console:/dev/random:${PORTAGE_TMPDIR}"
+ export SANDBOX_WRITE="${SANDBOX_WRITE:+${SANDBOX_WRITE}:}/dev/shm:${PORTAGE_TMPDIR}"
+ export SANDBOX_READ="${SANDBOX_READ:+${SANDBOX_READ}:}/dev/shm:${PORTAGE_TMPDIR}"
+ local s
+ for x in CCACHE_DIR DISTCC_DIR D WORKDIR T; do
+ if [ -n "${!x}" ]; then
+ addread "${!x}"
+ addwrite "${!x}"
+ fi
+ done
+}
+
+# Prevent aliases from causing portage to act inappropriately.
+# Make sure it's before everything so we don't mess aliases that follow.
+unalias -a
+
+# We need this next line for "die" and "assert"; it expands aliases.
+# It _must_ precede all the calls to die and assert.
+shopt -s expand_aliases
+
+# Unset some variables that break things.
+unset GZIP BZIP BZIP2 CDPATH GREP_OPTIONS GREP_COLOR GLOB_IGNORE
+
+alias die='diefunc "$FUNCNAME" "$LINENO" "$?"'
+alias assert='_pipestatus="${PIPESTATUS[*]}"; [[ "${_pipestatus// /}" -eq 0 ]] || diefunc "$FUNCNAME" "$LINENO" "$_pipestatus"'
+alias save_IFS='[ "${IFS:-unset}" != "unset" ] && portage_old_IFS="${IFS}"'
+alias restore_IFS='if [ "${portage_old_IFS:-unset}" != "unset" ]; then IFS="${portage_old_IFS}"; unset portage_old_IFS; else unset IFS; fi'
+
+diefunc() {
+ set +x
+ # if we were signaled to die...
+ if [[ -n $EBD_DISABLE_DIEFUNC ]]; then
+ return
+ fi
+ local funcname="$1" lineno="$2" exitcode="$3"
+ shift 3
+ echo >&2
+ echo "!!! ERROR: $CATEGORY/$PF failed." >&2
+ dump_trace 2 >&2
+ echo "!!! ${*:-(no error message)}" >&2
+ echo "!!! If you need support, post the topmost build error, NOT this status message." >&2
+ if [ "${EBUILD_PHASE/depend}" == "${EBUILD_PHASE}" ]; then
+ for x in ${EBUILD_DEATH_HOOKS}; do
+ ${x} ${1} ${2} ${3} "${@}" >&2 1>&2
+ done
+ fi
+ echo >&2
+ exit 1
+}
+
+
+shopt -s extdebug &> /dev/null
+
+# usage- first arg is the number of funcs on the stack to ignore.
+# defaults to 1 (ignoring dump_trace)
+dump_trace() {
+ local funcname="" sourcefile="" lineno="" n e s="yes"
+
+ declare -i strip=1
+
+ if [[ -n $1 ]]; then
+ strip=$(( $1 ))
+ fi
+
+ echo "Call stack:"
+ for (( n = ${#FUNCNAME[@]} - 1, p = ${#BASH_ARGV[@]} ; n > $strip ; n-- )) ; do
+ funcname=${FUNCNAME[${n} - 1]}
+ sourcefile=$(basename ${BASH_SOURCE[${n}]})
+ lineno=${BASH_LINENO[${n} - 1]}
+ # Display function arguments
+ args=
+ if [[ -n "${BASH_ARGV[@]}" ]]; then
+ for (( j = 0 ; j < ${BASH_ARGC[${n} - 1]} ; ++j )); do
+ newarg=${BASH_ARGV[$(( p - j - 1 ))]}
+ args="${args:+${args} }'${newarg}'"
+ done
+ (( p -= ${BASH_ARGC[${n} - 1]} ))
+ fi
+ echo " ${sourcefile}, line ${lineno}: Called ${funcname}${args:+ ${args}}"
+ done
+}
+
+hasq() {
+ local x
+
+ local me=$1
+ shift
+
+ # All the TTY checks really only help out depend. Which is nice.
+ # Logging kills all this anyway. Everything becomes a pipe. --NJ
+ for x in "$@"; do
+ if [ "${x}" == "${me}" ]; then
+ return 0
+ fi
+ done
+ return 1
+}
+
+hasv() {
+ if hasq "$@"; then
+ echo "${1}"
+ return 0
+ fi
+ return 1
+}
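+# Illustrative usage (comment only, not part of the upstream code):
+#   hasq nostrip ${RESTRICT} && export DEBUGBUILD=1   # membership test, as done later in this file
+#   chosen=$(hasv foo ${SOME_LIST})                   # hasv additionally echoes the match on success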
+
+#if no perms are specified, dirs/files will have decent defaults
+#(not secretive, but not stupid)
+umask 022
+
+# the sandbox is disabled by default except when overridden in the relevant stages
+export SANDBOX_ON="0"
+
+escape_regex() {
+ local f
+ while [ -n "$1" ]; do
+ f="${1//+/\+}"
+ f="${f//.*/[A-Za-z0-9_-+./]*}"
+ echo -n "$f"
+ shift
+ done
+}
+
+filter_env_func_filter() {
+ while [ -n "$1" ]; do
+ echo -n "$(escape_regex "$1")"
+ [ "$#" != 1 ] && echo -n ','
+ shift
+ done
+}
+
+gen_regex_func_filter() {
+ local f
+ if [ "$#" == 1 ]; then
+ echo -n "$(escape_regex "$1")"
+ return
+ fi
+ echo -n "\($(escape_regex "$1")"
+ shift
+ while [ -n "$1" ]; do
+ echo -n "\|$(escape_regex "$1")"
+ shift
+ done
+ echo -n "\)"
+}
+
+filter_env_var_filter() {
+ local _internal_var
+ while [ -n "$1" ]; do
+ echo -n "$1"
+ [ "$#" != 1 ] && echo -n ','
+ shift
+ done
+}
+
+gen_regex_var_filter() {
+ local _internal_var
+ if [ "$#" == 1 ]; then
+ echo -n "$1"
+ return
+ fi
+ echo -n "\($1"
+ shift
+ while [ -n "$1" ]; do
+ echo -n "\|$1"
+ shift
+ done
+ echo -n '\)'
+}
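+# Illustrative expansion (comment only): called with the words FOO BAR BAZ,
+# gen_regex_var_filter yields \(FOO\|BAR\|BAZ\), ready to be anchored in the
+# sed/grep expressions used by dump_environ below.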
+
+# func for beeping and delaying a defined period of time.
+sleepbeep() {
+ if [ ! "$#" -lt 3 ] || [ ! "$#" -gt 0 ]; then
+		echo "sleepbeep requires one arg- the number of beeps"
+		echo "additionally, a 2nd arg may be supplied- the interval between beeps (defaults to 0.25s)"
+ die "invalid call to sleepbeep"
+ fi
+ local count=$(($1))
+ local interval="${2:-0.25}"
+ while [ $count -gt 0 ]; do
+ echo -en "\a";
+ sleep $interval &> /dev/null
+ count=$(($count - 1))
+ done
+ return 0
+}
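+# Illustrative call (comment only): sleepbeep 3 0.5
+# beeps three times, half a second apart.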
+
+# selectively saves the environ- specifically removes things that have been marked to not be exported.
+# dump the environ to stdout.
+dump_environ() {
+ local x y;
+
+	#env dump- if a line doesn't match a var pattern, stop processing it; else print it only if
+	#it doesn't match one of the filter lists.
+	# vars first, then funcs.
+
+ local opts=""
+
+ [[ $PKGCORE_DEBUG -ge 3 ]] && opts="$opts --debug"
+
+ declare | PYTHONPATH="${PKGCORE_PYTHONPATH}" "${PKGCORE_PYTHON}" \
+ "${PKGCORE_BIN_PATH}/filter-env" $opts -f \
+ "$(filter_env_func_filter ${DONT_EXPORT_FUNCS} )" -v \
+ "$(filter_env_var_filter ${DONT_EXPORT_VARS} f x )"
+
+ if ! hasq "--no-attributes" "$@"; then
+ echo "# env attributes"
+ # leave this form so that it's easier to add others in.
+ for y in export ; do
+ x=$(${y} | sed -n "/declare \(-[^ ]\+ \)*/!d; s:^declare \(-[^ ]\+ \)*\([A-Za-z0-9_+]\+\)\(=.*$\)\?$:\2:; /^$(gen_regex_var_filter ${DONT_EXPORT_VARS} x y)$/! p;")
+ [ -n "$x" ] && echo "${y} $(echo $x);"
+ done
+
+ # if it's just declare -f some_func, filter it, else drop it if it's one of the filtered funcs
+ declare -F | sed -n "/^declare -[^ ]\( \|[^ ]? $(gen_regex_func_filter ${DONT_EXPORT_FUNCS})$\)\?/d; s/^/ /;s/;*$/;/p;"
+
+ shopt -p
+ fi
+}
+
+# dump environ to $1, optionally piping it through $2 and redirecting $2's output to $1.
+export_environ() {
+ local temp_umask
+ if [ "${1:-unset}" == "unset" ]; then
+		die "export_environ requires at least one argument"
+ fi
+
+	#the spaces on both sides are important- otherwise, the later ${DONT_EXPORT_VARS/ temp_umask /} won't match.
+	#they also ensure we don't strip part of a variable with the same prefix-
+	# ex: without them, temp_umask_for_some_app would be reduced to _for_some_app.
+
+ DONT_EXPORT_VARS="${DONT_EXPORT_VARS} temp_umask "
+ temp_umask=`umask`
+ umask 0002
+
+ if [ "${2:-unset}" == "unset" ]; then
+ dump_environ > "$1"
+ else
+ dump_environ | $2 > "$1"
+ fi
+ chown portage:portage "$1" &>/dev/null
+ chmod 0664 "$1" &>/dev/null
+
+ DONT_EXPORT_VARS="${DONT_EXPORT_VARS/ temp_umask /}"
+
+ umask $temp_umask
+}
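+# Illustrative usage (comment only; the compressor argument is a hypothetical example):
+#   export_environ "${T}/environment"          # plain filtered dump, as used in execute_phases
+#   export_environ "${T}/environment" bzip2    # same dump, piped through bzip2 before writing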
+
+# reload a saved env, applying usual filters to the env prior to eval'ing it.
+load_environ() {
+ local src e ret EXISTING_PATH
+ # localize these so the reload doesn't have the ability to change them
+ local DONT_EXPORT_VARS="${DONT_EXPORT_VARS} src e ret"
+ local DONT_EXPORT_FUNCS="${DONT_EXPORT_FUNCS} load_file declare"
+ local SANDBOX_STATE=$SANDBOX_ON
+ local EBUILD_PHASE=$EBUILD_PHASE
+ local reload_failure=0
+ SANDBOX_ON=0
+
+ SANDBOX_READ="/bin:${SANDBOX_READ}:/dev/urandom:/dev/random:$PKGCORE_BIN_PATH"
+ SANDBOX_ON=$SANDBOX_STATE
+
+	if [ -z "$1" ]; then
+		die "load_environ called with no args, need args"
+	fi
+	[ ! -f "$1" ] && die "load_environ called with a nonexistent env: $1"
+
+ src="$1"
+
+ EXISTING_PATH=$PATH
+ PKGCORE_ATTRS_EXPORTED=
+ PKGCORE_ATTRS_READONLY=
+ PKGCORE_SHOPTS_SET=
+ PKGCORE_SHOPTS_UNSET=
+ if [ -f "$src" ]; then
+ # other managers shove the export/declares inline; we store it in a
+ # func so that the var attrs can be dropped if needed.
+ # thus we define these temporarily, to intercept the inlined statements
+ # and push them into a func.
+ function declare() {
+ local r e vars
+ while [ "${1:0:1}" == "-" ]; do
+ if [ "${1/r}" != "$1" ]; then
+ r=1
+ fi
+ if [ "${1/x}" != "$1" ]; then
+ e=1
+ fi
+ shift
+ done
+ if [ -z "$r" ] && [ -z "$e" ]; then
+ return
+ fi
+ while [ -n "$1" ]; do
+ vars="${vars} ${1/=*}"
+ shift
+ done
+ if [ -n "$r" ]; then
+ PKGCORE_ATTRS_READONLY="${PKGCORE_ATTRS_READONLY} ${vars}"
+ fi
+ if [ -n "$e" ]; then
+ PKGCORE_ATTRS_EXPORTED="${PKGCORE_ATTRS_EXPORTED} ${vars}"
+ fi
+ };
+ function export() {
+ declare -x "$@"
+ };
+ function readonly() {
+ declare -r "$@"
+ };
+ function shopt() {
+ if [ "$1" == "-s" ]; then
+ shift
+ PKGCORE_SHOPTS_SET="${PKGCORE_SHOPTS_SET} $*"
+ elif [ "$1" == "-u" ]; then
+ shift
+ PKGCORE_SHOPTS_UNSET="${PKGCORE_SHOPTS_UNSET} $*"
+ else
+ echo "ignoring unexpected shopt arg in env dump- $*" >&2
+ fi
+ }
+ local opts=""
+ [[ $PKGCORE_DEBUG -ge 3 ]] && opts="$opts --debug"
+
+ # run the filtered env.
+ eval "$(PYTHONPATH=${PKGCORE_PYTHONPATH} \
+ "${PKGCORE_PYTHON}" "${PKGCORE_BIN_PATH}/filter-env" $opts \
+ -f "$(filter_env_func_filter ${DONT_EXPORT_FUNCS} )" \
+ -v "$(filter_env_var_filter ${DONT_EXPORT_VARS} f x EXISTING_PATH)" -i "$src")"
+ ret=$?
+
+ # if reinstate_loaded_env_attributes exists, run it to add to the vars.
+ type reinstate_loaded_env_attributes &> /dev/null && \
+ reinstate_loaded_env_attributes
+ unset -f declare readonly export reinstate_loaded_env_attributes shopt
+
+ # do not export/readonly an attr that is filtered- those vars are internal/protected,
+	# thus their state is guaranteed
+	# additionally, if the var *was* nonexistent, export'ing it serves to create it
+
+ pkgcore_tmp_func() {
+ while [ -n "$1" ]; do
+ echo "$1"
+ shift
+ done
+ }
+
+ filter="^$(gen_regex_var_filter $DONT_EXPORT_VARS XARGS)$"
+ # yes we're intentionally ignoring PKGCORE_ATTRS_READONLY. readonly isn't currently used.
+ PKGCORE_ATTRS_EXPORTED=$(echo $(pkgcore_tmp_func $PKGCORE_ATTRS_EXPORTED | grep -v "$filter"))
+ unset pkgcore_tmp_func filter
+
+ # rebuild the func.
+ local body=
+ [ -n "$PKGCORE_ATTRS_EXPORTED" ] && body="export $PKGCORE_ATTRS_EXPORTED;"
+ [ -n "$PKGCORE_SHOPTS_SET" ] && body="${body} shopt -s ${PKGCORE_SHOPTS_SET};"
+ [ -n "$PKGCORE_SHOPTS_UNSET" ] && body="${body} shopt -u ${PKGCORE_SHOPTS_UNSET};"
+ unset PKGCORE_ATTRS_READONLY PKGCORE_ATTRS_EXPORTED PKGCORE_SHOPTS_UNSET PKGCORE_SHOPTS_SET
+
+ # and... finally make the func.
+ eval "reinstate_loaded_env_attributes() { ${body:-:;} };"
+ else
+ echo "ebuild=${EBUILD}, phase $EBUILD_PHASE" >&2
+ ret=1
+ fi
+ pkgcore_ensure_PATH "$EXISTING_PATH"
+ return $(( $ret ))
+}
+
+# ensure the passed in PATH has its components in $PATH
+pkgcore_ensure_PATH()
+{
+ local EXISTING_PATH="$1"
+ local adds
+ # note this isolates the adds in the same order they appear in
+ # the passed in path, maintaining that order.
+ if [ "$EXISTING_PATH" != "$PATH" ]; then
+ save_IFS
+ IFS=':'
+ for x in ${EXISTING_PATH}; do
+ # keep in mind PATH=":foon" is a valid way to say "cwd"
+ [ -z "${x}" ] && continue
+ if ! hasq ${x} ${PATH} && ! hasq ${x} ${adds}; then
+ adds="${adds:+${adds}:}${x}"
+ fi
+ done
+ restore_IFS
+ [ -n "$adds" ] && PATH="${PATH}${PATH:+:}${adds}"
+ export PATH
+ fi
+ export PATH
+}
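+# Illustrative behaviour (comment only): with PATH=/usr/bin, calling
+# pkgcore_ensure_PATH "/usr/bin:/opt/bin" appends the missing /opt/bin component,
+# giving PATH=/usr/bin:/opt/bin; components already present are not duplicated.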
+
+# walk the cascaded profiles, sourcing their various bashrcs.
+# normally overridden by the daemon.
+source_profiles() {
+ local dir
+ save_IFS
+ # XXX: Given the following unset, is this set needed?
+ IFS=$'\n'
+ for dir in ${PROFILE_PATHS}; do
+ # Must unset it so that it doesn't mess up assumptions in the RCs.
+ unset IFS
+ if [ -f "${dir}/profile.bashrc" ]; then
+ source "${dir}/profile.bashrc"
+ fi
+ done
+ restore_IFS
+ if [ -f "$PORTAGE_BASHRC" ]; then
+ source "$PORTAGE_BASHRC"
+ fi
+}
+
+# do all profile, bashrc, and ebuild sourcing. Should only be called in the setup phase, unless the
+# env is *completely* missing, as it is occasionally for ebuilds during prerm/postrm.
+init_environ() {
+ OCC="$CC"
+ OCXX="$CXX"
+ local EXISTING_PATH="$PATH"
+
+ if [ "${EBUILD_PHASE}" == "setup" ]; then
+ #we specifically save the env so it's not stomped on by sourcing.
+ #bug 51552
+ dump_environ --no-attributes > "${T}/.temp_env"
+
+ if [ "$USERLAND" == "GNU" ]; then
+ local PORTAGE_SHIFTED_PATH="$PATH"
+ source /etc/profile.env &>/dev/null
+ fi
+
+ #restore the saved env vars.
+ if ! load_environ "${T}/.temp_env"; then
+ #this shouldn't happen.
+			die "failed to load ${T}/.temp_env- fs is readonly?"
+ fi
+
+ rm "${T}/.temp_env"
+ source_profiles
+ fi
+
+ if [ "${EBUILD_PHASE}" != "depend" ]; then
+ [ ! -z "$OCC" ] && export CC="$OCC"
+ [ ! -z "$OCXX" ] && export CXX="$OCXX"
+
+ fi
+
+ # if daemonized, it's already loaded these funcs.
+ if [ "$DAEMONIZED" != "yes" ]; then
+ source "${PKGCORE_BIN_PATH}/ebuild-functions.sh" || die "failed sourcing ebuild-functions.sh"
+ fi
+ SANDBOX_ON="1"
+ export S=${WORKDIR}/${P}
+
+ # Expand KEYWORDS
+ # We need to turn off pathname expansion for -* in KEYWORDS and
+ # we need to escape ~ to avoid tilde expansion (damn bash) :)
+ set -f
+ KEYWORDS="$(echo ${KEYWORDS//~/\\~})"
+ set +f
+
+ unset IUSE DEPEND RDEPEND CDEPEND PDEPEND
+ unset E_IUSE E_DEPEND E_RDEPEND E_CDEPEND E_PDEPEND
+
+ if [ ! -f "${EBUILD}" ]; then
+ echo "bailing, ebuild not found at '$EBUILD'"
+ die "EBUILD=${EBUILD}; problem is, it doesn't exist. bye." >&2
+ fi
+
+ # XXX: temp hack to make misc broken eclasses behave, java-utils-2 for example
+ # XXX: as soon as these eclasses behave, remove this.
+ export DESTTREE=/usr
+
+ source "${EBUILD}"
+ if [ "${EBUILD_PHASE}" != "depend" ]; then
+ RESTRICT="${FINALIZED_RESTRICT}"
+ unset FINALIZED_RESTRICT
+ fi
+
+ [ -z "${ERRORMSG}" ] || die "${ERRORMSG}"
+
+ hasq nostrip ${RESTRICT} && export DEBUGBUILD=1
+
+ #a reasonable default for $S
+ if [ "$S" = "" ]; then
+ export S=${WORKDIR}/${P}
+ fi
+
+	#some users have $TMP/$TMPDIR set to a custom dir in their home ...
+ #this will cause sandbox errors with some ./configure
+ #scripts, so set it to $T.
+ export TMP="${T}"
+ export TMPDIR="${T}"
+
+ # Note: this next line is not the same as export RDEPEND=${RDEPEND:-${DEPEND}}
+	# That would test for unset *or* NULL (""). We only want to set it when RDEPEND is unset...
+
+ if [ "${RDEPEND-unset}" == "unset" ]; then
+ export RDEPEND="${DEPEND}"
+ fi
+
+ #add in dependency info from eclasses
+ IUSE="$IUSE $E_IUSE"
+ DEPEND="${DEPEND} ${E_DEPEND}"
+ RDEPEND="$RDEPEND $E_RDEPEND"
+ CDEPEND="$CDEPEND $E_CDEPEND"
+ PDEPEND="$PDEPEND $E_PDEPEND"
+
+ unset E_IUSE E_DEPEND E_RDEPEND E_CDEPEND E_PDEPEND
+ pkgcore_ensure_PATH "$EXISTING_PATH"
+}
+
+# short version. these should probably be sourced at the daemon's choice, rather than de facto.
+source "${PKGCORE_BIN_PATH}/ebuild-default-functions.sh" || die "failed sourcing ebuild-default-functions.sh"
+source "${PKGCORE_BIN_PATH}/isolated-functions.sh" || die "failed sourcing stripped down functions.sh"
+
+# general func to call for phase execution. this handles necessary env loading/dumping, and executing pre/post/dyn
+# calls.
+execute_phases() {
+ local ret
+ for myarg in $*; do
+ EBUILD_PHASE="$myarg"
+ MUST_EXPORT_ENV="no"
+ case $EBUILD_PHASE in
+ nofetch)
+ init_environ
+ pkg_nofetch
+ ;;
+ prerm|postrm|preinst|postinst|config)
+ export SANDBOX_ON="0"
+
+ if ! load_environ "${T}/environment"; then
+ #hokay. this sucks.
+ ewarn
+ ewarn "failed to load env"
+ ewarn "this installed pkg may not behave correctly"
+ ewarn
+ sleepbeep 10
+ fi
+
+ [[ $PKGCORE_DEBUG -ge 3 ]] && set -x
+ if type reinstate_loaded_env_attributes &> /dev/null; then
+ reinstate_loaded_env_attributes
+ unset -f reinstate_loaded_env_attributes
+ fi
+ [[ -n $PKGCORE_DEBUG ]] && set -x
+ type -p pre_pkg_${EBUILD_PHASE} &> /dev/null && pre_pkg_${EBUILD_PHASE}
+ if type -p dyn_${EBUILD_PHASE}; then
+ dyn_${EBUILD_PHASE}
+ else
+ pkg_${EBUILD_PHASE}
+ fi
+ ret=0
+
+ type -p post_pkg_${EBUILD_PHASE} &> /dev/null && post_pkg_${EBUILD_PHASE}
+ [[ $PKGCORE_DEBUG -lt 2 ]] && set +x
+ ;;
+ unpack|compile|test|install)
+ if [ "${SANDBOX_DISABLED="0"}" == "0" ]; then
+ export SANDBOX_ON="1"
+ else
+ export SANDBOX_ON="0"
+ fi
+
+ [[ $PKGCORE_DEBUG -ge 3 ]] && set -x
+ if ! load_environ ${T}/environment; then
+ ewarn
+ ewarn "failed to load env. This is bad, bailing."
+ die "unable to load saved env for phase $EBUILD_PHASE, unwilling to continue"
+ fi
+ if type reinstate_loaded_env_attributes &> /dev/null; then
+ reinstate_loaded_env_attributes
+ unset -f reinstate_loaded_env_attributes
+ fi
+ [[ -n $PKGCORE_DEBUG ]] && set -x
+ type -p pre_src_${EBUILD_PHASE} &> /dev/null && pre_src_${EBUILD_PHASE}
+ dyn_${EBUILD_PHASE}
+ ret=0
+ type -p post_src_${EBUILD_PHASE} &> /dev/null && post_src_${EBUILD_PHASE}
+ [[ $PKGCORE_DEBUG -lt 2 ]] && set +x
+ export SANDBOX_ON="0"
+ ;;
+ setup|setup-binpkg)
+ #pkg_setup needs to be out of the sandbox for tmp file creation;
+ #for example, awking and piping a file in /tmp requires a temp file to be created
+ #in /etc. If pkg_setup is in the sandbox, both our lilo and apache ebuilds break.
+
+ export SANDBOX_ON="0"
+
+ # binpkgs don't need to reinitialize the env.
+ if [ "$myarg" == "setup" ]; then
+ [ ! -z "${DISTCC_LOG}" ] && addwrite "$(dirname ${DISTCC_LOG})"
+
+ local x
+ # if they aren't set, then holy hell ensues. deal.
+
+ [ -z "${CCACHE_SIZE}" ] && export CCACHE_SIZE="500M"
+ ccache -M ${CCACHE_SIZE} &> /dev/null
+ [[ $PKGCORE_DEBUG == 2 ]] && set -x
+ init_environ
+ MUST_EXPORT_ENV="yes"
+ elif ! load_environ ${T}/environment; then
+ die "failed loading saved env; at ${T}/environment"
+ fi
+
+ [[ -n $PKGCORE_DEBUG ]] && set -x
+ type -p pre_pkg_setup &> /dev/null && \
+ pre_pkg_setup
+ dyn_setup
+ ret=0;
+ type -p post_pkg_setup &> /dev/null && \
+ post_pkg_setup
+ [[ $PKGCORE_DEBUG -lt 2 ]] && set +x
+
+ ;;
+ depend)
+ SANDBOX_ON="1"
+ MUST_EXPORT_ENV="no"
+
+ if [ -z "$QA_CONTROLLED_EXTERNALLY" ]; then
+ enable_qa_interceptors
+ fi
+
+ init_environ
+
+ if [ -z "$QA_CONTROLLED_EXTERNALLY" ]; then
+ disable_qa_interceptors
+ fi
+
+ speak "$(pkgcore_dump_metadata_keys)"
+ ;;
+ *)
+ export SANDBOX_ON="1"
+ echo "Please specify a valid command: $EBUILD_PHASE isn't valid."
+ echo
+ dyn_help
+ exit 1
+ ;;
+ esac
+
+ if [ "${MUST_EXPORT_ENV}" == "yes" ]; then
+ export_environ "${T}/environment"
+ MUST_EXPORT_ENV="no"
+ fi
+ [[ $PKGCORE_DEBUG -lt 4 ]] && set +x
+ done
+ return ${ret:-0}
+}
+
+pkgcore_dump_metadata_keys() {
+ set -f
+ [ "${DEPEND:-unset}" != "unset" ] && echo "key DEPEND=$(echo $DEPEND)"
+ [ "${RDEPEND:-unset}" != "unset" ] && echo "key RDEPEND=$(echo $RDEPEND)"
+	[ "${SLOT:-unset}" != "unset" ] && echo "key SLOT=$(echo $SLOT)"
+	[ "${SRC_URI:-unset}" != "unset" ] && echo "key SRC_URI=$(echo $SRC_URI)"
+	[ "${RESTRICT:-unset}" != "unset" ] && echo "key RESTRICT=$(echo $RESTRICT)"
+	[ "${HOMEPAGE:-unset}" != "unset" ] && echo "key HOMEPAGE=$(echo $HOMEPAGE)"
+	[ "${LICENSE:-unset}" != "unset" ] && echo "key LICENSE=$(echo $LICENSE)"
+	[ "${DESCRIPTION:-unset}" != "unset" ] && echo "key DESCRIPTION=$(echo $DESCRIPTION)"
+	[ "${KEYWORDS:-unset}" != "unset" ] && echo "key KEYWORDS=$(echo $KEYWORDS)"
+	[ "${INHERITED:-unset}" != "unset" ] && echo "key INHERITED=$(echo $INHERITED)"
+	[ "${IUSE:-unset}" != "unset" ] && echo "key IUSE=$(echo $IUSE)"
+	[ "${CDEPEND:-unset}" != "unset" ] && echo "key CDEPEND=$(echo $CDEPEND)"
+	[ "${PDEPEND:-unset}" != "unset" ] && echo "key PDEPEND=$(echo $PDEPEND)"
+	[ "${PROVIDE:-unset}" != "unset" ] && echo "key PROVIDE=$(echo $PROVIDE)"
+	[ "${EAPI:-unset}" != "unset" ] && echo "key EAPI=$(echo $EAPI)"
+ set +f
+}
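+# Illustrative output (hypothetical values): one "key NAME=value" line per defined variable, e.g.
+#   key SLOT=0
+#   key IUSE=doc examples
+# which the depend phase hands to the daemon via speak above.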
+
+# everything has been sourced- now lock down the read-onlys.
+if [ "$*" != "daemonize" ]; then
+ for x in ${DONT_EXPORT_FUNCS}; do
+ declare -fr "$x"
+ done
+ unset x
+fi
+
+f="$(declare | {
+ read l;
+ while [ "${l% \(\)}" == "$l" ]; do
+ echo "${l/=*}";
+ read l;
+ done;
+ unset l
+ })"
+
+#update the don't export filters.
+if [ -z "${ORIG_VARS}" ]; then
+ DONT_EXPORT_VARS="${DONT_EXPORT_VARS} ${f}"
+else
+ DONT_EXPORT_VARS="${DONT_EXPORT_VARS} $(echo "${f}" | grep -v "^$(gen_regex_var_filter ${ORIG_VARS})\$")"
+fi
+unset f
+
+[ -z "${ORIG_FUNCS}" ] && DONT_EXPORT_FUNCS="${DONT_EXPORT_FUNCS} $(declare -F | cut -s -d ' ' -f 3)"
+set +f
+
+export XARGS
+if [ "$(id -nu)" == "portage" ] ; then
+ export USER=portage
+fi
+set +H -h
+# if we're being src'd for our functions, do nothing. if called directly, define a few necessary funcs.
+if [ "$*" != "daemonize" ]; then
+
+ if [ "${*/depend}" != "$*" ]; then
+ speak() {
+ echo "$*" >&4
+ }
+ declare -rf speak
+ fi
+ if [ -z "${NOCOLOR}" ]; then
+ set_colors
+ else
+ unset_colors
+ fi
+ unset x
+ execute_phases $*
+ exit 0
+else
+ DAEMONIZED="yes"
+ export DAEMONIZED
+ readonly DAEMONIZED
+fi
+:
diff --git a/pkgcore/bin/ebuild-env/filter-env b/pkgcore/bin/ebuild-env/filter-env
new file mode 100755
index 0000000..455e337
--- /dev/null
+++ b/pkgcore/bin/ebuild-env/filter-env
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+"""Commandline wrapper."""
+
+from pkgcore.scripts import filter_env
+from pkgcore.util import commandline
+
+if __name__ == '__main__':
+ commandline.main({None: (filter_env.OptionParser, filter_env.main)})
diff --git a/pkgcore/bin/ebuild-env/isolated-functions.sh b/pkgcore/bin/ebuild-env/isolated-functions.sh
new file mode 100644
index 0000000..ee5d690
--- /dev/null
+++ b/pkgcore/bin/ebuild-env/isolated-functions.sh
@@ -0,0 +1,187 @@
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header$
+
+# Internal logging function, don't use this in ebuilds
+elog_base() {
+ local messagetype
+ [ -z "${1}" -o -z "${T}" -o ! -d "${T}/logging" ] && return 1
+ case "${1}" in
+ INFO|WARN|ERROR|LOG)
+ messagetype="${1}"
+ shift
+ ;;
+ *)
+ echo -e " ${PKGCORE_RC_BAD}*${PKGCORE_RC_NORMAL} Invalid use of internal function elog_base(), next message will not be logged"
+ return 1
+ ;;
+ esac
+ echo "$*" >> ${T}/logging/${EBUILD_PHASE}.${messagetype}
+ return 0
+}
+
+elog() {
+ elog_base LOG "$*"
+ echo -e " ${PKGCORE_RC_GOOD}*${PKGCORE_RC_NORMAL} $*"
+ return 0
+}
+
+esyslog() {
+ local pri=
+ local tag=
+
+ if [ -x /usr/bin/logger ]
+ then
+ pri="$1"
+ tag="$2"
+
+ shift 2
+ [ -z "$*" ] && return 0
+
+ /usr/bin/logger -p "${pri}" -t "${tag}" -- "$*"
+ fi
+
+ return 0
+}
+
+einfo() {
+ einfon "$*\n"
+ PKGCORE_RC_LAST_CMD="einfo"
+ return 0
+}
+
+einfon() {
+ elog_base INFO "$*"
+ echo -ne " ${PKGCORE_RC_GOOD}*${PKGCORE_RC_NORMAL} $*"
+ PKGCORE_RC_LAST_CMD="einfon"
+ return 0
+}
+
+ewarn() {
+ elog_base WARN "$*"
+ echo -e " ${PKGCORE_RC_WARN}*${PKGCORE_RC_NORMAL} $*"
+ PKGCORE_RC_LAST_CMD="ewarn"
+ return 0
+}
+
+eerror() {
+ elog_base ERROR "$*"
+ echo -e " ${PKGCORE_RC_BAD}*${PKGCORE_RC_NORMAL} $*"
+ PKGCORE_RC_LAST_CMD="eerror"
+ return 0
+}
+
+ebegin() {
+ local msg="$* ..."
+ einfon "${msg}"
+ echo
+ PKGCORE_RC_LAST_CMD="ebegin"
+ return 0
+}
+
+_eend() {
+ local retval=${1:-0} efunc=${2:-eerror} msg
+ shift 2
+
+ if [[ ${retval} == "0" ]] ; then
+ msg="${PKGCORE_RC_BRACKET}[ ${PKGCORE_RC_GOOD}ok${PKGCORE_RC_BRACKET} ]${PKGCORE_RC_NORMAL}"
+ else
+ if [[ -n $* ]] ; then
+ ${efunc} "$*"
+ fi
+ msg="${PKGCORE_RC_BRACKET}[ ${PKGCORE_RC_BAD}!!${PKGCORE_RC_BRACKET} ]${PKGCORE_RC_NORMAL}"
+ fi
+
+ echo -e "${PKGCORE_RC_ENDCOL} ${msg}"
+
+ return ${retval}
+}
+
+eend() {
+ local retval=${1:-0}
+ shift
+
+ _eend ${retval} eerror "$*"
+
+ return ${retval}
+}
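+# Typical pairing (illustrative only; some_command is a placeholder):
+#   ebegin "Compressing man pages"
+#   some_command; eend $? "compression failed"
+# prints the [ ok ] / [ !! ] marker and passes the exit code through.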
+
+KV_major() {
+ [[ -z $1 ]] && return 1
+
+ local KV=$@
+ echo "${KV%%.*}"
+}
+
+KV_minor() {
+ [[ -z $1 ]] && return 1
+
+ local KV=$@
+ KV=${KV#*.}
+ echo "${KV%%.*}"
+}
+
+KV_micro() {
+ [[ -z $1 ]] && return 1
+
+ local KV=$@
+ KV=${KV#*.*.}
+ echo "${KV%%[^[:digit:]]*}"
+}
+
+KV_to_int() {
+ [[ -z $1 ]] && return 1
+
+ local KV_MAJOR=$(KV_major "$1")
+ local KV_MINOR=$(KV_minor "$1")
+ local KV_MICRO=$(KV_micro "$1")
+ local KV_int=$(( KV_MAJOR * 65536 + KV_MINOR * 256 + KV_MICRO ))
+
+ # We make version 2.2.0 the minimum version we will handle as
+	# a sanity check ... if it's less, we fail ...
+ if [[ ${KV_int} -ge 131584 ]] ; then
+ echo "${KV_int}"
+ return 0
+ fi
+
+ return 1
+}
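+# Worked example (comment only): KV_to_int 2.6.22
+#   -> 2*65536 + 6*256 + 22 = 132630, which is >= 131584 (2.2.0), so it is echoed.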
+
+get_KV() {
+ echo $(KV_to_int "$(uname -r)")
+}
+
+unset_colors() {
+ PKGCORE_RC_COLS="25 80"
+ PKGCORE_RC_ENDCOL=
+ PKGCORE_RC_GOOD=
+ PKGCORE_RC_WARN=
+ PKGCORE_RC_BAD=
+ PKGCORE_RC_NORMAL=
+ PKGCORE_RC_HILITE=
+ PKGCORE_RC_BRACKET=
+}
+
+set_colors() {
+ # try setting the column width to bash's internal COLUMNS variable,
+ # then try to get it via stty. no go? hardcode it to 80.
+ PKGCORE_RC_COLS=${COLUMNS:-0}
+ (( PKGCORE_RC_COLS == 0 )) && PKGCORE_RC_COLS=$(set -- `stty size 2>/dev/null` ; echo $2)
+ (( PKGCORE_RC_COLS > 0 )) || (( PKGCORE_RC_COLS = 80 ))
+ PKGCORE_RC_COLS=$((${PKGCORE_RC_COLS} - 8)) # width of [ ok ] == 7
+
+ PKGCORE_RC_ENDCOL=$'\e[A\e['${PKGCORE_RC_COLS}'C'
+ # Now, ${PKGCORE_RC_ENDCOL} will move us to the end of the
+	# column, regardless of character width
+
+ PKGCORE_RC_GOOD=$'\e[32;01m'
+ PKGCORE_RC_WARN=$'\e[33;01m'
+ PKGCORE_RC_BAD=$'\e[31;01m'
+ PKGCORE_RC_HILITE=$'\e[36;01m'
+ PKGCORE_RC_BRACKET=$'\e[34;01m'
+ PKGCORE_RC_NORMAL=$'\e[0m'
+}
+
+unset_colors
+DONT_EXPORT_VARS="${DONT_EXPORT_VARS} PKGCORE_RC_.*"
+true
diff --git a/pkgcore/bin/ebuild-env/portageq_emulation b/pkgcore/bin/ebuild-env/portageq_emulation
new file mode 100755
index 0000000..8d644bc
--- /dev/null
+++ b/pkgcore/bin/ebuild-env/portageq_emulation
@@ -0,0 +1,178 @@
+#!/usr/bin/python -O
+# Copyright 2006 Brian Harring <ferringb@gmail.com>
+
+# disable sandbox for any pyc regens
+import os
+os.environ["SANDBOX_ON"] = "0"
+
+from snakeoil.demandload import demandload
+demandload(globals(),
+ "snakeoil.iterables:caching_iter",
+ "pkgcore.config:load_config",
+ "pkgcore.ebuild.atom:atom",
+ "pkgcore.util.packages:get_raw_pkg",
+ "sys",
+ "os",
+)
+
+def str_pkg(pkg):
+ pkg = get_raw_pkg(pkg)
+ # special casing; old style virtuals come through as the original pkg.
+ if pkg.package_is_real:
+ return pkg.cpvstr
+ if hasattr(pkg, "actual_pkg"):
+ return pkg.actual_pkg.cpvstr
+ # icky, but works.
+ return str(pkg.rdepends).lstrip("=")
+
+def expose_to_commandline(count, **kwds):
+ def internal_f(f):
+ f.args = count
+ f.swallow_root = kwds.pop("swallow_root", False)
+ f.command_handler = True
+ return f
+ return internal_f
+
+def set_arg_count(count):
+ def internal_f(f):
+ f.args = count
+ return f
+ return internal_f
+
+default_get = lambda d,k: d.settings.get(k, "")
+distdir_get = lambda d,k: d.settings["fetcher"].distdir
+envvar_getter = {"DISTDIR":distdir_get}
+
+@expose_to_commandline(-1)
+def envvar(domain, *keys):
+ """
+ return configuration defined variables
+ """
+ return ["".join("%s\n" % envvar_getter.get(x, default_get)(domain, x)
+ for x in keys), 0]
+
+def make_atom(a):
+ a = atom(a)
+ # force expansion.
+ a.restrictions
+ return a
+
+@expose_to_commandline(1, swallow_root=True)
+def has_version(domain, arg):
+ """
+ @param domain: L{pkgcore.config.domain.domain} instance
+ @param atom_str: L{pkgcore.ebuild.atom.atom} instance
+ """
+ arg = make_atom(arg)
+ if caching_iter(domain.all_vdbs.itermatch(arg)):
+ return ['', 0]
+ return ['', 1]
+
+@expose_to_commandline(-1, swallow_root=True)
+def mass_best_version(domain, *args):
+ """
+ multiple best_version calls
+ """
+ return ["".join("%s:%s\n" % (x, best_version(domain, x)[0].rstrip())
+ for x in args), 0]
+
+@expose_to_commandline(1, swallow_root=True)
+def best_version(domain, arg):
+ """
+ @param domain: L{pkgcore.config.domain.domain} instance
+ @param atom_str: L{pkgcore.ebuild.atom.atom} instance
+ """
+ # temp hack, configured pkgs yield "configured(blah) pkg"
+ arg = make_atom(arg)
+ try:
+ p = max(domain.all_vdbs.itermatch(arg))
+ except ValueError:
+ # empty sequence.
+ return ['', 1]
+ return [str_pkg(get_raw_pkg(p)) + "\n", 0]
+
+
+@expose_to_commandline(1, swallow_root=True)
+def match(domain, arg):
+ """
+ @param domain: L{pkgcore.config.domain.domain} instance
+ @param atom_str: L{pkgcore.ebuild.atom.atom} instance
+ """
+ arg = make_atom(arg)
+ # temp hack, configured pkgs yield "configured(blah) pkg"
+ l = sorted(get_raw_pkg(x) for x in domain.all_repos.itermatch(arg))
+ if not l:
+ return ['', 1]
+ return ["".join(str_pkg(x) +"\n" for x in l), 0]
+
+
+def usage():
+ print "\nusage: command domain atom"
+    print "domain is the string name of the domain to query from; if omitted, the default domain is used"
+ print "\n=available commands=\n"
+ for k, v in globals().iteritems():
+ if not getattr(v, "command_handler", False):
+ continue
+ print k
+ print "\n".join(" "+x for x in [s.strip() for s in v.__doc__.split("\n")] if x)
+ print
+
+def main():
+ a = sys.argv[1:]
+ if "--usage" in a or "--help" in a:
+ usage()
+ sys.exit(0)
+ if not a:
+ usage()
+ sys.exit(1)
+
+ if "--domain" in a:
+ i = a.index("--domain")
+ domain = a[i+1]
+ del a[i]
+ del a[i]
+ else:
+ domain = None
+ try:
+ command = globals()[a[0]]
+ if not getattr(command, "command_handler", False):
+ raise KeyError
+ except KeyError:
+ print "%s isn't a valid command" % a[0]
+ usage()
+ sys.exit(2)
+
+ if command.swallow_root:
+ try:
+ a.pop(0)
+ except IndexError:
+ print "arg count is wrong"
+ usage()
+ sys.exit(2)
+
+ bad = False
+ if command.args == -1:
+ bad = not a
+ else:
+ bad = len(a) - 1 != command.args
+ if bad:
+ print "arg count is wrong"
+ usage()
+ sys.exit(2)
+
+ if domain is None:
+ domain = load_config().get_default("domain")
+ else:
+ domain = load_config().domain.get(domain)
+
+ if domain is None:
+ print "no default domain in your configuration, or what was specified manually wasn't found."
+ print "known domains- %r" % list(load_config().domain.iterkeys())
+ sys.exit(2)
+
+ s, ret = command(domain, *a[1:])
+ sys.stdout.write(s)
+ sys.exit(ret)
+
+if __name__ == "__main__":
+ main()
diff --git a/pkgcore/bin/ebuild-helpers/dobin b/pkgcore/bin/ebuild-helpers/dobin
new file mode 100755
index 0000000..a3269ed
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/dobin
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: /var/cvsroot/gentoo-src/portage/bin/dobin,v 1.13 2004/10/04 13:56:50 vapier Exp $
+
+if [[ $# -lt 1 ]] ; then
+ echo "$0: at least one argument needed" 1>&2
+ exit 1
+fi
+
+if [[ ! -d ${D}${DESTTREE}/bin ]] ; then
+ install -d "${D}${DESTTREE}/bin" || exit 2
+fi
+
+ret=0
+
+for x in "$@" ; do
+ if [[ -e ${x} ]] ; then
+ install -m0755 -o ${PORTAGE_INST_UID:-0} -g ${PORTAGE_INST_GID:-0} "${x}" "${D}${DESTTREE}/bin"
+ else
+ echo "!!! ${0##*/}: ${x} does not exist" 1>&2
+ false
+ fi
+ ((ret+=$?))
+done
+
+exit ${ret}
diff --git a/pkgcore/bin/ebuild-helpers/doconfd b/pkgcore/bin/ebuild-helpers/doconfd
new file mode 100755
index 0000000..e9ea1fd
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/doconfd
@@ -0,0 +1,14 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: /var/cvsroot/gentoo-src/portage/bin/doconfd,v 1.2.2.1 2005/01/13 04:51:56 vapier Exp $
+
+if [[ $# -lt 1 ]] ; then
+ echo "$0: at least one argument needed" 1>&2
+ exit 1
+fi
+
+exec \
+env \
+INSDESTTREE="/etc/conf.d/" \
+doins "$@"
diff --git a/pkgcore/bin/ebuild-helpers/dodir b/pkgcore/bin/ebuild-helpers/dodir
new file mode 100755
index 0000000..bc4f7f5
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/dodir
@@ -0,0 +1,7 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: /var/cvsroot/gentoo-src/portage/bin/dodir,v 1.5 2004/10/04 13:56:50 vapier Exp $
+
+slash=/
+exec install -d ${DIROPTIONS} "${@/#${slash}/${D}${slash}}"
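+# The substitution above prefixes each absolute argument with ${D}; illustratively,
+# `dodir /usr/share/foo` runs `install -d "${D}/usr/share/foo"`.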
diff --git a/pkgcore/bin/ebuild-helpers/dodoc b/pkgcore/bin/ebuild-helpers/dodoc
new file mode 100755
index 0000000..60b6a27
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/dodoc
@@ -0,0 +1,26 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+if [ $# -lt 1 ] ; then
+ echo "$0: at least one argument needed" 1>&2
+ exit 1
+fi
+
+dir="${D}usr/share/doc/${PF}/${DOCDESTTREE}"
+if [ ! -d "${dir}" ] ; then
+ install -d "${dir}"
+fi
+
+ret=0
+for x in "$@" ; do
+ if [ -s "${x}" ] ; then
+ install -m0644 "${x}" "${dir}"
+ gzip -f -9 "${dir}/${x##*/}"
+ elif [ ! -e "${x}" ] ; then
+ echo "dodoc: ${x} does not exist" 1>&2
+ ((++ret))
+ fi
+done
+
+exit ${ret}
diff --git a/pkgcore/bin/ebuild-helpers/doenvd b/pkgcore/bin/ebuild-helpers/doenvd
new file mode 100755
index 0000000..5232ed9
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/doenvd
@@ -0,0 +1,14 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: /var/cvsroot/gentoo-src/portage/bin/doenvd,v 1.2.2.1 2005/01/13 04:51:56 vapier Exp $
+
+if [[ $# -lt 1 ]] ; then
+ echo "$0: at least one argument needed" 1>&2
+ exit 1
+fi
+
+exec \
+env \
+INSDESTTREE="/etc/env.d/" \
+doins "$@"
diff --git a/pkgcore/bin/ebuild-helpers/doexe b/pkgcore/bin/ebuild-helpers/doexe
new file mode 100755
index 0000000..3badead
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/doexe
@@ -0,0 +1,33 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: /var/cvsroot/gentoo-src/portage/bin/doexe,v 1.10.2.1 2004/12/06 03:01:43 carpaski Exp $
+
+
+if [ -z "${PKGCORE_BIN_PATH}" ]; then
+ echo "PKGCORE_BIN_PATH is unset!"
+ exit -1
+fi
+source "${PKGCORE_BIN_PATH}/isolated-functions.sh"
+
+if [[ $# -lt 1 ]] ; then
+ echo "$0: at least one argument needed" 1>&2
+ exit 1
+fi
+
+if [[ ! -d ${D}${EXEDESTTREE} ]] ; then
+ install -d "${D}${EXEDESTTREE}"
+fi
+
+for x in "$@" ; do
+ if [ -L "${x}" ] ; then
+ cp "${x}" "${T}"
+ mysrc="${T}"/$(/usr/bin/basename "${x}")
+ elif [ -d "${x}" ] ; then
+ vecho "doexe: warning, skipping directory ${x}"
+ continue
+ else
+ mysrc="${x}"
+ fi
+ install ${EXEOPTIONS} "${mysrc}" "${D}${EXEDESTTREE}"
+done
diff --git a/pkgcore/bin/ebuild-helpers/dohard b/pkgcore/bin/ebuild-helpers/dohard
new file mode 100755
index 0000000..2270487
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/dohard
@@ -0,0 +1,13 @@
+#!/bin/bash
+# Copyright 1999-2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+if [[ $# -ne 2 ]] ; then
+ echo "$0: two arguments needed" 1>&2
+ exit 1
+fi
+
+destdir=${2%/*}
+[[ ! -d ${D}${destdir} ]] && dodir "${destdir}"
+
+exec ln -f "${D}$1" "${D}$2"
diff --git a/pkgcore/bin/ebuild-helpers/dohtml b/pkgcore/bin/ebuild-helpers/dohtml
new file mode 100755
index 0000000..e5614ab
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/dohtml
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: /var/cvsroot/gentoo-src/portage/bin/dohtml,v 1.14.2.1 2004/10/27 14:39:29 jstubbs Exp $
+
+#
+# Typical usage:
+# dohtml -r docs/*
+# - put all files and directories in docs into /usr/share/doc/${PF}/html
+# dohtml foo.html
+# - put foo.html into /usr/share/doc/${PF}/html
+#
+#
+# Detailed usage:
+# dohtml <list-of-files>
+# - will install the files in the list of files (space-separated list) into
+# /usr/share/doc/${PF}/html, provided the file ends in .html, .png, .jpg
+# or .css
+# dohtml -r <list-of-files-and-directories>
+# - will do as 'dohtml', but recurse into all directories, as long as the
+# directory name is not CVS
+# dohtml -A jpe,java [-r] <list-of-files[-and-directories]>
+# - will do as 'dohtml' but add .jpe,.java (default filter list is
+# added to your list)
+# dohtml -a png,gif,html,htm [-r] <list-of-files[-and-directories]>
+# - will do as 'dohtml' but filter on .png,.gif,.html,.htm (default filter
+# list is ignored)
+# dohtml -x CVS,SCCS,RCS -r <list-of-files-and-directories>
+# - will do as 'dohtml -r', but ignore directories named CVS, SCCS, RCS
+#
+
+import os
+import string
+import sys
+import types
+
+def dodir(path):
+ os.system("install -d '%s'" % path)
+
+def dofile(src,dst):
+
+ os.system("install -m0644 '%s' '%s'" % (src, dst))
+
+def install(basename, dirname, options, prefix=""):
+
+ fullpath = basename
+ if prefix: fullpath = prefix + "/" + fullpath
+ if dirname: fullpath = dirname + "/" + fullpath
+
+ if options.DOCDESTTREE:
+ destdir = options.D + "usr/share/doc/" + options.PF + "/" + options.DOCDESTTREE + "/" + options.doc_prefix + "/" + prefix
+ else:
+ destdir = options.D + "usr/share/doc/" + options.PF + "/html/" + options.doc_prefix + "/" + prefix
+
+ if os.path.isfile(fullpath):
+ ext = os.path.splitext(basename)[1]
+ if (len(ext) and ext[1:] in options.allowed_exts) or basename in options.allowed_files:
+ dodir(destdir)
+ dofile(fullpath, destdir + "/" + basename)
+ elif options.recurse and os.path.isdir(fullpath) and \
+ basename not in options.disallowed_dirs:
+ for i in os.listdir(fullpath):
+ pfx = basename
+ if prefix: pfx = prefix + "/" + pfx
+ install(i, dirname, options, pfx)
+ else:
+ return False
+ return True
+
+
+class OptionsClass:
+ def __init__(self):
+ self.PF = ""
+ self.D = ""
+ self.DOCDESTTREE = ""
+
+ if os.environ.has_key("PF"):
+ self.PF = os.environ["PF"]
+ if os.environ.has_key("D"):
+ self.D = os.environ["D"]
+ if os.environ.has_key("DOCDESTTREE"):
+ self.DOCDESTTREE = os.environ["DOCDESTTREE"]
+
+ self.allowed_exts = [ 'png', 'gif', 'html', 'htm', 'jpg', 'css', 'js' ]
+ self.allowed_files = []
+ self.disallowed_dirs = [ 'CVS' ]
+ self.recurse = False
+ self.verbose = False
+ self.doc_prefix = ""
+
+def print_help():
+ opts = OptionsClass()
+
+ print "dohtml [-a .foo,.bar] [-A .foo,.bar] [-f foo,bar] [-x foo,bar]"
+ print " [-r] [-V] <file> [file ...]"
+ print
+	print " -a Set the list of allowed file types to those specified."
+ print " Default:", string.join(opts.allowed_exts, ",")
+ print " -A Extend the list of allowed file types."
+ print " -f Set list of allowed extensionless file names."
+ print " -x Set directories to be excluded from recursion."
+ print " Default:", string.join(opts.disallowed_dirs, ",")
+ print " -r Install files and directories recursively."
+ print " -V Be verbose."
+ print
+
+def parse_args():
+ options = OptionsClass()
+ args = []
+
+ x = 1
+ while x < len(sys.argv):
+ arg = sys.argv[x]
+ if arg in ["-h","-r","-V"]:
+ if arg == "-h":
+ print_help()
+ sys.exit(0)
+ elif arg == "-r":
+ options.recurse = True
+ elif arg == "-V":
+ options.verbose = True
+ elif sys.argv[x] in ["-A","-a","-f","-x","-p"]:
+ x += 1
+ if x == len(sys.argv):
+ print_help()
+ sys.exit(0)
+ elif arg == "-p":
+ options.doc_prefix = sys.argv[x]
+ else:
+ values = string.split(sys.argv[x], ",")
+ if arg == "-A":
+ options.allowed_exts.extend(values)
+ elif arg == "-a":
+ options.allowed_exts = values
+ elif arg == "-f":
+ options.allowed_files = values
+ elif arg == "-x":
+ options.disallowed_dirs = values
+ else:
+ args.append(sys.argv[x])
+ x += 1
+
+ return (options, args)
+
+def main():
+
+ (options, args) = parse_args()
+
+ if type(options.allowed_exts) == types.StringType:
+ options.allowed_exts = options.allowed_exts.split(",")
+
+ if options.verbose:
+ print "Allowed extensions:", options.allowed_exts
+ print "Document prefix : '" + options.doc_prefix + "'"
+ print "Allowed files :", options.allowed_files
+
+ success = True
+
+ for x in args:
+ basename = os.path.basename(x)
+ dirname = os.path.dirname(x)
+ success = success and install(basename, dirname, options)
+
+ if success:
+ retcode = 0
+ else:
+ retcode = 1
+
+ sys.exit(retcode)
+
+if __name__ == "__main__":
+ main()
diff --git a/pkgcore/bin/ebuild-helpers/doinfo b/pkgcore/bin/ebuild-helpers/doinfo
new file mode 100755
index 0000000..67b163b
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/doinfo
@@ -0,0 +1,21 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: /var/cvsroot/gentoo-src/portage/bin/doinfo,v 1.7 2004/10/04 13:56:50 vapier Exp $
+
+if [ ${#} -lt 1 ] ; then
+ echo "doinfo: at least one argument needed"
+ exit 1
+fi
+if [ ! -d "${D}usr/share/info" ] ; then
+ install -d "${D}usr/share/info"
+fi
+
+for x in "$@" ; do
+ if [ -e "${x}" ] ; then
+ install -m0644 "${x}" "${D}usr/share/info"
+ gzip -f -9 "${D}usr/share/info/${x##*/}"
+ else
+ echo "doinfo: ${x} does not exist"
+ fi
+done
diff --git a/pkgcore/bin/ebuild-helpers/doinitd b/pkgcore/bin/ebuild-helpers/doinitd
new file mode 100755
index 0000000..8aae1b3
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/doinitd
@@ -0,0 +1,14 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: /var/cvsroot/gentoo-src/portage/bin/doinitd,v 1.2.2.1 2005/01/13 04:51:56 vapier Exp $
+
+if [[ $# -lt 1 ]] ; then
+ echo "$0: at least one argument needed" 1>&2
+ exit 1
+fi
+
+exec \
+env \
+EXEDESTTREE="/etc/init.d/" \
+doexe "$@"
diff --git a/pkgcore/bin/ebuild-helpers/doins b/pkgcore/bin/ebuild-helpers/doins
new file mode 100755
index 0000000..2a3fb5b
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/doins
@@ -0,0 +1,59 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: /var/cvsroot/gentoo-src/portage/bin/doins,v 1.7.2.2 2004/12/17 22:25:13 carpaski Exp $
+
+if [ -z "${PKGCORE_BIN_PATH}" ]; then
+ echo "PKGCORE_BIN_PATH is unset!"
+ exit -1
+fi
+source "${PKGCORE_BIN_PATH}/isolated-functions.sh"
+
+if [ $# -lt 1 ] ; then
+ echo "${0}: at least one argument needed"
+ exit 1
+fi
+
+if [ "${1}" == "-r" ] ; then
+ DOINSRECUR=y
+ shift
+else
+ DOINSRECUR=n
+fi
+[ -z "${INSDEPTH}" ] && declare -i INSDEPTH=0
+if [ ${INSDEPTH} -gt 30 ] ; then
+ echo "${0}: sanity check ... 30 directories is too much :("
+ exit 1
+fi
+
+if [ "${INSDESTTREE%${D}*}" == "" ]; then
+ vecho "-------------------------------------------------------" 1>&2
+ vecho "You should not use \${D} with helpers." 1>&2
+ vecho " --> ${INSDESTTREE}" 1>&2
+ vecho "-------------------------------------------------------" 1>&2
+ #exit 1
+fi
+
+[ ! -d "${D}${INSDESTTREE}" ] && dodir "${INSDESTTREE}"
+
+for x in "$@" ; do
+ if [ -L "$x" ] ; then
+ cp "$x" "${T}"
+ mysrc="${T}/$(/usr/bin/basename "${x}")"
+ elif [ -d "$x" ] ; then
+ if [ "${DOINSRECUR}" == "n" ] ; then
+ continue
+ fi
+
+ mydir="${INSDESTTREE}/$(basename "${x}")"
+ find "${x}" -mindepth 1 -maxdepth 1 -exec \
+ env \
+ INSDESTTREE="${mydir}" \
+ INSDEPTH=$((INSDEPTH+1)) \
+ doins -r {} \;
+ continue
+ else
+ mysrc="${x}"
+ fi
+ install ${INSOPTIONS} "${mysrc}" "${D}${INSDESTTREE}"
+done
diff --git a/pkgcore/bin/ebuild-helpers/dolib b/pkgcore/bin/ebuild-helpers/dolib
new file mode 100755
index 0000000..1a61525
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/dolib
@@ -0,0 +1,41 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: /var/cvsroot/gentoo-src/portage/bin/dolib,v 1.8.2.2 2005/01/12 02:07:15 carpaski Exp $
+
+# Setup ABI cruft
+LIBDIR_VAR="LIBDIR_${ABI}"
+if [[ -n ${ABI} && -n ${!LIBDIR_VAR} ]] ; then
+ CONF_LIBDIR=${!LIBDIR_VAR}
+fi
+unset LIBDIR_VAR
+# we need this to default to lib so that things dont break
+CONF_LIBDIR=${CONF_LIBDIR:-lib}
+libdir="${D}${DESTTREE}/${CONF_LIBDIR}"
+
+
+if [[ $# -lt 1 ]] ; then
+ echo "$0: at least one argument needed" 1>&2
+ exit 1
+fi
+if [[ ! -d ${libdir} ]] ; then
+ install -d "${libdir}" || exit 1
+fi
+
+ret=0
+
+for x in "$@" ; do
+ if [[ -e ${x} ]] ; then
+ if [[ ! -L ${x} ]] ; then
+ install ${LIBOPTIONS} "${x}" "${libdir}"
+ else
+ ln -s "$(readlink "${x}")" "${libdir}/${x##*/}"
+ fi
+ else
+ echo "!!! ${0##*/}: ${x} does not exist" 1>&2
+ false
+ fi
+ ((ret+=$?))
+done
+
+exit ${ret}
diff --git a/pkgcore/bin/ebuild-helpers/dolib.a b/pkgcore/bin/ebuild-helpers/dolib.a
new file mode 100755
index 0000000..c4df4a4
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/dolib.a
@@ -0,0 +1,7 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: /var/cvsroot/gentoo-src/portage/bin/dolib.a,v 1.8 2004/10/10 10:07:20 carpaski Exp $
+
+exec env LIBOPTIONS="-m0644" \
+ dolib "$@"
diff --git a/pkgcore/bin/ebuild-helpers/dolib.so b/pkgcore/bin/ebuild-helpers/dolib.so
new file mode 100755
index 0000000..efd8c16
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/dolib.so
@@ -0,0 +1,7 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: /var/cvsroot/gentoo-src/portage/bin/dolib.so,v 1.12 2004/10/10 10:07:20 carpaski Exp $
+
+exec env LIBOPTIONS="-m0755" \
+ dolib "$@"
diff --git a/pkgcore/bin/ebuild-helpers/doman b/pkgcore/bin/ebuild-helpers/doman
new file mode 100755
index 0000000..4c7f2bd
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/doman
@@ -0,0 +1,58 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: /var/cvsroot/gentoo-src/portage/bin/doman,v 1.13.2.2 2005/07/29 05:55:34 vapier Exp $
+
+if [ -z "${PKGCORE_BIN_PATH}" ]; then
+ echo "PKGCORE_BIN_PATH is unset!"
+ exit -1
+fi
+source "${PKGCORE_BIN_PATH}/isolated-functions.sh"
+
+if [[ $# -lt 1 ]] ; then
+ echo "$0: at least one argument needed" 1>&2
+ exit 1
+fi
+
+i18n=""
+
+ret=0
+
+for x in "$@" ; do
+ if [[ ${x:0:6} == "-i18n=" ]] ; then
+ i18n=${x:6}/
+ continue
+ fi
+ if [[ ${x} == ".keep" ]] ; then
+ continue
+ fi
+
+ suffix=${x##*.}
+
+ if [[ ${suffix} == "gz" ]] ; then
+ realname=${x%.*}
+ suffix=${realname##*.}
+		vecho "QA Notice: you should let portage compress '${realname}' for you" 1>&2
+ fi
+
+ mandir=${i18n}man${suffix:0:1}
+
+ if echo ${mandir} | egrep -q 'man[0-9n](|f|p|pm)$' -; then
+ if [[ -s ${x} ]] ; then
+ if [[ ! -d ${D}/usr/share/man/${mandir} ]] ; then
+ install -d "${D}/usr/share/man/${mandir}"
+ fi
+
+ install -m0644 "${x}" "${D}/usr/share/man/${mandir}"
+ ((ret+=$?))
+ elif [[ ! -e ${x} ]] ; then
+ vecho "doman: ${x} does not exist" 1>&2
+ ((++ret))
+ fi
+ else
+ vecho "doman: '${x}' is probably not a man page; skipping" 1>&2
+ ((++ret))
+ fi
+done
+
+exit ${ret}
diff --git a/pkgcore/bin/ebuild-helpers/domo b/pkgcore/bin/ebuild-helpers/domo
new file mode 100755
index 0000000..8295059
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/domo
@@ -0,0 +1,26 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: /var/cvsroot/gentoo-src/portage/bin/domo,v 1.6 2004/10/04 13:56:50 vapier Exp $
+
+mynum=${#}
+if [ ${mynum} -lt 1 ] ; then
+ echo "${0}: at least one argument needed"
+ exit 1
+fi
+if [ ! -d "${D}${DESTTREE}/share/locale" ] ; then
+ install -d "${D}${DESTTREE}/share/locale/"
+fi
+
+for x in "$@" ; do
+ if [ -e "${x}" ] ; then
+ mytiny="${x##*/}"
+ mydir="${D}${DESTTREE}/share/locale/${mytiny%.*}/LC_MESSAGES"
+ if [ ! -d "${mydir}" ] ; then
+ install -d "${mydir}"
+ fi
+ install -m0644 "${x}" "${mydir}/${MOPREFIX}.mo"
+ else
+ echo "${0}: ${x} does not exist"
+ fi
+done
diff --git a/pkgcore/bin/ebuild-helpers/donewins b/pkgcore/bin/ebuild-helpers/donewins
new file mode 120000
index 0000000..59a0db2
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/donewins
@@ -0,0 +1 @@
+newins \ No newline at end of file
diff --git a/pkgcore/bin/ebuild-helpers/dosbin b/pkgcore/bin/ebuild-helpers/dosbin
new file mode 100755
index 0000000..30aa789
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/dosbin
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: /var/cvsroot/gentoo-src/portage/bin/dosbin,v 1.11 2004/10/04 13:56:50 vapier Exp $
+
+if [[ $# -lt 1 ]] ; then
+ echo "$0: at least one argument needed" 1>&2
+ exit 1
+fi
+
+if [[ ! -d ${D}${DESTTREE}/sbin ]] ; then
+ install -d "${D}${DESTTREE}/sbin" || exit 2
+fi
+
+ret=0
+
+for x in "$@" ; do
+ if [[ -e ${x} ]] ; then
+ install -m0755 -o ${PORTAGE_INST_UID:-0} -g ${PORTAGE_INST_GID:-0} "${x}" "${D}${DESTTREE}/sbin"
+ else
+ echo "!!! ${0##*/}: ${x} does not exist" 1>&2
+ false
+ fi
+ ((ret+=$?))
+done
+
+exit ${ret}
diff --git a/pkgcore/bin/ebuild-helpers/dosed b/pkgcore/bin/ebuild-helpers/dosed
new file mode 100755
index 0000000..7422c7d
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/dosed
@@ -0,0 +1,22 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: /var/cvsroot/gentoo-src/portage/bin/dosed,v 1.7 2004/10/04 13:56:50 vapier Exp $
+
+mysed="s:${D}::g"
+
+for x in "$@" ; do
+ y="${D}${x}"
+ if [ -e "${y}" ] ; then
+ if [ -f "${y}" ] ; then
+ mysrc="${T}/${y##*/}"
+ cp "${y}" "${mysrc}"
+ sed -e "${mysed}" "${mysrc}" > "${y}"
+ else
+ echo "${y} is not a regular file!"
+ exit 1
+ fi
+ else
+ mysed="${x}"
+ fi
+done
diff --git a/pkgcore/bin/ebuild-helpers/dosym b/pkgcore/bin/ebuild-helpers/dosym
new file mode 100755
index 0000000..e0af15e
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/dosym
@@ -0,0 +1,14 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: /var/cvsroot/gentoo-src/portage/bin/dosym,v 1.7 2004/10/04 13:56:50 vapier Exp $
+
+if [[ $# -ne 2 ]] ; then
+ echo "$0: two arguments needed" 1>&2
+ exit 1
+fi
+
+destdir=${2%/*}
+[[ ! -d ${D}${destdir} ]] && dodir "${destdir}"
+
+exec ln -snf "$1" "${D}$2"
diff --git a/pkgcore/bin/ebuild-helpers/emake b/pkgcore/bin/ebuild-helpers/emake
new file mode 100755
index 0000000..d9f548f
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/emake
@@ -0,0 +1,14 @@
+#!/bin/bash
+# Copyright 1999-2005 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: emake 1912 2005-08-25 03:54:42Z ferringb $
+#
+# emake: Supplies some default parameters to GNU make. At the moment the
+# only parameter supplied is -jN, where N is a number of
+# parallel processes that should be ideal for the running host
+# (e.g. on a single-CPU machine, N=2). The MAKEOPTS variable
+# is set in /etc/make.globals. We don't source
+# /etc/make.globals here because emake is only called from an
+# ebuild.
+
+exec ${MAKE:-make} ${MAKEOPTS} ${EXTRA_EMAKE} "$@"
diff --git a/pkgcore/bin/ebuild-helpers/fowners b/pkgcore/bin/ebuild-helpers/fowners
new file mode 100755
index 0000000..99f0685
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/fowners
@@ -0,0 +1,7 @@
+#!/bin/bash
+# Copyright 1999-2005 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: fowners 1912 2005-08-25 03:54:42Z ferringb $
+
+slash=/
+exec chown "${@/#${slash}/${D}${slash}}"
diff --git a/pkgcore/bin/ebuild-helpers/fperms b/pkgcore/bin/ebuild-helpers/fperms
new file mode 100755
index 0000000..383894e
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/fperms
@@ -0,0 +1,7 @@
+#!/bin/bash
+# Copyright 1999-2005 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: fperms 1912 2005-08-25 03:54:42Z ferringb $
+
+slash=/
+exec chmod "${@/#${slash}/${D}${slash}}"
diff --git a/pkgcore/bin/ebuild-helpers/newbin b/pkgcore/bin/ebuild-helpers/newbin
new file mode 100755
index 0000000..ba7852d
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/newbin
@@ -0,0 +1,13 @@
+#!/bin/bash
+# Copyright 1999-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: newbin 1912 2005-08-25 03:54:42Z ferringb $
+
+if [ -z "${T}" ] || [ -z "${2}" ] ; then
+ echo "Nothing defined to do."
+ exit 1
+fi
+
+rm -rf "${T}/${2}"
+cp "${1}" "${T}/${2}"
+dobin "${T}/${2}"
diff --git a/pkgcore/bin/ebuild-helpers/newconfd b/pkgcore/bin/ebuild-helpers/newconfd
new file mode 100755
index 0000000..e9f2aa5
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/newconfd
@@ -0,0 +1,13 @@
+#!/bin/bash
+# Copyright 1999-2005 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: newconfd 1912 2005-08-25 03:54:42Z ferringb $
+
+if [ -z "${T}" ] || [ -z "${2}" ] ; then
+ echo "$0: nothing defined to do" 1>&2
+ exit 1
+fi
+
+rm -rf "${T}/${2}"
+cp "${1}" "${T}/${2}"
+exec doconfd "${T}/${2}"
diff --git a/pkgcore/bin/ebuild-helpers/newdoc b/pkgcore/bin/ebuild-helpers/newdoc
new file mode 100755
index 0000000..bc56d73
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/newdoc
@@ -0,0 +1,13 @@
+#!/bin/bash
+# Copyright 1999-2005 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: newdoc 1912 2005-08-25 03:54:42Z ferringb $
+
+if [ -z "${T}" ] || [ -z "${2}" ] ; then
+ echo "newdoc: Nothing defined to do" 1>&2
+ exit 1
+fi
+
+rm -rf "${T}/${2}"
+cp "${1}" "${T}/${2}"
+exec dodoc "${T}/${2}"
diff --git a/pkgcore/bin/ebuild-helpers/newenvd b/pkgcore/bin/ebuild-helpers/newenvd
new file mode 100755
index 0000000..68cf65c
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/newenvd
@@ -0,0 +1,13 @@
+#!/bin/bash
+# Copyright 1999-2005 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: newenvd 1912 2005-08-25 03:54:42Z ferringb $
+
+if [ -z "${T}" ] || [ -z "${2}" ] ; then
+ echo "$0: nothing defined to do" 1>&2
+ exit 1
+fi
+
+rm -rf "${T}/${2}"
+cp "${1}" "${T}/${2}"
+exec doenvd "${T}/${2}"
diff --git a/pkgcore/bin/ebuild-helpers/newexe b/pkgcore/bin/ebuild-helpers/newexe
new file mode 100755
index 0000000..4769694
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/newexe
@@ -0,0 +1,13 @@
+#!/bin/bash
+# Copyright 1999-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: newexe 1912 2005-08-25 03:54:42Z ferringb $
+
+if [ -z "${T}" ] || [ -z "${2}" ] ; then
+ echo "Nothing defined to do."
+ exit 1
+fi
+
+rm -rf "${T}/${2}"
+cp "${1}" "${T}/${2}"
+doexe "${T}/${2}"
diff --git a/pkgcore/bin/ebuild-helpers/newinitd b/pkgcore/bin/ebuild-helpers/newinitd
new file mode 100755
index 0000000..f461bba
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/newinitd
@@ -0,0 +1,13 @@
+#!/bin/bash
+# Copyright 1999-2005 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: newinitd 1912 2005-08-25 03:54:42Z ferringb $
+
+if [ -z "${T}" ] || [ -z "${2}" ] ; then
+ echo "$0: nothing defined to do" 1>&2
+ exit 1
+fi
+
+rm -rf "${T}/${2}"
+cp "${1}" "${T}/${2}"
+exec doinitd "${T}/${2}"
diff --git a/pkgcore/bin/ebuild-helpers/newins b/pkgcore/bin/ebuild-helpers/newins
new file mode 100755
index 0000000..bb89feb
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/newins
@@ -0,0 +1,13 @@
+#!/bin/bash
+# Copyright 1999-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: newins 1912 2005-08-25 03:54:42Z ferringb $
+
+if [ -z "${T}" ] || [ -z "${2}" ] ; then
+ echo "Error: Nothing defined to do."
+ exit 1
+fi
+
+rm -rf "${T}/${2}"
+cp "${1}" "${T}/${2}"
+doins "${T}/${2}"
diff --git a/pkgcore/bin/ebuild-helpers/newlib.a b/pkgcore/bin/ebuild-helpers/newlib.a
new file mode 100755
index 0000000..ac4b035
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/newlib.a
@@ -0,0 +1,13 @@
+#!/bin/bash
+# Copyright 1999-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: newlib.a 1912 2005-08-25 03:54:42Z ferringb $
+
+if [ -z "${T}" ] || [ -z "${2}" ] ; then
+ echo "Error: Nothing defined to do."
+ exit 1
+fi
+
+rm -rf "${T}/${2}"
+cp "${1}" "${T}/${2}"
+dolib.a "${T}/${2}"
diff --git a/pkgcore/bin/ebuild-helpers/newlib.so b/pkgcore/bin/ebuild-helpers/newlib.so
new file mode 100755
index 0000000..5dec75a
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/newlib.so
@@ -0,0 +1,13 @@
+#!/bin/bash
+# Copyright 1999-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: newlib.so 1912 2005-08-25 03:54:42Z ferringb $
+
+if [ -z "${T}" ] || [ -z "${2}" ] ; then
+ echo "Error: Nothing defined to do."
+ exit 1
+fi
+
+rm -rf "${T}/${2}"
+cp "${1}" "${T}/${2}"
+dolib.so "${T}/${2}"
diff --git a/pkgcore/bin/ebuild-helpers/newman b/pkgcore/bin/ebuild-helpers/newman
new file mode 100755
index 0000000..0081851
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/newman
@@ -0,0 +1,13 @@
+#!/bin/bash
+# Copyright 1999-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: newman 1912 2005-08-25 03:54:42Z ferringb $
+
+if [ -z "${T}" ] || [ -z "${2}" ] ; then
+ echo "newman: Nothing defined to do" 1>&2
+ exit 1
+fi
+
+rm -rf "${T}/${2}"
+cp "${1}" "${T}/${2}"
+exec doman "${T}/${2}"
diff --git a/pkgcore/bin/ebuild-helpers/newsbin b/pkgcore/bin/ebuild-helpers/newsbin
new file mode 100755
index 0000000..ab9f397
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/newsbin
@@ -0,0 +1,13 @@
+#!/bin/bash
+# Copyright 1999-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: newsbin 1912 2005-08-25 03:54:42Z ferringb $
+
+if [ -z "${T}" ] || [ -z "${2}" ] ; then
+ echo "Nothing defined to do."
+ exit 1
+fi
+
+rm -rf "${T}/${2}"
+cp "${1}" "${T}/${2}"
+dosbin "${T}/${2}"
diff --git a/pkgcore/bin/ebuild-helpers/prepall b/pkgcore/bin/ebuild-helpers/prepall
new file mode 100755
index 0000000..1f1f458
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/prepall
@@ -0,0 +1,86 @@
+#!/bin/bash
+# Copyright 1999-2005 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: prepall 2394 2005-12-17 17:23:07Z vapier $
+
+prepallman
+prepallinfo
+prepallstrip
+
+# this should help to ensure that all (most?) shared libraries are executable
+# and that all libtool scripts / static libraries are not executable
+for i in "${D}"opt/*/lib{,32,64} \
+ "${D}"lib{,32,64} \
+ "${D}"usr/lib{,32,64} \
+ "${D}"usr/X11R6/lib{,32,64} ; do
+ [[ ! -d ${i} ]] && continue
+
+ for j in "${i}"/*.so.* "${i}"/*.so ; do
+ [[ ! -e ${j} ]] && continue
+ [[ -L ${j} ]] && continue
+ [[ -x ${j} ]] && continue
+ echo "making executable: /${j/${D}/}"
+ chmod +x "${j}"
+ done
+
+ for j in "${i}"/*.a "${i}"/*.la ; do
+ [[ ! -e ${j} ]] && continue
+ [[ -L ${j} ]] && continue
+ [[ ! -x ${j} ]] && continue
+ echo "removing executable bit: /${j/${D}/}"
+ chmod -x "${j}"
+ done
+done
+
+# When installing static libraries into /usr/lib and shared libraries into
+# /lib, we have to make sure we have a linker script in /usr/lib along side
+# the static library, or gcc will utilize the static lib when linking :(.
+# http://bugs.gentoo.org/4411
+for a in "${D}"usr/lib*/*.a ; do
+ s=${a%.a}.so
+ if [[ ! -e ${s} ]] ; then
+ s=${s%usr/*}${s##*/usr/}
+ if [[ -e ${s} ]] ; then
+ echo -e "\aQA Notice: missing gen_usr_ldscript for ${s##*/}\a"
+ sleep 1
+ fi
+ fi
+done
+
+# Make sure people don't store libtool files or static libs in /lib
+f=$(ls "${D}"lib*/*.{a,la} 2>/dev/null)
+if [[ -n ${f} ]] ; then
+ echo -e "\n\aQA Notice: excessive files found in the / partition\a"
+ echo "${f}"
+ sleep 1
+fi
+
+# Verify that the libtool files don't contain bogus $D entries.
+for a in "${D}"usr/lib*/*.la ; do
+ s=${a##*/}
+ if grep -qs "${D}" "${a}" ; then
+ echo -e "\n\aQA Notice: ${s} appears to contain PORTAGE_TMPDIR paths\a"
+ sleep 1
+ fi
+done
+
+if type -p scanelf > /dev/null ; then
+
+# Run some sanity checks on shared libraries
+for d in "${D}"lib* "${D}"usr/lib* ; do
+ f=$(scanelf -ByF '%S %p' "${d}"/lib*.so* | gawk '$2 == "" { print }')
+ if [[ -n ${f} ]] ; then
+ echo -e "\n\aQA Notice: the following shared libraries lack a SONAME\a"
+ echo "${f}"
+ sleep 1
+ fi
+
+ f=$(scanelf -ByF '%n %p' "${d}"/lib*.so* | gawk '$2 == "" { print }')
+ if [[ -n ${f} ]] ; then
+ echo -e "\n\aQA Notice: the following shared libraries lack NEEDED entries\a"
+ echo "${f}"
+ sleep 1
+ fi
+done
+
+fi
diff --git a/pkgcore/bin/ebuild-helpers/prepalldocs b/pkgcore/bin/ebuild-helpers/prepalldocs
new file mode 100755
index 0000000..e71c6e4
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/prepalldocs
@@ -0,0 +1,57 @@
+#!/bin/bash
+# Copyright 1999-2005 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: prepalldocs 1912 2005-08-25 03:54:42Z ferringb $
+
+dir="${D}usr/share/doc"
+
+[ ! -d "${dir}" ] && exit 0
+
+z=$(find "${dir}" \
+ '(' -type f -or -type l ')' \
+ -not -name '*.gz' \
+ -not -name '*.bz2' \
+ -not -name '*.Z' \
+ -not -name '*.js' \
+ 2>/dev/null)
+
+[ -z "${z}" ] && exit 0
+
+PORTAGE_COMPRESS=${PORTAGE_COMPRESS:-gzip}
+PORTAGE_COMPRESS_FLAGS=${PORTAGE_COMPRESS_FLAGS:--9}
+if [ -z "${PORTAGE_COMPRESS_SUFFIX}" ] ; then
+ case ${PORTAGE_COMPRESS} in
+ gzip) suffix="gz";;
+ bzip2) suffix="bz2";;
+ *) echo "prepalldocs error: please set PORTAGE_COMPRESS_SUFFIX in make.conf" 1>&2
+ exit 1;;
+ esac
+else
+	suffix="${PORTAGE_COMPRESS_SUFFIX}"
+fi
+
+echo "doc: ${PORTAGE_COMPRESS} ${PORTAGE_COMPRESS_FLAGS}"
+for y in ${z} ; do
+ if [ -L "${y}" ] ; then
+ # Symlink ...
+ mylink=${y}
+ linkto=$(readlink "${y}")
+
+ if [ "${linkto##*.}" != "${suffix}" ] ; then
+ linkto="${linkto}.${suffix}"
+ fi
+ if [ "${mylink##*.}" != "${suffix}" ] ; then
+ mylink="${mylink}.${suffix}"
+ fi
+
+ echo " link fixed ${mylink##*/}"
+ ln -snf "${linkto}" "${mylink}"
+ if [ "${y}" != "${mylink}" ] ; then
+ echo " link removed ${y##*/}"
+ rm -f "${y}"
+ fi
+ else
+ if [ "${y##*.}" != "${suffix}" ] ; then
+ echo " compressing ${y##*/}"
+ "${PORTAGE_COMPRESS}" ${PORTAGE_COMPRESS_FLAGS} -f "${y}"
+ fi
+ fi
+done
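
The symlink branch above is the subtle part of prepalldocs (prepinfo and prepman below repeat it): when a documentation symlink or its target lacks the compression suffix, both names get the suffix appended so the link still resolves once its target has been compressed, and the old un-suffixed link is removed. A minimal Python sketch of that adjustment; the function name and the gzip default are illustrative assumptions:

    def fix_doc_link(mylink, linkto, suffix="gz"):
        # mirror the two suffix checks in the loop above
        if not linkto.endswith("." + suffix):
            linkto += "." + suffix
        if not mylink.endswith("." + suffix):
            mylink += "." + suffix
        return mylink, linkto

    # fix_doc_link("README", "CHANGES") -> ("README.gz", "CHANGES.gz");
    # the link is recreated under the new name and the old "README" removed.
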
diff --git a/pkgcore/bin/ebuild-helpers/prepallinfo b/pkgcore/bin/ebuild-helpers/prepallinfo
new file mode 100755
index 0000000..220391e
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/prepallinfo
@@ -0,0 +1,8 @@
+#!/bin/bash
+# Copyright 1999-2005 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: prepallinfo 1912 2005-08-25 03:54:42Z ferringb $
+
+[ ! -d "${D}usr/share/info" ] && exit 0
+
+exec prepinfo
diff --git a/pkgcore/bin/ebuild-helpers/prepallman b/pkgcore/bin/ebuild-helpers/prepallman
new file mode 100755
index 0000000..77b570a
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/prepallman
@@ -0,0 +1,9 @@
+#!/bin/bash
+# Copyright 1999-2005 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: prepallman 1912 2005-08-25 03:54:42Z ferringb $
+
+for x in $(find "${D}" -name man -type d -printf '%P\n') ; do
+ prepman ${x%/man}
+ export prepallman_banner=no
+done
diff --git a/pkgcore/bin/ebuild-helpers/prepallstrip b/pkgcore/bin/ebuild-helpers/prepallstrip
new file mode 100755
index 0000000..e55e111
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/prepallstrip
@@ -0,0 +1,10 @@
+#!/bin/bash
+# Copyright 1999-2005 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: prepallstrip 1912 2005-08-25 03:54:42Z ferringb $
+
+if [ "${FEATURES//*nostrip*/true}" == "true" ] || [ "${RESTRICT//*nostrip*/true}" == "true" ] ; then
+ exit 0
+fi
+
+exec prepstrip "${D}"
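
The ${FEATURES//*nostrip*/true} construct used here (and again in prepstrip below) reads oddly: the pattern replacement rewrites the whole value to "true" when it contains the substring "nostrip", so the comparison is effectively a substring test on FEATURES/RESTRICT. Roughly, in Python terms, purely as an illustration:

    def nostrip_requested(features, restrict):
        # [ "${FEATURES//*nostrip*/true}" == "true" ] boils down to a substring check
        return "nostrip" in features or "nostrip" in restrict

    # nostrip_requested("sandbox nostrip userpriv", "") -> True
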
diff --git a/pkgcore/bin/ebuild-helpers/prepinfo b/pkgcore/bin/ebuild-helpers/prepinfo
new file mode 100755
index 0000000..85af086
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/prepinfo
@@ -0,0 +1,58 @@
+#!/bin/bash
+# Copyright 1999-2005 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: prepinfo 1912 2005-08-25 03:54:42Z ferringb $
+
+if [ -z "$1" ] ; then
+ z="${D}usr/share/info"
+else
+ if [ -d "${D}$1/share/info" ] ; then
+ z="${D}$1/share/info"
+ else
+ z="${D}$1/info"
+ fi
+fi
+
+[ ! -d "${z}" ] && exit 0
+
+rm -f "${z}"/dir{,.old}{,.info{,.gz,.bz2,.Z}}
+
+PORTAGE_COMPRESS=${PORTAGE_COMPRESS:-gzip}
+PORTAGE_COMPRESS_FLAGS=${PORTAGE_COMPRESS_FLAGS:--9}
+if [ -z "${PORTAGE_COMPRESS_SUFFIX}" ] ; then
+ case ${PORTAGE_COMPRESS} in
+ gzip) suffix="gz";;
+ bzip2) suffix="bz2";;
+ *) echo "prepinfo: error fixing links: please set PORTAGE_COMPRESS_SUFFIX in make.conf" 1>&2
+ exit 1;;
+ esac
+else
+	suffix="${PORTAGE_COMPRESS_SUFFIX}"
+fi
+
+echo "info: ${PORTAGE_COMPRESS} ${PORTAGE_COMPRESS_FLAGS}"
+
+for x in `find "${z}"/ \( -type f -or -type l \) -maxdepth 1 -mindepth 1 2>/dev/null` ; do
+ if [ -L "${x}" ] ; then
+ # Symlink ...
+ mylink=${x}
+ linkto=$(readlink "${x}")
+
+ if [ "${linkto##*.}" != "${suffix}" ] ; then
+ linkto="${linkto}.${suffix}"
+ fi
+ if [ "${mylink##*.}" != "${suffix}" ] ; then
+ mylink="${mylink}.${suffix}"
+ fi
+
+ echo "fixing GNU info symlink: ${mylink##*/}"
+ ln -snf "${linkto}" "${mylink}"
+ if [ "${x}" != "${mylink}" ] ; then
+ echo "removing old symlink: ${x##*/}"
+ rm -f "${x}"
+ fi
+ else
+ if [ "${x##*.}" != "${suffix}" ] ; then
+ echo "compressing GNU info page: ${x##*/}"
+ "${PORTAGE_COMPRESS}" ${PORTAGE_COMPRESS_FLAGS} -f "${x}"
+ fi
+ fi
+done
diff --git a/pkgcore/bin/ebuild-helpers/preplib b/pkgcore/bin/ebuild-helpers/preplib
new file mode 100755
index 0000000..c6ea5c4
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/preplib
@@ -0,0 +1,25 @@
+#!/bin/bash
+# Copyright 1999-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: preplib 1912 2005-08-25 03:54:42Z ferringb $
+
+LIBDIR_VAR="LIBDIR_${ABI}"
+if [ -n "${ABI}" -a -n "${!LIBDIR_VAR}" ]; then
+ CONF_LIBDIR="${!LIBDIR_VAR}"
+fi
+unset LIBDIR_VAR
+
+if [ -z "${CONF_LIBDIR}" ]; then
+	# we need this to default to lib so that things don't break
+ CONF_LIBDIR="lib"
+fi
+
+if [ -z "$1" ] ; then
+ z="${D}usr/${CONF_LIBDIR}"
+else
+ z="${D}$1/${CONF_LIBDIR}"
+fi
+
+if [ -d "${z}" ] ; then
+ ldconfig -n -N "${z}"
+fi
diff --git a/pkgcore/bin/ebuild-helpers/preplib.so b/pkgcore/bin/ebuild-helpers/preplib.so
new file mode 100755
index 0000000..0b88cc9
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/preplib.so
@@ -0,0 +1,10 @@
+#!/bin/bash
+# Copyright 1999-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: preplib.so 1912 2005-08-25 03:54:42Z ferringb $
+
+for x in "$@" ; do
+ if [ -d "${D}${x}" ] ; then
+ ldconfig -n -N "${D}${x}"
+ fi
+done
diff --git a/pkgcore/bin/ebuild-helpers/prepman b/pkgcore/bin/ebuild-helpers/prepman
new file mode 100755
index 0000000..0fd16f5
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/prepman
@@ -0,0 +1,61 @@
+#!/bin/bash
+# Copyright 1999-2005 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: prepman 1912 2005-08-25 03:54:42Z ferringb $
+
+if [ -z "$1" ] ; then
+ z="${D}usr/share/man"
+else
+ z="${D}$1/man"
+fi
+
+[ ! -d "${z}" ] && exit 0
+
+PORTAGE_COMPRESS=${PORTAGE_COMPRESS:-gzip}
+PORTAGE_COMPRESS_FLAGS=${PORTAGE_COMPRESS_FLAGS:--9}
+if [ -z "${PORTAGE_COMPRESS_SUFFIX}" ] ; then
+ case ${PORTAGE_COMPRESS} in
+ gzip) suffix="gz";;
+ bzip2) suffix="bz2";;
+ *) echo "prepman error: please set PORTAGE_COMPRESS_SUFFIX in make.conf" 1>&2
+ exit 1;;
+ esac
+else
+	suffix="${PORTAGE_COMPRESS_SUFFIX}"
+fi
+
+if [ -z "${prepallman_banner}" ] ; then
+ echo "man: ${PORTAGE_COMPRESS} ${PORTAGE_COMPRESS_FLAGS}"
+fi
+
+for x in `find "${z}"/ -type d 2>/dev/null` ; do
+ for y in `find "${x}"/ \( -type f -or -type l \) ! -name '.keep' -maxdepth 1 -mindepth 1 2>/dev/null` ; do
+ if [ -L "${y}" ] ; then
+ # Symlink ...
+ mylink=${y}
+ linkto=$(readlink "${y}")
+
+ # Do NOT change links to directories
+ if [ -d "${z}/${linkto}" ] ; then
+ continue
+ fi
+
+ if [ "${linkto##*.}" != "${suffix}" ] ; then
+ linkto="${linkto}.${suffix}"
+ fi
+ if [ "${mylink##*.}" != "${suffix}" ] ; then
+ mylink="${mylink}.${suffix}"
+ fi
+
+ echo " link fixed ${mylink##*/}"
+ ln -snf "${linkto}" "${mylink}"
+ if [ "${y}" != "${mylink}" ] ; then
+ echo " link removed ${y##*/}"
+ rm -f "${y}"
+ fi
+ else
+ if [ "${y##*.}" != "${suffix}" ] && [ ! -d "${y}" ] ; then
+ echo " compressing ${y##*/}"
+ "${PORTAGE_COMPRESS}" ${PORTAGE_COMPRESS_FLAGS} -f "${y}"
+ fi
+ fi
+ done
+done
diff --git a/pkgcore/bin/ebuild-helpers/prepstrip b/pkgcore/bin/ebuild-helpers/prepstrip
new file mode 100755
index 0000000..2e03cb9
--- /dev/null
+++ b/pkgcore/bin/ebuild-helpers/prepstrip
@@ -0,0 +1,48 @@
+#!/bin/bash
+# Copyright 1999-2005 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: prepstrip 2228 2005-11-01 01:35:23Z vapier $
+
+if [ "${FEATURES//*nostrip*/true}" == "true" ] || [ "${RESTRICT//*nostrip*/true}" == "true" ] ; then
+ echo "nostrip"
+ STRIP="/bin/false"
+ PORTAGE_STRIP_FLAGS=""
+else
+ STRIP=${STRIP:-${CHOST}-strip}
+ type -p -- ${STRIP} > /dev/null || STRIP=strip
+ PORTAGE_STRIP_FLAGS=${PORTAGE_STRIP_FLAGS:---strip-unneeded}
+fi
+
+banner=1
+retval=0
+
+for x in "$@" ; do
+ if [ -d "${x}" ]; then
+ # We only want files. So make a pass for each directory and call again.
+ find "${x}" -type f \( -perm -0100 -or -perm -0010 -or -perm -0001 -or -name '*.so' -or -name '*.so.*' \) -print0 |
+ $XARGS -0 -n500 prepstrip
+ else
+ if [ ${banner} -eq 1 ] ; then
+ echo "strip: ${STRIP} ${PORTAGE_STRIP_FLAGS}"
+ banner=0
+ fi
+
+ f=$(file "${x}") || continue
+ [ -z "${f}" ] && continue
+
+ if [ -z "${f/*current ar archive*/}" ]; then
+ echo " ${x:${#D}:${#x}}"
+ ${STRIP} -g "${x}"
+ fi
+ if [ -z "${f/*SB executable*/}" ]; then
+ echo " ${x:${#D}:${#x}}"
+ ${STRIP} "${x}"
+ fi
+ if [ -z "${f/*SB shared object*/}" ]; then
+ echo " ${x:${#D}:${#x}}"
+ ${STRIP} ${PORTAGE_STRIP_FLAGS} "${x}"
+ fi
+ fi
+done
+
+exit ${retval}
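
Each ${f/*pattern*/} test above blanks the file(1) description when it matches, so [ -z ... ] is really asking whether the description mentions a given type; the three branches then pick different strip invocations. A rough Python rendering of that dispatch, illustrative only; it assumes file(1) and strip are on PATH and is not part of the helper:

    import subprocess

    def strip_one(path, strip="strip", strip_flags="--strip-unneeded"):
        desc = subprocess.check_output(["file", path]).decode("utf-8", "replace")
        if "current ar archive" in desc:
            # static archive: drop only debugging symbols
            subprocess.check_call([strip, "-g", path])
        elif "SB executable" in desc:
            # LSB/MSB executable: default strip
            subprocess.check_call([strip, path])
        elif "SB shared object" in desc:
            # shared object: strip unneeded symbols
            subprocess.check_call([strip, strip_flags, path])
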
diff --git a/pkgcore/binpkg/__init__.py b/pkgcore/binpkg/__init__.py
new file mode 100644
index 0000000..23646cf
--- /dev/null
+++ b/pkgcore/binpkg/__init__.py
@@ -0,0 +1,3 @@
+"""
+gentoo binpkg support
+"""
diff --git a/pkgcore/binpkg/repo_ops.py b/pkgcore/binpkg/repo_ops.py
new file mode 100644
index 0000000..bae5e0e
--- /dev/null
+++ b/pkgcore/binpkg/repo_ops.py
@@ -0,0 +1,99 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+import os, errno
+
+from pkgcore.interfaces import repo as repo_interfaces
+from pkgcore.fs import tar
+from pkgcore.binpkg import xpak
+from pkgcore.ebuild.conditionals import stringify_boolean
+
+from snakeoil import osutils
+from pkgcore.util.bzip2 import compress
+from snakeoil.osutils import join as pjoin
+from snakeoil.demandload import demandload
+demandload(globals(), "pkgcore.log:logger")
+
+def discern_loc(base, pkg):
+ return pjoin(base, pkg.category,
+ "%s-%s.tbz2" % (pkg.package, pkg.fullver))
+
+
+_metadata_rewrites = {
+ "depends":"DEPEND", "rdepends":"RDEPEND", "post_rdepends":"PDEPEND",
+ "use":"USE", "eapi":"EAPI", "CONTENTS":"contents", "provides":"PROVIDE"}
+
+def generate_attr_dict(pkg):
+ d = {}
+ for k in pkg.tracked_attributes:
+ if k == "contents":
+ continue
+ v = getattr(pkg, k)
+ if k == 'environment':
+ d['environment.bz2'] = compress(v.get_fileobj().read())
+ continue
+ if k == 'provides':
+ versionless_provides = lambda b: b.key
+ s = stringify_boolean(v, func=versionless_provides)
+ elif not isinstance(v, basestring):
+ try:
+ s = ' '.join(v)
+ except TypeError:
+ s = str(v)
+ else:
+ s = v
+ d[_metadata_rewrites.get(k, k.upper())] = s
+ d["%s-%s.ebuild" % (pkg.package, pkg.fullver)] = \
+ pkg.ebuild.get_fileobj().read()
+ return d
+
+
+class install(repo_interfaces.nonlivefs_install):
+
+ def modify_repo(self):
+ if self.observer is None:
+ end = start = lambda x:None
+ else:
+ start = self.observer.phase_start
+ end = self.observer.phase_end
+ pkg = self.new_pkg
+ final_path = discern_loc(self.repo.base, pkg)
+ tmp_path = pjoin(os.path.dirname(final_path),
+ ".tmp.%i.%s" % (os.getpid(), os.path.basename(final_path)))
+
+ if not osutils.ensure_dirs(os.path.dirname(tmp_path), mode=0755):
+ raise repo_interfaces.Failure("failed creating directory %r" %
+ os.path.dirname(tmp_path))
+ try:
+ start("generating tarball: %s" % tmp_path)
+ tar.write_set(pkg.contents, tmp_path, compressor='bz2')
+ end("tarball created")
+ start("writing Xpak")
+ # ok... got a tarball. now add xpak.
+ x = xpak.Xpak.write_xpak(tmp_path, generate_attr_dict(pkg))
+ end("wrote Xpak")
+ # ok... we tagged the xpak on.
+ os.chmod(tmp_path, 0644)
+ os.rename(tmp_path, final_path)
+ except:
+ try:
+ os.unlink(tmp_path)
+ except (IOError, OSError), e:
+ if e.errno != errno.ENOENT:
+ logger.warn("failed removing %r: %r" % (tmp_path, e))
+ raise
+ return True
+
+
+class uninstall(repo_interfaces.nonlivefs_uninstall):
+
+ def modify_repo(self):
+ os.unlink(discern_loc(self.repo.base, self.old_pkg))
+ return True
+
+
+class replace(install, uninstall, repo_interfaces.nonlivefs_replace):
+
+ def modify_repo(self):
+ uninstall.modify_repo(self)
+ install.modify_repo(self)
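
install.modify_repo above follows the usual write-then-rename pattern: the tbz2 is assembled under a ".tmp.<pid>.<name>" path in the destination directory, the xpak segment is appended, and only then is the file chmodded and renamed over the final location, so readers never observe a half-written binpkg. A stripped-down sketch of that pattern, with made-up names and no pkgcore types involved:

    import os

    def publish_atomically(final_path, write_payload):
        tmp_path = os.path.join(os.path.dirname(final_path),
                                ".tmp.%i.%s" % (os.getpid(), os.path.basename(final_path)))
        try:
            write_payload(tmp_path)             # e.g. write the tarball, then append the xpak
            os.chmod(tmp_path, 0o644)
            os.rename(tmp_path, final_path)     # atomic within a single filesystem
        except Exception:
            if os.path.exists(tmp_path):
                os.unlink(tmp_path)
            raise
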
diff --git a/pkgcore/binpkg/repository.py b/pkgcore/binpkg/repository.py
new file mode 100644
index 0000000..a0804aa
--- /dev/null
+++ b/pkgcore/binpkg/repository.py
@@ -0,0 +1,298 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+binpkg ebuild repository
+"""
+
+import os, stat
+
+from pkgcore.repository import prototype, errors
+from pkgcore.merge import triggers
+from pkgcore.plugin import get_plugin
+from pkgcore.ebuild.ebuild_built import pkg_uses_default_preinst
+from pkgcore.config import ConfigHint
+#needed to grab the PN
+from pkgcore.ebuild.cpv import CPV as cpv
+
+from snakeoil.currying import partial
+from snakeoil.mappings import DictMixin
+from snakeoil.osutils import listdir_dirs, listdir_files
+from snakeoil.osutils import join as pjoin
+
+from snakeoil.demandload import demandload
+demandload(globals(),
+ "pkgcore.merge:engine",
+ "pkgcore.fs.livefs:scan",
+ "pkgcore.interfaces.data_source:local_source",
+ "pkgcore.fs.ops:offset_rewriter",
+ "pkgcore.interfaces.data_source:data_source",
+ "pkgcore.repository:wrapper",
+ "pkgcore.package.mutated:MutatedPkg",
+ "pkgcore.ebuild:ebd",
+ "pkgcore.binpkg:repo_ops",
+ "errno",
+ "pkgcore.fs.tar:generate_contents",
+ "pkgcore.binpkg.xpak:Xpak",
+ "pkgcore.util.bzip2:decompress",
+)
+
+
+class force_unpacking(triggers.base):
+
+ required_csets = ('install',)
+ _hooks = ('sanity_check',)
+ _priority = 5
+ _label = 'forced decompression'
+ _engine_type = triggers.INSTALLING_MODES
+
+ def __init__(self, format_op):
+ self.format_op = format_op
+
+ def trigger(self, engine, cset):
+ op = self.format_op
+ op.setup_workdir()
+ merge_contents = get_plugin("fs_ops.merge_contents")
+ merge_cset = cset
+ if engine.offset != '/':
+ merge_cset = cset.change_offset(engine.offset, '/')
+ merge_contents(merge_cset, offset=op.env["D"])
+
+ # ok. they're on disk.
+ # now to avoid going back to the binpkg, we rewrite
+ # the data_source for files to the on disk location.
+ # we can update in place also, since we're not changing the mapping.
+
+ # this rewrites the data_source to the ${D} loc.
+ d = op.env["D"]
+ fi = (x.change_attributes(data_source=local_source(
+ pjoin(d, x.location.lstrip('/'))))
+ for x in merge_cset.iterfiles())
+
+ if engine.offset:
+ # we're using merge_cset above, which has the final offset loc
+ # pruned; this is required for the merge, however, we're updating
+ # the cset so we have to insert the final offset back in.
+ # wrap the iter, iow.
+ fi = offset_rewriter(engine.offset, fi)
+
+ # we *probably* should change the csets class at some point
+ # since it no longer needs to be tar, but that's for another day.
+ cset.update(fi)
+
+
+def wrap_factory(klass, *args, **kwds):
+
+ class new_factory(klass):
+
+ def _add_format_triggers(self, pkg, op_inst, format_op_inst,
+ engine_inst):
+ if engine.UNINSTALL_MODE != engine_inst.mode and \
+ pkg == engine_inst.new and \
+ pkg.repo is engine_inst.new.repo and \
+ not pkg_uses_default_preinst(pkg):
+ t = force_unpacking(op_inst.install_op)
+ t.register(engine_inst)
+
+ klass._add_format_triggers(
+ self, pkg, op_inst, format_op_inst, engine_inst)
+
+ def scan_contents(self, location):
+ return scan(location, offset=location)
+
+ return new_factory(*args, **kwds)
+
+
+class StackedXpakDict(DictMixin):
+ __slots__ = ("_xpak", "_parent", "_pkg", "contents",
+ "_wipes")
+
+ _metadata_rewrites = {
+ "depends":"DEPEND", "rdepends":"RDEPEND", "post_rdepends":"PDEPEND",
+ "provides":"PROVIDE", "use":"USE", "eapi":"EAPI",
+ "CONTENTS":"contents"}
+
+ def __init__(self, parent, pkg):
+ self._pkg = pkg
+ self._parent = parent
+ self._wipes = []
+
+ def __getattr__(self, attr):
+ if attr == "_xpak":
+ data = Xpak(self._parent._get_path(self._pkg))
+ object.__setattr__(self, attr, data)
+ return data
+ raise AttributeError(self, attr)
+
+ def __getitem__(self, key):
+ key = self._metadata_rewrites.get(key, key)
+ if key in self._wipes:
+ raise KeyError(self, key)
+ if key == "contents":
+ data = generate_contents(self._parent._get_path(self._pkg))
+ object.__setattr__(self, "contents", data)
+ elif key == "environment":
+ data = self._xpak.get("environment.bz2")
+ if data is None:
+ data = data_source(self._xpak.get("environment"),
+ mutable=True)
+ if data is None:
+ raise KeyError(
+ "environment.bz2 not found in xpak segment, "
+ "malformed binpkg?")
+ else:
+ data = data_source(decompress(data), mutable=True)
+ elif key == "ebuild":
+ data = self._xpak.get("%s-%s.ebuild" %
+ (self._pkg.package, self._pkg.fullver), "")
+ data = data_source(data)
+ else:
+ try:
+ data = self._xpak[key]
+ except KeyError:
+ data = ''
+ return data
+
+ def __delitem__(self, key):
+ if key in ("contents", "environment"):
+ if key in self._wipes:
+ raise KeyError(self, key)
+ self._wipes.append(key)
+ else:
+ del self._xpak[key]
+
+ def __setitem__(self, key, val):
+ if key in ("contents", "environment"):
+ setattr(self, key, val)
+ self._wipes = [x for x in self._wipes if x != key]
+ else:
+ self._xpak[key] = val
+ return val
+
+ def iterkeys(self):
+ for k in self._xpak:
+ yield k
+ for k in ("environment", "contents"):
+ if self.get(k) is not None:
+ yield k
+
+
+class tree(prototype.tree):
+
+ format_magic = "ebuild_built"
+
+ # yes, the period is required. no, do not try and remove it
+ # (harring says it stays)
+ extension = ".tbz2"
+
+ configured = False
+ configurables = ("settings",)
+
+ pkgcore_config_type = ConfigHint({'location':'str',
+ 'repo_id':'str'}, typename='repo')
+
+ def __init__(self, location, repo_id=None):
+ super(tree, self).__init__()
+ self.base = location
+ if repo_id is None:
+ repo_id = location
+ self.repo_id = repo_id
+ self._versions_tmp_cache = {}
+ try:
+ st = os.lstat(self.base)
+ if not stat.S_ISDIR(st.st_mode):
+ raise errors.InitializationError(
+ "base not a dir: %s" % self.base)
+ elif not st.st_mode & (os.X_OK|os.R_OK):
+ raise errors.InitializationError(
+ "base lacks read/executable: %s" % self.base)
+
+ except OSError:
+ raise errors.InitializationError(
+ "lstat failed on base %s" % self.base)
+
+ self.package_class = wrap_factory(
+ get_plugin("format." + self.format_magic), self)
+
+ def _get_categories(self, *optional_category):
+ # return if optional_category is passed... cause it's not yet supported
+ if optional_category:
+ return {}
+ try:
+ return tuple(
+ x for x in listdir_dirs(self.base)
+ if x.lower() != "all")
+ except (OSError, IOError), e:
+ raise KeyError("failed fetching categories: %s" % str(e))
+
+ def _get_packages(self, category):
+ cpath = pjoin(self.base, category.lstrip(os.path.sep))
+ l = set()
+ d = {}
+ lext = len(self.extension)
+ try:
+ for x in listdir_files(cpath):
+ # don't use lstat; symlinks may exist
+ if (x.endswith(".lockfile")
+ or not x[-lext:].lower() == self.extension):
+ continue
+ x = cpv(category+"/"+x[:-lext])
+ l.add(x.package)
+ d.setdefault((category, x.package), []).append(x.fullver)
+ except (OSError, IOError), e:
+ raise KeyError("failed fetching packages for category %s: %s" % \
+ (pjoin(self.base, category.lstrip(os.path.sep)), str(e)))
+
+ self._versions_tmp_cache.update(d)
+ return tuple(l)
+
+ def _get_versions(self, catpkg):
+ return tuple(self._versions_tmp_cache.pop(catpkg))
+
+ def _get_path(self, pkg):
+ s = "%s-%s" % (pkg.package, pkg.fullver)
+ return pjoin(self.base, pkg.category, s+".tbz2")
+
+ _get_ebuild_path = _get_path
+
+ def _get_metadata(self, pkg):
+ return StackedXpakDict(self, pkg)
+
+ def notify_remove_package(self, pkg):
+ prototype.tree.notify_remove_package(self, pkg)
+ try:
+ os.rmdir(pjoin(self.base, pkg.category))
+ except OSError, oe:
+ if oe.errno != errno.ENOTEMPTY:
+ raise
+ del oe
+
+ def _install(self, pkg, *a, **kw):
+ return repo_ops.install(self, pkg, *a, **kw)
+
+ def _uninstall(self, pkg, *a, **kw):
+ return repo_ops.uninstall(self, pkg, *a, **kw)
+
+ def _replace(self, oldpkg, newpkg, *a, **kw):
+ return repo_ops.replace(self, oldpkg, newpkg, *a, **kw)
+
+
+class ConfiguredBinpkgTree(wrapper.tree):
+
+ format_magic = "ebuild_built"
+ configured = True
+
+ def __init__(self, repo, domain_settings):
+ # rebind to ourselves basically.
+ def package_class(pkg):
+ return MutatedPkg(pkg,
+ {"build":partial(self._generate_build_op, pkg)})
+ wrapper.tree.__init__(self, repo, package_class=package_class)
+ self.domain_settings = domain_settings
+
+ def _generate_build_op(self, pkg, **kwargs):
+ kwargs["initial_env"] = self.domain_settings
+ kwargs["env_data_source"] = pkg.environment
+ return ebd.binpkg_buildable(pkg, **kwargs)
+
+tree.configure = ConfiguredBinpkgTree
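
StackedXpakDict above (and Xpak in the next file) both use the same lazy-attribute idiom in __getattr__: the expensive value is computed on first access, stashed with object.__setattr__, and every later lookup is served by normal attribute access without re-entering __getattr__. A stripped-down sketch of the idiom, with made-up names:

    class LazyExample(object):
        __slots__ = ("_expensive",)

        def __getattr__(self, attr):
            if attr == "_expensive":
                value = self._compute()              # runs only on the first access
                object.__setattr__(self, attr, value)
                return value
            raise AttributeError(self, attr)

        def _compute(self):
            return 42

    # LazyExample()._expensive computes once; subsequent reads hit the cached slot.
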
diff --git a/pkgcore/binpkg/xpak.py b/pkgcore/binpkg/xpak.py
new file mode 100644
index 0000000..1d64985
--- /dev/null
+++ b/pkgcore/binpkg/xpak.py
@@ -0,0 +1,263 @@
+# Copyright: 2006-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+XPAK container support
+"""
+
+import struct
+from snakeoil.mappings import OrderedDict
+from snakeoil.demandload import demandload
+demandload(globals(), "os", "errno")
+
+#
+# format is:
+# XPAKPACKIIIIDDDD[index][data]XPAKSTOPOOOOSTOP
+# first; all ints/longs are big endian
+# meanwhile, 8 byte format magic
+# 4 bytes of index len,
+# 4 bytes of data len
+# index items: 4 bytes (len of the key name), then that length of key data
+# finally, 2 longs; relative offset from data block start, length of the data
+# repeats until the index is fully processed
+# for data, just a big blob; offsets into it are determined via the index
+# table.
+# finally, trailing magic, 4 bytes (positive) of the # of bytes to seek to
+# reach the end of the magic, and 'STOP'. offset is relative to EOS for Xpak
+#
+
+class MalformedXpak(Exception):
+ def __init__(self, msg):
+        Exception.__init__(self, "xpak is malformed: %s" % (msg,))
+ self.msg = msg
+
+
+class Xpak(object):
+ __slots__ = ("_source", "_source_is_path", "xpak_start", "_keys_dict")
+ trailer_size = 16
+ trailer_parser = ">8sL4s"
+ trailer_pre_magic = "XPAKSTOP"
+ trailer_post_magic = "STOP"
+
+ header_size = 16
+ header_parser = ">8sLL"
+ header_pre_magic = "XPAKPACK"
+
+
+ def __init__(self, source):
+ self._source_is_path = isinstance(source, basestring)
+ self._source = source
+ self.xpak_start = None
+ # _keys_dict becomes an ordereddict after _load_offsets; reason for
+ # it is so that reads are serialized.
+
+ def __getattr__(self, attr):
+ if attr == "_keys_dict":
+ self._load_offsets()
+ return object.__getattribute__(self, attr)
+ raise AttributeError(self, attr)
+
+ @property
+ def _fd(self):
+ # we do this annoying little dance to avoid having a couple
+ # hundred fds open if they're accessing a lot of binpkgs
+ if self._source_is_path:
+ return open(self._source, "r")
+ return self._source
+
+ @classmethod
+ def write_xpak(cls, target_source, data):
+ """
+ write an xpak dict to disk; overwriting an xpak if it exists
+ @param target_source: string path, or
+ L{pkgcore.interfaces.data_source.base} derivative
+ @param data: mapping instance to write into the xpak.
+ @return: xpak instance
+ """
+ try:
+ old_xpak = cls(target_source)
+ # force access
+ old_xpak.keys()
+ start = old_xpak.xpak_start
+ source_is_path = old_xpak._source_is_path
+ except (MalformedXpak, IOError):
+ source_is_path = isinstance(target_source, basestring)
+ if source_is_path:
+ try:
+ start = os.lstat(target_source).st_size
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ start = 0
+ else:
+ f = target_source.get_fileobj().seek(0, 2)
+ start = f.tell()
+ new_index = []
+ new_data = []
+ cur_pos = 0
+ for key, val in data.iteritems():
+ new_index.append(struct.pack(">L%isLL" % len(key),
+ len(key), key, cur_pos, len(val)))
+ new_data.append(val)
+ cur_pos += len(val)
+
+ if source_is_path:
+ # rb+ required since A) binary, B) w truncates from the getgo
+ handle = open(target_source, "rb+")
+ else:
+ handle = target_source.get_fileobj()
+
+ new_index = ''.join(new_index)
+ new_data = ''.join(new_data)
+
+ handle.seek(start, 0)
+ # +12 is len(key) long, data_offset long, data_offset len long
+ handle.write(struct.pack(">%isLL%is%is%isL%is" %
+ (len(cls.header_pre_magic),
+ len(new_index),
+ len(new_data),
+ len(cls.trailer_pre_magic),
+ len(cls.trailer_post_magic)),
+ cls.header_pre_magic,
+ len(new_index),
+ len(new_data),
+ new_index,
+ new_data,
+ cls.trailer_pre_magic,
+ # the fun one; 16 for the footer, 8 for index/data longs,
+ # + index/data chunks.
+ len(new_index) + len(new_data) + 24,
+ cls.trailer_post_magic))
+
+ handle.truncate()
+ handle.close()
+ return Xpak(target_source)
+
+ def _load_offsets(self):
+ fd = self._fd
+ index_start, index_len, data_len = self._check_magic(fd)
+ data_start = index_start + index_len
+ keys_dict = OrderedDict()
+ while index_len:
+ key_len = struct.unpack(">L", fd.read(4))[0]
+ key = fd.read(key_len)
+ if len(key) != key_len:
+ raise MalformedXpak(
+ "tried reading key %i of len %i, but hit EOF" % (
+ len(keys_dict) + 1, key_len))
+ try:
+ offset, data_len = struct.unpack(">LL", fd.read(8))
+ except struct.error:
+ raise MalformedXpak(
+ "key %i, tried reading data offset/len but hit EOF" % (
+ len(keys_dict) + 1))
+ keys_dict[key] = (data_start + offset, data_len)
+ index_len -= (key_len + 12) # 12 for key_len, offset, data_len longs
+
+ self._keys_dict = keys_dict
+
+ def _check_magic(self, fd):
+ fd.seek(-16, 2)
+ try:
+ pre, size, post = struct.unpack(
+ self.trailer_parser, fd.read(self.trailer_size))
+ if pre != self.trailer_pre_magic or post != self.trailer_post_magic:
+ raise MalformedXpak(
+ "not an xpak segment, trailer didn't match: %r" % fd)
+ except struct.error:
+ raise MalformedXpak(
+ "not an xpak segment, failed parsing trailer: %r" % fd)
+
+ # this is a bit daft, but the format seems to intentionally
+        # have an off-by-8 in the offset address, presumably because the
+        # header was added after the fact; either way we go +8 to
+ # check the header magic.
+ fd.seek(-(size + 8), 2)
+ self.xpak_start = fd.tell()
+ try:
+ pre, index_len, data_len = struct.unpack(
+ self.header_parser, fd.read(self.header_size))
+ if pre != self.header_pre_magic:
+ raise MalformedXpak(
+ "not an xpak segment, header didn't match: %r" % fd)
+ except struct.error:
+ raise MalformedXpak(
+ "not an xpak segment, failed parsing header: %r" % fd)
+
+ return self.xpak_start + self.header_size, index_len, data_len
+
+ def keys(self):
+ return list(self.iterkeys())
+
+ def values(self):
+ return list(self.itervalues())
+
+ def items(self):
+ return list(self.iteritems())
+
+ def __len__(self):
+ return len(self._keys_dict)
+
+ def __contains__(self, key):
+ return key in self._keys_dict
+
+ def __nonzero__(self):
+ return bool(self._keys_dict)
+
+ def __iter__(self):
+ return iter(self._keys_dict)
+
+ def iterkeys(self):
+ return self._keys_dict.iterkeys()
+
+ def itervalues(self):
+ fd = self._fd
+ return (self._get_data(fd, *v) for v in self._keys_dict.itervalues())
+
+ def iteritems(self):
+ # note that it's an OrderedDict, so this works.
+ fd = self._fd
+ return (
+ (k, self._get_data(fd, *v))
+ for k, v in self._keys_dict.iteritems())
+
+ def __getitem__(self, key):
+ return self._get_data(self._fd, *self._keys_dict[key])
+
+ def __delitem__(self, key):
+ del self._keys_dict[key]
+
+ def __setitem__(self, key, val):
+ self._keys_dict[key] = val
+ return val
+
+ def get(self, key, default=None):
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+ def pop(self, key, *a):
+        # faster than the exception form...
+ l = len(a)
+ if l > 1:
+ raise TypeError("pop accepts 1 or 2 args only")
+ if key in self._keys_dict:
+ o = self._keys_dict.pop(key)
+ elif l:
+ o = a[0]
+ else:
+ raise KeyError(key)
+ return o
+
+ def _get_data(self, fd, offset, data_len):
+ # optimization for file objs; they cache tell position, but
+ # pass through all seek calls (nice, eh?) so we rely on that
+        # for cutting down on unneeded seeks; userland comparison being
+        # far cheaper than an actual syscall seek
+ if fd.tell() != offset:
+ fd.seek(offset, 0)
+ assert fd.tell() == offset
+ r = fd.read(data_len)
+ assert len(r) == data_len
+ return r
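
To make the format comment at the top of xpak.py concrete, here is a small worked example that builds a one-entry segment with the same struct formats write_xpak uses and checks the trailer arithmetic _check_magic relies on. Python 2, to match the module; purely illustrative and not part of the commit:

    import struct

    key, val = "CATEGORY", "sys-apps"
    # index entry: 4 (key len) + 8 (key) + 4 (data offset) + 4 (data len) = 20 bytes
    index = struct.pack(">L%isLL" % len(key), len(key), key, 0, len(val))
    segment = (struct.pack(">8sLL", "XPAKPACK", len(index), len(val))
               + index + val
               + struct.pack(">8sL4s", "XPAKSTOP", len(index) + len(val) + 24, "STOP"))

    # header (16) + index (20) + data (8) + trailer (16) = 60 bytes total
    assert len(segment) == 60
    # _check_magic seeks back (offset + 8) bytes from EOF; that lands on the header magic
    assert segment[-(52 + 8):][:8] == "XPAKPACK"
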
diff --git a/pkgcore/cache/__init__.py b/pkgcore/cache/__init__.py
new file mode 100644
index 0000000..2867bc2
--- /dev/null
+++ b/pkgcore/cache/__init__.py
@@ -0,0 +1,6 @@
+# Copyright: 2005 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+cache subsystem, typically used for storing package metadata
+"""
diff --git a/pkgcore/cache/anydbm.py b/pkgcore/cache/anydbm.py
new file mode 100644
index 0000000..1f28c0b
--- /dev/null
+++ b/pkgcore/cache/anydbm.py
@@ -0,0 +1,79 @@
+# Copyright: 2005 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+anydbm backend
+"""
+
+anydbm_module = __import__("anydbm")
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+import os
+from pkgcore.cache import fs_template, errors
+
+
+class database(fs_template.FsBased):
+
+    """anydbm based cache backend, autocommitting"""
+
+ autocommits = True
+ cleanse_keys = True
+
+ def __init__(self, *args, **config):
+ self._db = None
+ super(database, self).__init__(*args, **config)
+
+ default_db = config.get("dbtype","anydbm")
+ if not default_db.startswith("."):
+ default_db = '.' + default_db
+
+ self._db_path = os.path.join(
+ self.location, fs_template.gen_label(self.label) + default_db)
+ self._db = None
+
+ try:
+ self._db = anydbm_module.open(self._db_path, "w", self._perms)
+ except anydbm_module.error:
+ # XXX handle this at some point
+ try:
+ self._ensure_dirs()
+ self._ensure_dirs(self._db_path)
+ self._ensure_access(self._db_path)
+ except (OSError, IOError), e:
+ raise errors.InitializationError(self.__class__, e)
+
+ # try again if failed
+ try:
+ if self._db is None:
+ self._db = anydbm_module.open(
+ self._db_path, "c", self._perms)
+ except anydbm_module.error, e:
+ raise errors.InitializationError(self.__class__, e)
+ __init__.__doc__ = fs_template.FsBased.__init__.__doc__
+
+ def iteritems(self):
+ return self._db.iteritems()
+
+ def _getitem(self, cpv):
+ # we override getitem because it's just a cpickling of the
+ # data handed in.
+ return pickle.loads(self._db[cpv])
+
+ def _setitem(self, cpv, values):
+ self._db[cpv] = pickle.dumps(values, pickle.HIGHEST_PROTOCOL)
+
+ def _delitem(self, cpv):
+ del self._db[cpv]
+
+ def iterkeys(self):
+ return iter(self._db)
+
+ def __contains__(self, cpv):
+ return cpv in self._db
+
+ def __del__(self):
+ if self._db is not None:
+ self._db.sync()
+ self._db.close()
diff --git a/pkgcore/cache/cdb.py b/pkgcore/cache/cdb.py
new file mode 100644
index 0000000..0ad291a
--- /dev/null
+++ b/pkgcore/cache/cdb.py
@@ -0,0 +1,109 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Jason Stubbs <jstubbs@gentoo.org>
+# License: GPL2
+
+"""
+cdb backend
+"""
+
+cdb_module = __import__("cdb")
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+import copy
+import os
+
+from pkgcore.cache import fs_template, errors
+
+
+class database(fs_template.FsBased):
+
+    """cdb cache backend, non-autocommitting"""
+
+ autocommits = False
+ cleanse_keys = True
+ serialize_eclasses = False
+
+ def __init__(self, *args, **config):
+ super(database, self).__init__(*args, **config)
+
+ self._db_path = os.path.join(
+ self.location, fs_template.gen_label(self.label) + ".cdb")
+ self._db = None
+ try:
+ self._db = cdb_module.init(self._db_path)
+
+ except cdb_module.error:
+ try:
+ self._ensure_dirs()
+ self._ensure_dirs(self._db_path)
+ self._ensure_access(self._db_path)
+ except (OSError, IOError), e:
+ raise errors.InitializationError(self.__class__, e)
+
+ try:
+ cm = cdb_module.cdbmake(self._db_path, self._db_path+".tmp")
+ cm.finish()
+ self._ensure_access(self._db_path)
+ self._db = cdb_module.init(self._db_path)
+ except cdb_module.error, e:
+ raise errors.InitializationError(self.__class__, e)
+ self._adds = {}
+ self._dels = set()
+ __init__.__doc__ = fs_template.FsBased.__init__.__doc__
+
+ def iteritems(self):
+ self.commit()
+ return iter(self._db.each, None)
+
+ def _getitem(self, cpv):
+ if cpv in self._adds:
+ d = copy.deepcopy(self._adds[cpv])
+ else:
+ d = pickle.loads(self._db[cpv])
+ return d
+
+ def _setitem(self, cpv, values):
+        if cpv in self._dels:
+            self._dels.remove(cpv)
+ self._adds[cpv] = values
+
+ def _delitem(self, cpv):
+ if cpv in self._adds:
+ del self._adds[cpv]
+ self._dels.add(cpv)
+
+ def commit(self):
+ """commit any outstanding transactions"""
+ if not self._adds and not self._dels:
+ return
+ cm = cdb_module.cdbmake(self._db_path, self._db_path+str(os.getpid()))
+ for (key, value) in iter(self._db.each, None):
+ if key in self._dels:
+                self._dels.remove(key)
+ continue
+ if key in self._adds:
+ cm.add(key, pickle.dumps(self._adds.pop(key),
+ pickle.HIGHEST_PROTOCOL))
+ else:
+ cm.add(key, value)
+ for (key, value) in self._adds.iteritems():
+ cm.add(key, pickle.dumps(value, pickle.HIGHEST_PROTOCOL))
+ cm.finish()
+ self._ensure_access(self._db_path)
+ self._db = cdb_module.init(self._db_path)
+ self._adds = {}
+        self._dels = set()
+
+ def iterkeys(self):
+ self.commit()
+ return iter(self._db.keys())
+
+ def __contains__(self, cpv):
+ return cpv not in self._dels and (cpv in self._adds or cpv in self._db)
+
+ def __del__(self):
+ if getattr(self, "_db", None):
+ self.commit()
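
commit() above rebuilds the cdb on every flush: existing records are copied across unless they were deleted or superseded, and any remaining pending adds are appended afterwards. The same merge in plain-dict form, illustrative only and with no cdb involved (note it consumes the adds mapping, just as the original pops self._adds):

    def merge_pending(existing, adds, dels):
        merged = {}
        for key, value in existing.items():
            if key in dels:
                continue                     # deleted entries are simply not copied
            merged[key] = adds.pop(key, value)
        merged.update(adds)                  # brand-new entries go in last
        return merged

    # merge_pending({"a": 1, "b": 2}, {"b": 20, "c": 3}, {"a"}) -> {"b": 20, "c": 3}
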
diff --git a/pkgcore/cache/errors.py b/pkgcore/cache/errors.py
new file mode 100644
index 0000000..f54c462
--- /dev/null
+++ b/pkgcore/cache/errors.py
@@ -0,0 +1,42 @@
+# Copyright: 2005 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+cache subsystem exceptions
+"""
+
+class CacheError(Exception):
+ pass
+
+class InitializationError(CacheError):
+ def __init__(self, class_name, error):
+ CacheError.__init__(self, "Creation of instance %s failed due to %s" %
+ (class_name, error))
+ self.error, self.class_name = error, class_name
+
+
+class CacheCorruption(CacheError):
+ def __init__(self, key, ex):
+ CacheError.__init__(self, "%s is corrupt: %s" % (key, ex))
+ self.key, self.ex = key, ex
+
+
+class GeneralCacheCorruption(CacheError):
+ def __init__(self, ex):
+ CacheError.__init__(self, "corruption detected: %s" % (ex,))
+ self.ex = ex
+
+
+class InvalidRestriction(CacheError):
+ def __init__(self, key, restriction, exception=None):
+ if exception is None:
+ exception = ''
+ CacheError.__init__(self, "%s:%s is not valid: %s" %
+ (key, restriction, exception))
+ self.key, self.restriction, self.ex = key, restriction, exception
+
+
+class ReadOnly(CacheError):
+ def __init__(self, info=''):
+ CacheError.__init__(self, "cache is non-modifiable %s" % (info,))
+ self.info = info
diff --git a/pkgcore/cache/flat_hash.py b/pkgcore/cache/flat_hash.py
new file mode 100644
index 0000000..0788243
--- /dev/null
+++ b/pkgcore/cache/flat_hash.py
@@ -0,0 +1,136 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+per key file based backend
+"""
+
+import os, stat, errno
+from pkgcore.cache import fs_template
+from pkgcore.cache import errors
+from pkgcore.config import ConfigHint
+from snakeoil.osutils import join as pjoin, readlines
+
+class database(fs_template.FsBased):
+
+ """
+ stores cache entries in key=value form, stripping newlines
+ """
+
+ # TODO different way of passing in default auxdbkeys and location
+ pkgcore_config_type = ConfigHint(
+ {'readonly': 'bool', 'location': 'str', 'label': 'str',
+ 'auxdbkeys': 'list'},
+ required=['location', 'label'],
+ positional=['location', 'label'],
+ typename='cache')
+
+
+ autocommits = True
+
+ def __init__(self, *args, **config):
+ super(database, self).__init__(*args, **config)
+ self.location = self._format_location()
+
+ if not os.path.exists(self.location):
+ self._ensure_dirs()
+ __init__.__doc__ = fs_template.FsBased.__init__.__doc__
+
+ def _format_location(self):
+ return pjoin(self.location,
+ self.label.lstrip(os.path.sep).rstrip(os.path.sep))
+
+ def _getitem(self, cpv):
+ path = pjoin(self.location, cpv)
+ try:
+ data = readlines(path, True, True, True)
+ except (IOError, OSError), e:
+ raise errors.CacheCorruption(cpv, e)
+ if data is None:
+ raise KeyError(cpv)
+ try:
+ d = self._parse_data(data, data.mtime)
+ except (OSError, ValueError), e:
+ raise errors.CacheCorruption(cpv, e)
+ return d
+
+ def _parse_data(self, data, mtime):
+ d = self._cdict_kls()
+ known = self._known_keys
+ for x in data:
+ k, v = x.split("=", 1)
+ if k in known:
+ d[k] = v
+
+ if self._mtime_used:
+ d["_mtime_"] = long(mtime)
+ return d
+
+ def _setitem(self, cpv, values):
+ # might seem weird, but we rely on the trailing +1; this
+ # makes it behave properly for any cache depth (including no depth)
+ s = cpv.rfind("/") + 1
+ fp = pjoin(self.location,
+ cpv[:s], ".update.%i.%s" % (os.getpid(), cpv[s:]))
+ try:
+ myf = open(fp, "w", 32768)
+ except IOError, ie:
+ if ie.errno == errno.ENOENT:
+ if not self._ensure_dirs(cpv):
+ raise errors.CacheCorruption(
+ cpv, 'error creating directory for %r' % (fp,))
+ try:
+ myf = open(fp, "w", 32768)
+ except (OSError, IOError), e:
+ raise errors.CacheCorruption(cpv, e)
+ else:
+ raise errors.CacheCorruption(cpv, ie)
+ except OSError, e:
+ raise errors.CacheCorruption(cpv, e)
+
+ for k, v in values.iteritems():
+ if k != "_mtime_":
+ myf.writelines("%s=%s\n" % (k, v))
+
+ myf.close()
+ if self._mtime_used:
+ self._ensure_access(fp, mtime=values["_mtime_"])
+ else:
+ self._ensure_access(fp)
+
+ #update written. now we move it.
+
+ new_fp = pjoin(self.location, cpv)
+ try:
+ os.rename(fp, new_fp)
+ except (OSError, IOError), e:
+ os.remove(fp)
+ raise errors.CacheCorruption(cpv, e)
+
+ def _delitem(self, cpv):
+ try:
+ os.remove(pjoin(self.location, cpv))
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ raise KeyError(cpv)
+ else:
+ raise errors.CacheCorruption(cpv, e)
+
+ def __contains__(self, cpv):
+ return os.path.exists(pjoin(self.location, cpv))
+
+ def iterkeys(self):
+ """generator for walking the dir struct"""
+ dirs = [self.location]
+ len_base = len(self.location)
+ while dirs:
+ d = dirs.pop(0)
+ for l in os.listdir(d):
+ if l.endswith(".cpickle"):
+ continue
+ p = pjoin(d, l)
+ st = os.lstat(p)
+ if stat.S_ISDIR(st.st_mode):
+ dirs.append(p)
+ continue
+ yield p[len_base+1:]
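
The on-disk form handled above is deliberately simple: one file per cpv beneath the cache location, one "KEY=value" line per stored key; writes go to a ".update.<pid>.<name>" sibling and are renamed into place. A minimal round-trip sketch of that format in plain Python; names are illustrative and the sorting is only there to make the example deterministic (the cache itself writes in dict order):

    def dumps_entry(values):
        return "".join("%s=%s\n" % (k, v)
                       for k, v in sorted(values.items()) if k != "_mtime_")

    def loads_entry(text, known_keys):
        d = {}
        for line in text.splitlines():
            k, v = line.split("=", 1)
            if k in known_keys:
                d[k] = v
        return d

    # dumps_entry({"SLOT": "0", "DEPEND": ">=dev-libs/glib-2"})
    # -> 'DEPEND=>=dev-libs/glib-2\nSLOT=0\n'
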
diff --git a/pkgcore/cache/fs_template.py b/pkgcore/cache/fs_template.py
new file mode 100644
index 0000000..209fe59
--- /dev/null
+++ b/pkgcore/cache/fs_template.py
@@ -0,0 +1,80 @@
+# Copyright: 2005-2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+template for fs based backends
+"""
+
+import os
+from pkgcore.cache import template
+from pkgcore.os_data import portage_gid
+from snakeoil.osutils import ensure_dirs
+
+class FsBased(template.database):
+ """Template wrapping fs needed options.
+
+ Provides _ensure_access as a way to attempt to ensure files have
+ the specified owners/perms.
+ """
+
+ def __init__(self, *args, **config):
+ """
+        throws InitializationError if needed args aren't specified
+
+ @keyword gid: defaults to L{pkgcore.os_data.portage_gid},
+ gid to force all entries to
+        @keyword perms: defaults to 0664, mode to force all entries to"""
+
+ for x, y in (("gid", portage_gid), ("perms", 0664)):
+ if x in config:
+ setattr(self, "_"+x, config[x])
+ del config[x]
+ else:
+ setattr(self, "_"+x, y)
+ super(FsBased, self).__init__(*args, **config)
+
+ if self.label.startswith(os.path.sep):
+ # normpath.
+ self.label = os.path.sep + os.path.normpath(
+ self.label).lstrip(os.path.sep)
+
+ self._mtime_used = "_mtime_" in self._known_keys
+
+ __init__.__doc__ = "\n".join(
+ x.lstrip() for x in __init__.__doc__.split("\n") + [
+ y.lstrip().replace("@param", "@keyword")
+ for y in template.database.__init__.__doc__.split("\n")
+ if "@param" in y])
+
+ def _ensure_access(self, path, mtime=-1):
+ """Ensure access to a path.
+
+ @param mtime: if specified change mtime to this value.
+ @return: C{False} if unable to guarantee access, C{True} otherwise.
+ """
+ try:
+ os.chown(path, -1, self._gid)
+ os.chmod(path, self._perms)
+ if mtime:
+ mtime = long(mtime)
+ os.utime(path, (mtime, mtime))
+ except (OSError, IOError):
+ return False
+ return True
+
+ def _ensure_dirs(self, path=None):
+ """Make sure a path relative to C{self.location} exists."""
+ if path is not None:
+ path = self.location + os.path.sep + os.path.dirname(path)
+ else:
+ path = self.location
+ return ensure_dirs(path, mode=0775, minimal=False)
+
+def gen_label(label):
+ """Turn a user-defined label into something usable as a filename."""
+ if label.find(os.path.sep) == -1:
+ return label
+ label = label.strip("\"").strip("'")
+ label = os.path.join(*(label.rstrip(os.path.sep).split(os.path.sep)))
+ tail = os.path.split(label)[1]
+ return "%s-%X" % (tail, abs(label.__hash__()))
diff --git a/pkgcore/cache/metadata.py b/pkgcore/cache/metadata.py
new file mode 100644
index 0000000..22db70c
--- /dev/null
+++ b/pkgcore/cache/metadata.py
@@ -0,0 +1,201 @@
+# Copyright: 2005-2006 Brian Harring <ferringb@gmail.com>
+# Copyright: 2006 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+"""
+cache backend designed for an rsynced tree's pregenerated metadata.
+"""
+
+import os
+import errno
+from pkgcore.cache import flat_hash, errors
+from pkgcore.config import ConfigHint
+from pkgcore.ebuild import eclass_cache
+from snakeoil.osutils import join as pjoin
+from snakeoil.mappings import ProtectedDict
+
+
+# store the current key order *here*.
+class database(flat_hash.database):
+ """
+ Compatibility with (older) portage-generated caches.
+
+    Autodetects per entry whether it is a
+    L{flat_hash<pkgcore.cache.flat_hash.database>} or flat_list style
+    entry, and converts the old (and incomplete) INHERITED field
+    to _eclasses_ as required.
+ """
+ complete_eclass_entries = False
+
+ auxdbkeys_order = ('DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
+ 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
+ 'KEYWORDS', 'INHERITED', 'IUSE', 'CDEPEND',
+ 'PDEPEND', 'PROVIDE', 'EAPI')
+
+ # this is the old cache format, flat_list. hardcoded, and must
+ # remain that way.
+ magic_line_count = 22
+
+ autocommits = True
+
+ def __init__(self, location, *args, **config):
+ self.base_loc = location
+ super(database, self).__init__(location, *args, **config)
+ self.ec = eclass_cache.cache(pjoin(self.base_loc, "eclass"),
+ self.base_loc)
+ self.hardcoded_auxdbkeys_order = tuple((idx, key)
+ for idx, key in enumerate(self.auxdbkeys_order)
+ if key in self._known_keys)
+
+ __init__.__doc__ = flat_hash.database.__init__.__doc__.replace(
+ "@keyword location", "@param location")
+
+
+ def _format_location(self):
+ return pjoin(self.location, "metadata", "cache")
+
+ def __getitem__(self, cpv):
+ d = flat_hash.database.__getitem__(self, cpv)
+
+ if "_eclasses_" not in d:
+ if "INHERITED" in d:
+ d["_eclasses_"] = self.ec.get_eclass_data(
+ d["INHERITED"].split())
+ del d["INHERITED"]
+ else:
+ d["_eclasses_"] = self.reconstruct_eclasses(cpv, d["_eclasses_"])
+
+ return d
+
+ def _parse_data(self, data, mtime):
+ data = list(data)
+ if len(data) != self.magic_line_count:
+ raise errors.GeneralCacheCorruption(
+ "wrong line count, requires %i, got %i" %
+ (self.magic_line_count, len(data)))
+
+ # this one's interesting.
+ d = self._cdict_kls()
+ for idx, key in self.hardcoded_auxdbkeys_order:
+ d[key] = data[idx].strip()
+
+ if self._mtime_used:
+ d["_mtime_"] = mtime
+ return d
+
+ def _setitem(self, cpv, values):
+ values = ProtectedDict(values)
+
+ # hack. proper solution is to make this a __setitem__ override, since
+ # template.__setitem__ serializes _eclasses_, then we reconstruct it.
+ eclasses = values.pop('_eclasses_', None)
+ if eclasses is not None:
+ eclasses = self.reconstruct_eclasses(cpv, eclasses)
+ values["INHERITED"] = ' '.join(eclasses)
+
+ s = cpv.rfind("/")
+ fp = pjoin(
+ self.location, cpv[:s],".update.%i.%s" % (os.getpid(), cpv[s+1:]))
+ try:
+ myf = open(fp, "w")
+ except (OSError, IOError), e:
+ if errno.ENOENT == e.errno:
+ try:
+ self._ensure_dirs(cpv)
+ myf = open(fp,"w")
+ except (OSError, IOError),e:
+ raise errors.CacheCorruption(cpv, e)
+ else:
+ raise errors.CacheCorruption(cpv, e)
+
+ count = 0
+ for idx, key in self.hardcoded_auxdbkeys_order:
+ myf.write("%s%s" % ("\n" * (idx - count), values.get(key, "")))
+ count = idx
+ myf.write("\n" * (self.magic_line_count - count))
+
+ myf.close()
+ self._set_mtime(fp, values, eclasses)
+
+ #update written. now we move it.
+ new_fp = pjoin(self.location, cpv)
+ try:
+ os.rename(fp, new_fp)
+ except (OSError, IOError), e:
+ os.remove(fp)
+ raise errors.CacheCorruption(cpv, e)
+
+ def _set_mtime(self, fp, values, eclasses):
+ if self._mtime_used:
+ self._ensure_access(fp, mtime=values["_mtime_"])
+
+
+class paludis_flat_list(database):
+
+ """
+    (Hopefully) write a paludis-specific form of the flat_list format cache.
+    Not very well tested.
+
+    The difference from a normal flat_list cache is that the mtime is normally
+    set to the ebuild's mtime, while for paludis it is the max mtime of the
+    eclasses/ebuild involved.
+ """
+
+ pkgcore_config_type = ConfigHint(
+ {'readonly': 'bool', 'location': 'str', 'label': 'str'},
+ required=['location', 'label'],
+ positional=['location', 'label'],
+ typename='cache')
+
+ def __init__(self, location, *args, **config):
+ config['auxdbkeys'] = self.auxdbkeys_order
+ database.__init__(self, location, *args, **config)
+
+ def _set_mtime(self, fp, values, eclasses):
+ mtime = values.get("_mtime_", 0)
+
+ if eclasses:
+ self._ensure_access(
+ fp,
+ mtime=max(max(mtime for path, mtime in eclasses.itervalues()),
+ mtime))
+ else:
+ self._ensure_access(fp, mtime)
+
+
+class protective_database(database):
+
+ def _parse_data(self, data, mtime):
+ # easy attempt first.
+ data = list(data)
+ if len(data) != self.magic_line_count:
+ return flat_hash.database._parse_data(self, data, mtime)
+
+ # this one's interesting.
+ d = self._cdict_kls()
+
+ for line in data:
+ # yes, meant to iterate over a string.
+ hashed = False
+ for idx, c in enumerate(line):
+ if not c.isalpha():
+ if c == "=" and idx > 0:
+ hashed = True
+ d[line[:idx]] = line[idx + 1:]
+ elif c == "_" or c.isdigit():
+ continue
+ break
+ elif not c.isupper():
+ break
+
+ if not hashed:
+ # non hashed.
+ d.clear()
+ for idx, key in self.hardcoded_auxdbkeys_order:
+ d[key] = data[idx].strip()
+ break
+
+ if self._mtime_used:
+ d["_mtime_"] = mtime
+ return d
+
+
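Unlike flat_hash, the metadata/cache ("flat_list") entries parsed above carry no KEY= prefixes: the line position alone identifies the key, following auxdbkeys_order, and every entry is padded out to magic_line_count (22) lines. A small illustrative parser over that fixed order, not part of the module:

    AUXDBKEYS_ORDER = ('DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
                       'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
                       'KEYWORDS', 'INHERITED', 'IUSE', 'CDEPEND',
                       'PDEPEND', 'PROVIDE', 'EAPI')

    def parse_flat_list(lines):
        if len(lines) != 22:
            raise ValueError("wrong line count: %i" % len(lines))
        return dict((key, lines[idx].strip())
                    for idx, key in enumerate(AUXDBKEYS_ORDER)
                    if lines[idx].strip())

    # lines[2] holds SLOT, lines[9] holds INHERITED, and so on;
    # database.__getitem__ then converts INHERITED into _eclasses_.
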
diff --git a/pkgcore/cache/sql_template.py b/pkgcore/cache/sql_template.py
new file mode 100644
index 0000000..ad8a709
--- /dev/null
+++ b/pkgcore/cache/sql_template.py
@@ -0,0 +1,327 @@
+# Copyright: 2005 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+template for sql backends- needs work
+"""
+
+from pkgcore.cache import template, errors
+
+class SQLDatabase(template.database):
+ """template class for RDBM based caches
+
+ This class is designed such that derivatives don't have to change
+ much code, mostly constant strings.
+
+ _BaseError must be an exception class that all Exceptions thrown
+ from the derived RDBMS are derived from.
+
+    SCHEMA_INSERT_CPV_INTO_PACKAGE should be modified dependent on the
+    RDBMS, as should SCHEMA_PACKAGE_CREATE; basically you need to deal
+ with creation of a unique pkgid. If the dbapi2 rdbms class has a
+ method of recovering that id, then modify _insert_cpv to remove
+ the extra select.
+
+ Creation of a derived class involves supplying _initdb_con, and
+ table_exists. Additionally, the default schemas may have to be
+ modified.
+ """
+
+ SCHEMA_PACKAGE_NAME = "package_cache"
+ SCHEMA_PACKAGE_CREATE = (
+ "CREATE TABLE %s ( "
+ "pkgid INTEGER PRIMARY KEY, label VARCHAR(255), cpv VARCHAR(255), "
+ "UNIQUE(label, cpv))" % SCHEMA_PACKAGE_NAME)
+ SCHEMA_PACKAGE_DROP = "DROP TABLE %s" % SCHEMA_PACKAGE_NAME
+
+ SCHEMA_VALUES_NAME = "values_cache"
+ SCHEMA_VALUES_CREATE = (
+ "CREATE TABLE %s ( "
+ "pkgid integer references %s (pkgid) on delete cascade, "
+ "key varchar(255), value text, UNIQUE(pkgid, key))" % (
+ SCHEMA_VALUES_NAME, SCHEMA_PACKAGE_NAME))
+ SCHEMA_VALUES_DROP = "DROP TABLE %s" % SCHEMA_VALUES_NAME
+ SCHEMA_INSERT_CPV_INTO_PACKAGE = (
+ "INSERT INTO %s (label, cpv) VALUES(%%s, %%s)" % SCHEMA_PACKAGE_NAME)
+
+ _BaseError = ()
+ _dbClass = None
+
+ autocommits = False
+# cleanse_keys = True
+
+ # boolean indicating if the derived RDBMS class supports replace syntax
+ _supports_replace = False
+
+ def __init__(self, location, label, auxdbkeys, *args, **config):
+ """initialize the instance.
+ derived classes shouldn't need to override this"""
+
+ self.db = None
+ super(SQLDatabase, self).__init__(location, label, auxdbkeys,
+ *args, **config)
+
+ config.setdefault("host","127.0.0.1")
+ config.setdefault("autocommit", self.autocommits)
+ self._initdb_con(config)
+
+ self.label = self._sfilter(self.label)
+
+ def _dbconnect(self, config):
+ """Initialize the internal db connection and cursor.
+
+ Should be overridden if the derived class needs special
+ parameters for initializing the db connection or cursor.
+ """
+ self.db = self._dbClass(**config)
+ self.con = self.db.cursor()
+
+ def _initdb_con(self, config):
+ """ensure needed tables are in place.
+
+ If the derived class needs a different set of table creation
+        commands, overload the appropriate SCHEMA_ attributes. If it
+ needs additional execution beyond that, override this.
+ """
+
+ self._dbconnect(config)
+ if not self._table_exists(self.SCHEMA_PACKAGE_NAME):
+ if self.readonly:
+ raise errors.ReadOnly(
+ "table %s doesn't exist" % self.SCHEMA_PACKAGE_NAME)
+ try:
+ self.con.execute(self.SCHEMA_PACKAGE_CREATE)
+ except self._BaseError, e:
+ raise errors.InitializationError(self.__class__, e)
+
+ if not self._table_exists(self.SCHEMA_VALUES_NAME):
+ if self.readonly:
+ raise errors.ReadOnly(
+ "table %s doesn't exist" % self.SCHEMA_VALUES_NAME)
+ try:
+ self.con.execute(self.SCHEMA_VALUES_CREATE)
+ except self._BaseError, e:
+ raise errors.InitializationError(self.__class__, e)
+
+ def _table_exists(self, tbl):
+        """return True if a table exists;
+        derived classes must override this"""
+ raise NotImplementedError
+
+ def _sfilter(self, string):
+ """meta escaping, returns quoted string for use in sql statements"""
+ return "\"%s\"" % string.replace("\\","\\\\").replace("\"","\\\"")
+
+ def _getitem(self, cpv):
+ try:
+ self.con.execute(
+ "SELECT key, value FROM %s NATURAL JOIN %s "
+ "WHERE label=%s AND cpv=%s" % (
+ self.SCHEMA_PACKAGE_NAME, self.SCHEMA_VALUES_NAME,
+ self.label, self._sfilter(cpv)))
+ except self._BaseError, e:
+ raise errors.CacheCorruption(self, cpv, e)
+
+ rows = self.con.fetchall()
+
+ if not rows:
+ raise KeyError(cpv)
+
+ vals = dict((k,"") for k in self._known_keys)
+ vals.update(dict(rows))
+ return vals
+
+ def _delitem(self, cpv):
+        """delete a cpv cache entry;
+        derived RDBMS classes *must* either support cascaded deletes, or
+        override this method"""
+ """
+
+ try:
+ try:
+ self.con.execute("DELETE FROM %s WHERE label=%s AND cpv=%s" % \
+ (self.SCHEMA_PACKAGE_NAME, self.label, self._sfilter(cpv)))
+ if self.autocommits:
+ self.commit()
+ except self._BaseError, e:
+ raise errors.CacheCorruption(self, cpv, e)
+ if self.con.rowcount <= 0:
+ raise KeyError(cpv)
+ except Exception:
+ if not self.autocommits:
+ self.db.rollback()
+            # yes, this can roll back a lot more than just the delete. deal.
+ raise
+
+ def __del__(self):
+ if self.db is not None:
+ self.commit()
+ self.db.close()
+
+ def _setitem(self, cpv, values):
+ try:
+ # insert.
+ try:
+ pkgid = self._insert_cpv(cpv)
+ except self._BaseError, e:
+ raise errors.CacheCorruption(cpv, e)
+
+ # __getitem__ fills out missing values,
+ # so we store only what's handed to us and is a known key
+ db_values = []
+ for key in self._known_keys:
+ if values.get(key, "") != "":
+ db_values.append({"key":key, "value":values[key]})
+
+ if db_values:
+ try:
+ self.con.executemany(
+ "INSERT INTO %s (pkgid, key, value) "
+ "VALUES(\"%s\", %%(key)s, %%(value)s)" %
+ (self.SCHEMA_VALUES_NAME, str(pkgid)), db_values)
+ except self._BaseError, e:
+ raise errors.CacheCorruption(cpv, e)
+ if self.autocommits:
+ self.commit()
+
+ except Exception:
+ if not self.autocommits:
+ try:
+ self.db.rollback()
+ except self._BaseError:
+ pass
+ raise
+
+
+ def _insert_cpv(self, cpv):
+ """Insert a cpv in the db.
+
+ Uses SCHEMA_INSERT_CPV_INTO_PACKAGE, which must be overloaded
+ if the table definition doesn't support auto-increment columns
+ for pkgid.
+
+        @return: the cpv's new pkgid
+
+ note this doesn't commit the transaction. The caller is expected to.
+ """
+
+ cpv = self._sfilter(cpv)
+ if self._supports_replace:
+ query_str = self.SCHEMA_INSERT_CPV_INTO_PACKAGE.replace(
+ "INSERT", "REPLACE", 1)
+ else:
+ # just delete it.
+ try:
+ del self[cpv]
+ except (errors.CacheCorruption, KeyError):
+ pass
+ query_str = self.SCHEMA_INSERT_CPV_INTO_PACKAGE
+
+ try:
+ self.con.execute(query_str % (self.label, cpv))
+ except self._BaseError:
+ self.db.rollback()
+ raise
+
+ self.con.execute("SELECT pkgid FROM %s WHERE label=%s AND cpv=%s" % \
+ (self.SCHEMA_PACKAGE_NAME, self.label, cpv))
+
+ if self.con.rowcount != 1:
+ raise errors.CacheCorruption(
+ cpv, "Tried to insert the cpv, but found "
+ " %i matches upon the following select!" % (
+ self.con.rowcount))
+ return self.con.fetchone()[0]
+
+ def __contains__(self, cpv):
+ if not self.autocommits:
+ try:
+ self.commit()
+ except self._BaseError, e:
+ raise errors.GeneralCacheCorruption(e)
+
+ try:
+ self.con.execute("SELECT cpv FROM %s WHERE label=%s AND cpv=%s" % \
+ (self.SCHEMA_PACKAGE_NAME, self.label, self._sfilter(cpv)))
+ except self._BaseError, e:
+ raise errors.GeneralCacheCorruption(e)
+ return self.con.rowcount > 0
+
+ def iterkeys(self):
+ if not self.autocommits:
+ try:
+ self.commit()
+ except self._BaseError, e:
+ raise errors.GeneralCacheCorruption(e)
+
+ try:
+ self.con.execute("SELECT cpv FROM %s WHERE label=%s" %
+ (self.SCHEMA_PACKAGE_NAME, self.label))
+ except self._BaseError, e:
+ raise errors.GeneralCacheCorruption(e)
+
+ for x in self.con.fetchall():
+ yield x[0]
+
+ def iteritems(self):
+ try:
+ self.con.execute(
+ "SELECT cpv, key, value FROM %s NATURAL JOIN %s WHERE label=%s"
+ % (self.SCHEMA_PACKAGE_NAME, self.SCHEMA_VALUES_NAME,
+ self.label))
+ except self._BaseError, e:
+ # XXX this makes no sense
+ raise errors.CacheCorruption(self, 'iteritems', e)
+
+ oldcpv = None
+ l = []
+ for cpv, key, v in self.con.fetchall():
+ if oldcpv != cpv:
+ if oldcpv is not None:
+ d = dict(l)
+ if "_eclasses_" in d:
+ d["_eclasses_"] = self.reconstruct_eclasses(
+ oldcpv, d["_eclasses_"])
+ yield oldcpv, d
+ l = []
+ oldcpv = cpv
+ l.append((key, v))
+
+ if oldcpv is not None:
+ d = dict(l)
+ if "_eclasses_" in d:
+ d["_eclasses_"] = self.reconstruct_eclasses(
+ oldcpv, d["_eclasses_"])
+ yield oldcpv, d
+
+ def commit(self):
+ self.db.commit()
+
+ def get_matches(self, match_dict):
+ query_list = []
+ for k, v in match_dict.items():
+ if k not in self._known_keys:
+ raise errors.InvalidRestriction(
+ k, v, "key isn't known to this cache instance")
+ v = v.replace("%","\\%")
+ v = v.replace(".*","%")
+ query_list.append(
+ "(key=%s AND value LIKE %s)" % (
+ self._sfilter(k), self._sfilter(v)))
+
+ if query_list:
+ query = " AND "+" AND ".join(query_list)
+ else:
+ query = ''
+
+ print (
+ "query = SELECT cpv from package_cache natural join values_cache "
+ "WHERE label=%s %s" % (self.label, query))
+ try:
+ self.con.execute(
+ "SELECT cpv from package_cache natural join values_cache "
+ "WHERE label=%s %s" % (self.label, query))
+ except self._BaseError, e:
+ raise errors.GeneralCacheCorruption(e)
+
+ return [ row[0] for row in self.con.fetchall() ]
diff --git a/pkgcore/cache/sqlite.py b/pkgcore/cache/sqlite.py
new file mode 100644
index 0000000..dbcabbc
--- /dev/null
+++ b/pkgcore/cache/sqlite.py
@@ -0,0 +1,80 @@
+# Copyright: 2005 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+pysqlite <v2 backend
+"""
+
+import os
+from pkgcore.cache import sql_template, fs_template, errors
+sqlite_module = __import__("sqlite")
+
+class database(fs_template.FsBased, sql_template.SQLDatabase):
+
+ SCHEMA_DELETE_NAME = "delete_package_values"
+ SCHEMA_DELETE_TRIGGER = """CREATE TRIGGER %s AFTER DELETE on %s
+ begin
+ DELETE FROM %s WHERE pkgid=old.pkgid;
+ end;""" % (SCHEMA_DELETE_NAME, sql_template.SQLDatabase.SCHEMA_PACKAGE_NAME,
+ sql_template.SQLDatabase.SCHEMA_VALUES_NAME)
+
+ _BaseError = sqlite_module.Error
+ _dbClass = sqlite_module
+ _supports_replace = True
+
+ def _dbconnect(self, config):
+ self._dbpath = os.path.join(
+ self.location, fs_template.gen_label(self.label) + ".sqldb")
+ try:
+ self.db = sqlite_module.connect(
+ self._dbpath, mode=self._perms, autocommit=False)
+ if not self._ensure_access(self._dbpath):
+ raise errors.InitializationError(
+ self.__class__, "can't ensure perms on %s" % self._dbpath)
+ self.con = self.db.cursor()
+ except self._BaseError, e:
+ raise errors.InitializationError(self.__class__, e)
+
+ def _initdb_con(self, config):
+ sql_template.SQLDatabase._initdb_con(self, config)
+ try:
+ self.con.execute(
+ "SELECT name FROM sqlite_master "
+ "WHERE type=\"trigger\" AND name=%s" %
+ self._sfilter(self.SCHEMA_DELETE_NAME))
+ if self.con.rowcount == 0:
+ self.con.execute(self.SCHEMA_DELETE_TRIGGER)
+ self.db.commit()
+ except self._BaseError, e:
+ raise errors.InitializationError(self.__class__, e)
+
+ def _table_exists(self, tbl):
+ """return true/false dependant on a tbl existing"""
+ try:
+ self.con.execute(
+ "SELECT name FROM sqlite_master "
+ "WHERE type=\"table\" AND name=%s" %
+ self._sfilter(tbl))
+ except self._BaseError:
+ # XXX crappy.
+ return False
+ return len(self.con.fetchall()) == 1
+
+ # we can avoid an extra query by using the rowid.
+ def _insert_cpv(self, cpv):
+ cpv = self._sfilter(cpv)
+ try:
+ self.con.execute(
+ self.SCHEMA_INSERT_CPV_INTO_PACKAGE.replace(
+ "INSERT", "REPLACE", 1) %
+ (self.label, cpv))
+ except self._BaseError, e:
+ raise errors.CacheCorruption(
+ cpv, "tried to insert a cpv, but failed: %s" % str(e))
+
+ # rowcount also counts the row deleted by REPLACE, hence up to 2
+ if self.con.rowcount <= 0 or self.con.rowcount > 2:
+ raise errors.CacheCorruption(
+ cpv, "tried to insert a cpv, but failed- %i rows modified" %
+ self.con.rowcount)
+ return self.con.lastrowid
diff --git a/pkgcore/cache/template.py b/pkgcore/cache/template.py
new file mode 100644
index 0000000..a1ac94e
--- /dev/null
+++ b/pkgcore/cache/template.py
@@ -0,0 +1,236 @@
+# Copyright: 2005-2006 Brian Harring <ferringb@gmail.com>
+# Author(s): Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+template for cache backend classes
+"""
+
+from pkgcore.cache import errors
+from snakeoil.mappings import ProtectedDict
+from snakeoil.obj import make_SlottedDict_kls
+
+# temp hack for .2
+from pkgcore.ebuild.const import metadata_keys
+metadata_keys = tuple(metadata_keys)
+
+class database(object):
+ # this is for metadata/cache transfer.
+ # basically flags that the cache needs to be updated when transferring cache to cache.
+ # leave this.
+
+ """
+ @ivar autocommits: Controls whether the template commits every update,
+ or queues up updates.
+ @ivar complete_eclass_entries: Specifies if the cache backend stores full
+ eclass data, or partial.
+ @ivar cleanse_keys: Boolean controlling whether the template should drop
+ empty keys for storing.
+ @ivar serialize_eclasses: Boolean controlling whether the template should
+ serialize eclass data itself, or leave it to the derivative.
+ """
+
+ complete_eclass_entries = True
+ autocommits = False
+ cleanse_keys = False
+ serialize_eclasses = True
+
+ def __init__(self, location, label, auxdbkeys=metadata_keys,
+ readonly=False):
+ """
+ initialize the derived class; specifically, store label/keys
+
+ @param location: fs location the cache is stored at
+ @param label: cache label
+ @param auxdbkeys: sequence of allowed keys for each cache entry
+ @param readonly: defaults to False,
+ controls whether the cache is mutable.
+ """
+ self._known_keys = frozenset(auxdbkeys)
+ self._cdict_kls = make_SlottedDict_kls(self._known_keys)
+ self.location = location
+ self.label = label
+ self.readonly = readonly
+ self.sync_rate = 0
+ self.updates = 0
+
+ def __getitem__(self, cpv):
+ """set a cpv to values
+
+ This shouldn't be overriden in derived classes since it
+ handles the __eclasses__ conversion. That said, if the class
+ handles it, they can override it.
+ """
+ if self.updates > self.sync_rate:
+ self.commit()
+ self.updates = 0
+ d = self._getitem(cpv)
+ if self.serialize_eclasses and "_eclasses_" in d:
+ d["_eclasses_"] = self.reconstruct_eclasses(cpv, d["_eclasses_"])
+ return d
+
+ def _getitem(self, cpv):
+ """get cpv's values.
+
+ override this in derived classes.
+ """
+ raise NotImplementedError
+
+ def __setitem__(self, cpv, values):
+ """set a cpv to values
+
+ This shouldn't be overridden in derived classes since it
+ handles the readonly checks.
+ """
+ if self.readonly:
+ raise errors.ReadOnly()
+ if self.cleanse_keys:
+ d = ProtectedDict(values)
+ for k in d.iterkeys():
+ if not d[k]:
+ del d[k]
+ if self.serialize_eclasses and "_eclasses_" in values:
+ d["_eclasses_"] = self.deconstruct_eclasses(d["_eclasses_"])
+ elif self.serialize_eclasses and "_eclasses_" in values:
+ d = ProtectedDict(values)
+ d["_eclasses_"] = self.deconstruct_eclasses(d["_eclasses_"])
+ else:
+ d = values
+ self._setitem(cpv, d)
+ if not self.autocommits:
+ self.updates += 1
+ if self.updates > self.sync_rate:
+ self.commit()
+ self.updates = 0
+
+ def _setitem(self, name, values):
+ """__setitem__ calls this after readonly checks.
+
+ override it in derived classes.
+ note _eclasses_ key *must* be handled.
+ """
+ raise NotImplementedError
+
+ def __delitem__(self, cpv):
+ """delete a key from the cache.
+
+ This shouldn't be overridden in derived classes since it
+ handles the readonly checks.
+ """
+ if self.readonly:
+ raise errors.ReadOnly()
+ if not self.autocommits:
+ self.updates += 1
+ self._delitem(cpv)
+ if self.updates > self.sync_rate:
+ self.commit()
+ self.updates = 0
+
+ def _delitem(self, cpv):
+ """__delitem__ calls this after readonly checks.
+
+ override it in derived classes.
+ """
+ raise NotImplementedError
+
+ def __contains__(self, cpv):
+ raise NotImplementedError
+
+ def has_key(self, cpv):
+ return cpv in self
+
+ def keys(self):
+ return list(self.iterkeys())
+
+ def iterkeys(self):
+ raise NotImplementedError
+
+ def iteritems(self):
+ for x in self.iterkeys():
+ yield (x, self[x])
+
+ def items(self):
+ return list(self.iteritems())
+
+ def sync(self, rate=0):
+ self.sync_rate = rate
+ if rate == 0:
+ self.commit()
+
+ def commit(self):
+ if not self.autocommits:
+ raise NotImplementedError
+
+ def get_matches(self, match_dict):
+ """
+ Generic function for walking the entire cache db, matching
+ restrictions to filter what cpv's are returned.
+
+ Derived classes should override this if they can implement a
+ faster method than pulling each cpv:values pair and checking it.
+ For example, RDBMS derived classes should push the matching
+ logic down to the actual RDBMS.
+ """
+
+ import re
+ restricts = {}
+ for key, match in match_dict.iteritems():
+ # XXX this sucks.
+ try:
+ if isinstance(match, str):
+ restricts[key] = re.compile(match).match
+ else:
+ restricts[key] = re.compile(match[0], match[1]).match
+ except re.error, e:
+ raise errors.InvalidRestriction(key, match, e)
+ if key not in self._known_keys:
+ raise errors.InvalidRestriction(key, match,
+ "Key isn't valid")
+
+ for cpv, vals in self.iteritems():
+ for key, match in restricts.iteritems():
+ if not match(vals[key]):
+ break
+ else:
+ yield cpv
+
+ @staticmethod
+ def deconstruct_eclasses(eclass_dict):
+ """takes a dict, returns a string representing said dict"""
+ return "\t".join(
+ "%s\t%s\t%s" % (k, v[0], v[1])
+ for k, v in eclass_dict.iteritems())
+
+ @staticmethod
+ def reconstruct_eclasses(cpv, eclass_string):
+ """Turn a string from L{serialize_eclasses} into a dict."""
+ if not isinstance(eclass_string, basestring):
+ raise TypeError("eclass_string must be basestring, got %r" %
+ eclass_string)
+ eclasses = eclass_string.strip().split("\t")
+ if eclasses == [""]:
+ # occasionally this occurs in the fs backends. they suck.
+ return {}
+
+ l = len(eclasses)
+ if not l % 3:
+ paths = True
+ elif not l % 2:
+ # edge case of a multiple of 6
+ paths = not eclasses[1].isdigit()
+ else:
+ raise errors.CacheCorruption(
+ cpv, "_eclasses_ was of invalid len %i"
+ "(must be mod 3 or mod 2)" % len(eclasses))
+ d = {}
+ try:
+ if paths:
+ for x in xrange(0, len(eclasses), 3):
+ d[eclasses[x]] = (eclasses[x + 1], long(eclasses[x + 2]))
+ else:
+ for x in xrange(0, len(eclasses), 2):
+ d[eclasses[x]] = ('', long(eclasses[x + 1]))
+ except ValueError:
+ raise errors.CacheCorruption(
+ cpv, 'ValueError reading %r' % (eclass_string,))
+ return d
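
The template above leaves _getitem/_setitem/_delitem/__contains__/iterkeys to derived classes while itself handling readonly checks, sync batching and _eclasses_ (de)serialization. A minimal in-memory sketch of such a derivative (illustration only, not shipped in this commit) could look like:

    from pkgcore.cache import template

    class memory_database(template.database):
        """toy backend keeping everything in a dict; hypothetical example"""
        autocommits = True

        def __init__(self, *args, **kwargs):
            template.database.__init__(self, *args, **kwargs)
            self._data = {}

        def _getitem(self, cpv):
            # KeyError propagates for unknown cpvs, as __getitem__ expects
            return dict(self._data[cpv])

        def _setitem(self, cpv, values):
            # _eclasses_ arrives already serialized by __setitem__
            self._data[cpv] = dict(values)

        def _delitem(self, cpv):
            del self._data[cpv]

        def __contains__(self, cpv):
            return cpv in self._data

        def iterkeys(self):
            return iter(self._data)
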
diff --git a/pkgcore/cache/util.py b/pkgcore/cache/util.py
new file mode 100644
index 0000000..fb00adb
--- /dev/null
+++ b/pkgcore/cache/util.py
@@ -0,0 +1,118 @@
+# Copyright: 2005 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+cache backend utilities
+"""
+
+from pkgcore.cache import errors
+
+def mirror_cache(valid_nodes_iterable, src_cache, trg_cache, eclass_cache=None,
+ verbose_instance=None):
+ """
+ make a cache backend a mirror of another
+
+ @param valid_nodes_iterable: valid keys
+ @param src_cache: L{pkgcore.cache.template.database} instance
+ to copy keys from
+ @param trg_cache: L{pkgcore.cache.template.database} instance
+ to write keys to
+ @param eclass_cache: if doing eclass_cache translation,
+ a L{pkgcore.ebuild.eclass_cache.cache} instance to use, else None
+ @param verbose_instance: either None (defaulting to L{quiet_mirroring}),
+ or a L{quiet_mirroring} derivative
+ """
+
+ if not src_cache.complete_eclass_entries and not eclass_cache:
+ raise Exception(
+ "eclass_cache required for cache's of class %s!" %
+ src_cache.__class__)
+
+ if verbose_instance is None:
+ noise = quiet_mirroring()
+ else:
+ noise = verbose_instance
+
+ dead_nodes = set(trg_cache.iterkeys())
+ count = 0
+
+ if not trg_cache.autocommits:
+ trg_cache.sync(100)
+
+ for x in valid_nodes_iterable:
+ count += 1
+ if x in dead_nodes:
+ dead_nodes.remove(x)
+ try:
+ entry = src_cache[x]
+ except KeyError:
+ noise.missing_entry(x)
+ continue
+ if entry.get("INHERITED",""):
+ if src_cache.complete_eclass_entries:
+ if not "_eclasses_" in entry:
+ noise.corruption(x,"missing _eclasses_ field")
+ continue
+ if not eclass_cache.is_eclass_data_valid(entry["_eclasses_"]):
+ noise.eclass_stale(x)
+ continue
+ else:
+ entry["_eclasses_"] = eclass_cache.get_eclass_data(
+ entry["INHERITED"].split(), from_master_only=True)
+ if not entry["_eclasses_"]:
+ noise.eclass_stale(x)
+ continue
+
+ # by this time, if it reaches here, the eclass has been
+ # validated, and the entry has been updated/translated (if
+ # needs be, for metadata/cache mainly)
+ try:
+ trg_cache[x] = entry
+ except errors.CacheError, ce:
+ noise.exception(x, ce)
+ del ce
+ continue
+
+ if count >= noise.call_update_min:
+ noise.update(x)
+ count = 0
+
+ if not trg_cache.autocommits:
+ trg_cache.commit()
+
+ # ok. by this time, the trg_cache is up to date, and dead_nodes
+ # holds the cpvs that no longer exist in the source. walk the
+ # target db and remove them.
+ for key in dead_nodes:
+ try:
+ del trg_cache[key]
+ except errors.CacheError, ce:
+ noise.exception(key, ce)
+ del ce
+
+
+# "More than one statement on a single line"
+# pylint: disable-msg=C0321
+
+class quiet_mirroring(object):
+ """Quiet mirror_cache callback object for getting progress information."""
+ # call_update_min is used by mirror_cache to determine how often
+ # to call in. quiet defaults to 2^24 - 1: don't call update, 'cept
+ # once every 16 million entries or so :)
+ call_update_min = 0xffffff
+ def update(self, key, *arg): pass
+ def exception(self, key, *arg): pass
+ def eclass_stale(self, *arg): pass
+ def missing_entry(self, key): pass
+ def misc(self, key, *arg): pass
+ def corruption(self, key, s): pass
+
+class non_quiet_mirroring(quiet_mirroring):
+ """prints to stdout each step in cache mirroring"""
+
+ call_update_min = 1
+ def update(self, key, *arg): print "processed", key
+ def exception(self, key, *arg): print "exec", key, arg
+ def missing_entry(self, key): print "key %s is missing" % key
+ def corruption(self, key, *arg): print "corrupt %s:" % key, arg
+ def eclass_stale(self, key, *arg): print "stale %s:" % key, arg
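
Taken together, mirror_cache() copies every valid cpv from one backend into another and prunes entries that have disappeared. A hypothetical invocation (assuming `src` and `trg` are template.database-compatible backends and that `src` stores complete eclass entries, so no eclass_cache is needed):

    from pkgcore.cache import util

    util.mirror_cache(src.iterkeys(), src, trg,
                      verbose_instance=util.non_quiet_mirroring())
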
diff --git a/pkgcore/chksum/__init__.py b/pkgcore/chksum/__init__.py
new file mode 100644
index 0000000..3978273
--- /dev/null
+++ b/pkgcore/chksum/__init__.py
@@ -0,0 +1,116 @@
+# Copyright: 2005 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+chksum verification/generation subsystem
+"""
+
+from pkgcore.interfaces.data_source import base as base_data_source
+from snakeoil.demandload import demandload
+demandload(globals(), "os",
+ "sys",
+ "pkgcore.log:logger",
+ "pkgcore.chksum.defaults:loop_over_file",
+ "snakeoil.modules:load_module",
+ "snakeoil.osutils:listdir_files",
+)
+
+chksum_types = {}
+__inited__ = False
+
+def get_handler(requested):
+
+ """
+ get a chksum handler
+
+ @raise KeyError: if chksum type has no registered handler
+ @return: chksum handler (callable)
+ """
+
+ if not __inited__:
+ init()
+ if requested not in chksum_types:
+ raise KeyError("no handler for %s" % requested)
+ return chksum_types[requested]
+
+
+def get_handlers(requested=None):
+
+ """
+ get chksum handlers
+
+ @param requested: None (all handlers), or a sequence of the specific
+ handlers desired.
+ @raise KeyError: if requested chksum type has no registered handler
+ @return: dict of chksum_type:chksum handler
+ """
+
+ if requested is None:
+ if not __inited__:
+ init()
+ return dict(chksum_types)
+ d = {}
+ for x in requested:
+ d[x] = get_handler(x)
+ return d
+
+
+def init(additional_handlers=None):
+
+ """
+ init the chksum subsystem.
+
+ Scan the dir, find what handlers are available, etc.
+
+ @param additional_handlers: None, or pass in a dict of type:func
+ """
+
+ global __inited__
+
+ if additional_handlers is not None and not isinstance(
+ additional_handlers, dict):
+ raise TypeError("additional handlers must be a dict!")
+
+ chksum_types.clear()
+ __inited__ = False
+ loc = os.path.dirname(sys.modules[__name__].__file__)
+ for f in listdir_files(loc):
+ if not f.endswith(".py") or f.startswith("__init__."):
+ continue
+ try:
+ i = f.find(".")
+ if i != -1:
+ f = f[:i]
+ del i
+ m = load_module(__name__+"."+f)
+ except ImportError:
+ continue
+ try:
+ types = getattr(m, "chksum_types")
+ except AttributeError:
+ # no go.
+ continue
+ try:
+ chksum_types.update(types)
+
+ except ValueError:
+ logger.warn(
+ "%s.%s invalid chksum_types, ValueError Exception" % (
+ __name__, f))
+ continue
+
+ if additional_handlers is not None:
+ chksum_types.update(additional_handlers)
+
+ __inited__ = True
+
+
+def get_chksums(location, *chksums):
+ """
+ run multiple chksumers over a data_source/file path
+ """
+ handlers = get_handlers(chksums)
+ # try to hand off to the per file handler, may be faster.
+ if len(chksums) == 1:
+ return [handlers[chksums[0]](location)]
+ return loop_over_file(location, *[handlers[k].new() for k in chksums])
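
Callers normally go through get_chksums()/get_handler() rather than the per-type modules. A small usage sketch (the file path is a placeholder):

    from pkgcore.chksum import get_chksums, get_handler

    md5_val, sha1_val = get_chksums("/path/to/distfile", "md5", "sha1")
    # values are longs; the handler converts back to the usual hex form
    print get_handler("md5").long2str(md5_val)
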
diff --git a/pkgcore/chksum/defaults.py b/pkgcore/chksum/defaults.py
new file mode 100644
index 0000000..57f0540
--- /dev/null
+++ b/pkgcore/chksum/defaults.py
@@ -0,0 +1,288 @@
+# Copyright: 2006-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+
+"""
+default chksum handlers implementation- sha1, sha256, rmd160, and md5
+"""
+from pkgcore.interfaces.data_source import base as base_data_source
+from snakeoil.currying import partial
+from snakeoil import modules
+from snakeoil.demandload import demandload
+demandload(globals(), "os")
+
+blocksize = 32768
+
+sha1_size = 40
+md5_size = 32
+rmd160_size = 40
+sha256_size = 64
+
+def loop_over_file(filename, *objs):
+ if isinstance(filename, base_data_source):
+ if filename.get_path is not None:
+ filename = filename.get_path()
+ else:
+ filename = filename.get_fileobj()
+ wipeit = False
+ if isinstance(filename, basestring):
+ wipeit = True
+ f = open(filename, 'rb', blocksize * 2)
+ else:
+ f = filename
+ # reposition to start
+ f.seek(0, 0)
+ try:
+ data = f.read(blocksize)
+ # XXX why is size tracked here? It seems to be unused...
+ size = 0L
+ chfs = [chf() for chf in objs]
+ while data:
+ for chf in chfs:
+ chf.update(data)
+ size = size + len(data)
+ data = f.read(blocksize)
+
+ return [long(chf.hexdigest(), 16) for chf in chfs]
+ finally:
+ if wipeit:
+ f.close()
+
+
+class Chksummer(object):
+
+ def __init__(self, chf_type, obj, str_size):
+ self.obj = obj
+ self.chf_type = chf_type
+ self.str_size = str_size
+
+ def new(self):
+ return self.obj
+
+ def long2str(self, val):
+ return ("%x" % val).rjust(self.str_size, '0')
+
+ @staticmethod
+ def str2long(val):
+ return long(val, 16)
+
+ def __call__(self, filename):
+ return loop_over_file(filename, self.obj)[0]
+
+ def __str__(self):
+ return "%s chksummer" % self.chf_type
+
+
+# We have a couple of options:
+#
+# - If we are on python 2.5 or newer we can use hashlib, which uses
+# openssl if available (this will be fast and support a whole bunch
+# of hashes) and use a c implementation from python itself otherwise
+# (does not support as many hashes, slower).
+# - On older pythons we can use the sha and md5 module for sha1 and md5.
+# On python 2.5 these are deprecated wrappers around hashlib.
+# - On any python we can use the fchksum module (if available) which can
+# hash an entire file faster than we can, probably just because it does the
+# file-reading and hashing both in c.
+# - For any python we can use PyCrypto. Supports many hashes, fast but not
+# as fast as openssl-powered hashlib (not benchmarked against cpython's
+# builtin hashlib here).
+#
+# To complicate matters hashlib has a couple of hashes always present
+# as attributes of the hashlib module and less common hashes available
+# through a constructor taking a string. The former is faster.
+#
+# Some timing data from my athlonxp 2600+, python 2.4.3, python 2.5rc1,
+# pycrypto 2.0.1-r5, openssl 0.9.7j, fchksum 1.7.1 (not exhaustive obviously):
+# (test file is the Python 2.4.3 tarball, 7.7M)
+#
+# python2.4 -m timeit -s 'import fchksum'
+# 'fchksum.fmd5t("/home/marienz/tmp/Python-2.4.3.tar.bz2")[0]'
+# 40 +/- 1 msec roughly
+#
+# same with python2.5: same results.
+#
+# python2.4 -m timeit -s 'from pkgcore.chksum import defaults;import md5'
+# 'defaults.loop_over_file(md5, "/home/marienz/tmp/Python-2.4.3.tar.bz2")'
+# 64 +/- 1 msec roughly
+#
+# Same with python2.5:
+# 37 +/- 1 msec roughly
+#
+# python2.5 -m timeit -s
+# 'from pkgcore.chksum import defaults; from snakeoil import currying;'
+# -s 'import hashlib; hash = currying.pre_curry(hashlib.new, "md5")'
+# 'defaults.loop_over_file(hash, "/home/marienz/tmp/Python-2.4.3.tar.bz2")'
+# 37 +/- 1 msec roughly
+#
+# python2.5 -m timeit -s 'import hashlib'
+# 'h=hashlib.new("md5"); h.update("spork"); h.hexdigest()'
+# 6-7 usec per loop
+#
+# python2.5 -m timeit -s 'import hashlib'
+# 'h=hashlib.md5(); h.update("spork"); h.hexdigest()'
+# ~4 usec per loop
+#
+# python2.5 -m timeit -s 'import hashlib;data = 1024 * "spork"'
+# 'h=hashlib.new("md5"); h.update(data); h.hexdigest()'
+# ~20 usec per loop
+#
+# python2.5 -m timeit -s 'import hashlib;data = 1024 * "spork"'
+# 'h=hashlib.md5(); h.update(data); h.hexdigest()'
+# ~18 usec per loop
+#
+# Summarized:
+# - hashlib is faster than fchksum, fchksum is faster than python 2.4's md5.
+# - using hashlib's new() instead of the predefined type is still noticeably
+# slower for 5k of data. Since ebuilds and patches will often be smaller
+# than 5k we should avoid hashlib's new if there is a predefined type.
+# - If we do not have hashlib preferring fchksum over python md5 is worth it.
+# - Testing PyCrypto is unnecessary since its Crypto.Hash.MD5 is an
+# alias for python's md5 (same for sha1).
+#
+# An additional advantage of using hashlib instead of PyCrypto is it
+# is more reliable (PyCrypto has a history of generating bogus hashes,
+# especially on non-x86 platforms, OpenSSL should be more reliable
+# because it is more widely used).
+#
+# TODO do benchmarks for more hashes?
+#
+# Hash function we use is:
+# - hashlib attr if available
+# - hashlib through new() if available.
+# - fchksum with python md5 fallback if possible
+# - PyCrypto
+# - python's md5 or sha1.
+
+chksum_types = {}
+
+try:
+ import hashlib
+except ImportError:
+ pass
+else:
+ # Always available according to docs.python.org:
+ # md5(), sha1(), sha224(), sha256(), sha384(), and sha512().
+ for hashlibname, chksumname, size in [
+ ('md5', 'md5', md5_size),
+ ('sha1', 'sha1', sha1_size),
+ ('sha256', 'sha256', sha256_size),
+ ]:
+ chksum_types[chksumname] = Chksummer(chksumname,
+ getattr(hashlib, hashlibname), size)
+
+ # May or may not be available depending on openssl. List
+ # determined through trial and error.
+ for hashlibname, chksumname in [
+ ('ripemd160', 'rmd160'),
+ ]:
+ try:
+ hashlib.new(hashlibname)
+ except ValueError:
+ pass # This hash is not available.
+ else:
+ chksum_types[chksumname] = Chksummer(chksumname,
+ partial(hashlib.new, hashlibname), rmd160_size)
+ del hashlibname, chksumname
+
+
+if 'md5' not in chksum_types:
+ import md5
+ fchksum = None
+ try:
+ import fchksum
+ except ImportError:
+ pass
+ else:
+ class MD5Chksummer(Chksummer):
+ chf_type = "md5"
+ str_size = md5_size
+ __init__ = lambda s:None
+
+ def new(self):
+ return md5.new
+
+ def __call__(self, filename):
+ if isinstance(filename, base_data_source):
+ if filename.get_path is not None:
+ filename = filename.get_path()
+ if isinstance(filename, basestring) and fchksum is not None:
+ return long(fchksum.fmd5t(filename)[0], 16)
+ return loop_over_file(filename, md5.new)[0]
+
+ chksum_types = {"md5":MD5Chksummer()}
+
+
+# expand this to load all available at some point
+for k, v, str_size in (("sha1", "SHA", sha1_size),
+ ("sha256", "SHA256", sha256_size),
+ ("rmd160", "RIPEMD", rmd160_size)):
+ if k in chksum_types:
+ continue
+ try:
+ chksum_types[k] = Chksummer(k, modules.load_attribute(
+ "Crypto.Hash.%s.new" % v), str_size)
+ except modules.FailedImport:
+ pass
+del k, v
+
+
+for modulename, chksumname, size in [
+ ('sha', 'sha1', sha1_size),
+ ('md5', 'md5', md5_size),
+ ]:
+ if chksumname not in chksum_types:
+ chksum_types[chksumname] = Chksummer(chksumname,
+ modules.load_attribute('%s.new' % (modulename,)), size)
+del modulename, chksumname
+
+class SizeUpdater(object):
+
+ def __init__(self):
+ self.count = 0
+
+ def update(self, data):
+ self.count += len(data)
+
+ def hexdigest(self):
+ return "%x" % self.count
+
+
+class SizeChksummer(Chksummer):
+ """
+ size based chksum handler
+ yes, aware that size isn't much of a chksum. ;)
+ """
+
+ def __init__(self):
+ pass
+ obj = SizeUpdater
+ str_size = 1000000000
+ chf_type = 'size'
+
+ @staticmethod
+ def long2str(val):
+ return str(val)
+
+ @staticmethod
+ def str2long(val):
+ return long(val)
+
+ def __call__(self, file_obj):
+ if isinstance(file_obj, base_data_source):
+ if file_obj.get_path is not None:
+ file_obj = file_obj.get_path()
+ else:
+ file_obj = file_obj.get_fileobj()
+ if isinstance(file_obj, basestring):
+ try:
+ st_size = os.lstat(file_obj).st_size
+ except OSError:
+ return None
+ return st_size
+ # seek to the end.
+ file_obj.seek(0, 2)
+ return long(file_obj.tell())
+
+
+chksum_types["size"] = SizeChksummer()
+chksum_types = dict((intern(k), v) for k, v in chksum_types.iteritems())
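
The "size" handler registered at the end behaves like any other chksummer but simply reports file length; its long2str/str2long use plain decimal rather than hex. Sketch (the path is a placeholder):

    from pkgcore.chksum import get_handler

    size_handler = get_handler("size")
    nbytes = size_handler("/path/to/file")   # st_size, or None if lstat fails
    print size_handler.long2str(nbytes)      # decimal string
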
diff --git a/pkgcore/chksum/errors.py b/pkgcore/chksum/errors.py
new file mode 100644
index 0000000..a556be7
--- /dev/null
+++ b/pkgcore/chksum/errors.py
@@ -0,0 +1,22 @@
+# Copyright: 2005 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+exceptions thrown by chksum subsystem
+"""
+
+class base(Exception):
+ pass
+
+class MissingChksum(base):
+
+ def __init__(self, filename):
+ base.__init__(self, "Missing chksum file %r" % filename)
+ self.file = filename
+
+
+class ParseChksumError(base):
+ def __init__(self, filename, error):
+ base.__init__(self, "Failed parsing %r chksum due to %s" %
+ (filename, error))
+ self.file, self.error = filename, error
diff --git a/pkgcore/chksum/gpg.py b/pkgcore/chksum/gpg.py
new file mode 100644
index 0000000..d80904f
--- /dev/null
+++ b/pkgcore/chksum/gpg.py
@@ -0,0 +1,39 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+msg_header = "-----BEGIN PGP SIGNED MESSAGE-----\n"
+msg_header_len = len(msg_header)
+msg_hash = 'Hash:'
+msg_hash_len = len(msg_hash)
+sig_header = "-----BEGIN PGP SIGNATURE-----\n"
+sig_header_len = len(sig_header)
+sig_footer = "-----END PGP SIGNATURE-----\n"
+sig_footer_len = len(sig_footer)
+
+def skip_signatures(iterable):
+ i = iter(iterable)
+# format is-
+#"""
+#-----BEGIN PGP SIGNED MESSAGE-----
+#Hash: SHA1
+#
+#"""
+
+ for line in i:
+ # prune the signed-message header first, then the signature blocks
+ if line.endswith(msg_header):
+ line = i.next()
+ while line[:msg_hash_len] == msg_hash:
+ line = i.next()
+ # skip blank line after msg.
+ i.next()
+ continue
+ while line.endswith(sig_header):
+ line = i.next()
+ # swallow the footer.
+ while not line.endswith(sig_footer):
+ line = i.next()
+ # leave the next line on the stack
+ line = i.next()
+
+ yield line
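
skip_signatures() is a generator that drops the PGP clearsign header, Hash: lines and signature blocks, yielding only payload lines. A hypothetical use against a signed Manifest-style file (path is a placeholder):

    from pkgcore.chksum.gpg import skip_signatures

    f = open("/path/to/Manifest")
    try:
        for line in skip_signatures(f):
            print line,
    finally:
        f.close()
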
diff --git a/pkgcore/config/__init__.py b/pkgcore/config/__init__.py
new file mode 100644
index 0000000..0261504
--- /dev/null
+++ b/pkgcore/config/__init__.py
@@ -0,0 +1,80 @@
+# Copyright: 2005 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+configuration subsystem
+"""
+
+# keep these imports as minimal as possible; access to
+# pkgcore.config isn't uncommon, thus don't trigger till
+# actually needed
+from pkgcore.const import SYSTEM_CONF_FILE, USER_CONF_FILE
+
+class ConfigHint(object):
+
+ """hint for introspection supplying overrides"""
+
+ # be aware this is used in clone
+ __slots__ = (
+ "types", "positional", "required", "typename", "allow_unknowns", "doc")
+
+ def __init__(self, types=None, positional=None, required=None, doc=None,
+ typename=None, allow_unknowns=False):
+ self.types = types or {}
+ self.positional = positional or []
+ self.required = required or []
+ self.typename = typename
+ self.allow_unknowns = allow_unknowns
+ self.doc = doc
+
+ def clone(self, **kwds):
+ new_kwds = {}
+ for attr in self.__slots__:
+ new_kwds[attr] = kwds.pop(attr, getattr(self, attr))
+ if kwds:
+ raise TypeError("unknown type overrides: %r" % kwds)
+ return self.__class__(**new_kwds)
+
+
+def configurable(*args, **kwargs):
+ """Decorator version of ConfigHint."""
+ hint = ConfigHint(*args, **kwargs)
+ def decorator(original):
+ original.pkgcore_config_type = hint
+ return original
+ return decorator
+
+
+def load_config(user_conf_file=USER_CONF_FILE,
+ system_conf_file=SYSTEM_CONF_FILE,
+ debug=False, prepend_sources=(), skip_config_files=False):
+ """
+ the main entry point for any code looking to use pkgcore.
+
+ @param user_conf_file: file to attempt to load; if neither it nor the
+ system conf exists, falls back to portage 2 style configs
+ (/etc/make.conf, /etc/make.profile)
+
+ @return: L{pkgcore.config.central.ConfigManager} instance
+ representing the system config.
+ """
+
+ from pkgcore.config import central, cparser
+ from pkgcore.plugin import get_plugins
+ import os
+
+ configs = list(prepend_sources)
+ if not skip_config_files:
+ have_system_conf = os.path.isfile(system_conf_file)
+ have_user_conf = os.path.isfile(user_conf_file)
+ if have_system_conf or have_user_conf:
+ if have_user_conf:
+ configs.append(cparser.config_from_file(open(user_conf_file)))
+ if have_system_conf:
+ configs.append(
+ cparser.config_from_file(open(system_conf_file)))
+ else:
+ # make.conf...
+ from pkgcore.ebuild.portage_conf import config_from_make_conf
+ configs.append(config_from_make_conf())
+ configs.extend(get_plugins('global_config'))
+ return central.ConfigManager(configs, debug=debug)
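
load_config() is the usual entry point; the returned ConfigManager exposes sections by type through attribute access (via _ConfigMapping, defined in central.py below). A sketch, where "repo" is only an example of a typename a real configuration might define and a configured system is assumed:

    from pkgcore.config import load_config

    config = load_config()
    for name in config.repo.iterkeys():   # names of sections of type 'repo'
        print name
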
diff --git a/pkgcore/config/basics.py b/pkgcore/config/basics.py
new file mode 100644
index 0000000..1b96acd
--- /dev/null
+++ b/pkgcore/config/basics.py
@@ -0,0 +1,536 @@
+# Copyright: 2005 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+
+"""
+configuration subsystem primitives
+
+all callables can/may throw a
+L{configuration exception<pkgcore.config.errors.ConfigurationError>}
+"""
+
+
+from pkgcore.config import errors, configurable
+from snakeoil import currying
+from snakeoil.demandload import demandload
+demandload(globals(), "snakeoil:modules")
+
+type_names = ("list", "str", "bool", "int")
+
+
+# Copied from inspect.py which copied it from compile.h.
+# Also documented in http://docs.python.org/ref/types.html.
+CO_VARARGS, CO_VARKEYWORDS = 4, 8
+
+class ConfigType(object):
+
+ """A configurable type.
+
+ @ivar name: string specifying the protocol the instantiated object
+ conforms to.
+ @ivar callable: callable used to instantiate this type.
+ @ivar types: dict mapping key names to type strings.
+ @ivar positional: container holding positional arguments.
+ @ivar required: container holding required arguments.
+ @ivar allow_unknowns: controls whether unknown settings should error.
+ """
+
+ def __init__(self, func_obj):
+ """Create from a callable (function, member function, class).
+
+ It uses the defaults to determine type:
+ - True or False mean it's a boolean
+ - a tuple means it's a list (of strings)
+ - a str means it's a string
+
+ Arguments with a default of a different type are ignored.
+
+ If an argument has no default, it is assumed to be a string.
+ The exception to this is if the callable has a pkgcore_config_type
+ attr that is a L{ConfigHint} instance, in which case that hint's
+ settings override the inferred ones.
+ """
+ self.name = func_obj.__name__
+ self.callable = func_obj
+ self.doc = getattr(func_obj, '__doc__', None)
+ if not hasattr(func_obj, 'func_code'):
+ # No function or method, should be a class so grab __init__.
+ func_obj = func_obj.__init__
+ # We do not use the inspect module because that is a heavy
+ # thing to import and we can pretty easily get the data we
+ # need without it. Most of the code in its getargs function
+ # deals with tuples inside argument definitions, which we do
+ # not support anyway.
+ code = func_obj.func_code
+ args = code.co_varnames[:code.co_argcount]
+ varargs = bool(code.co_flags & CO_VARARGS)
+ varkw = bool(code.co_flags & CO_VARKEYWORDS)
+ if hasattr(func_obj, 'im_func'):
+ # It is a method. Chop off 'self':
+ args = args[1:]
+ defaults = func_obj.func_defaults
+ if defaults is None:
+ defaults = ()
+ self.types = {}
+ # iterate through defaults backwards, so they match up to argnames
+ for i, default in enumerate(reversed(defaults)):
+ argname = args[-1 - i]
+ for typeobj, typename in [(bool, 'bool'),
+ (tuple, 'list'),
+ (str, 'str'),
+ ((int, long), 'int')]:
+ if isinstance(default, typeobj):
+ self.types[argname] = typename
+ break
+ # just [:-len(defaults)] doesn't work if there are no defaults
+ self.positional = args[:len(args)-len(defaults)]
+ # no defaults to determine the type from -> default to str.
+ for arg in self.positional:
+ self.types[arg] = 'str'
+ self.required = tuple(self.positional)
+ self.allow_unknowns = False
+
+ # Process ConfigHint (if any)
+ hint_overrides = getattr(self.callable, "pkgcore_config_type", None)
+ if hint_overrides is not None:
+ self.types.update(hint_overrides.types)
+ if hint_overrides.required:
+ self.required = tuple(hint_overrides.required)
+ if hint_overrides.positional:
+ self.positional = tuple(hint_overrides.positional)
+ if hint_overrides.typename:
+ self.name = hint_overrides.typename
+ if hint_overrides.doc:
+ self.doc = hint_overrides.doc
+ self.allow_unknowns = hint_overrides.allow_unknowns
+ elif varargs or varkw:
+ raise TypeError(
+ 'func %s accepts *args or **kwargs, and no ConfigHint is '
+ 'provided' % (self.callable,))
+
+ for var in ('class', 'inherit', 'default'):
+ if var in self.types:
+ raise errors.TypeDefinitionError(
+ '%s: you cannot change the type of %r' % (
+ self.callable, var))
+
+ for var in self.positional:
+ if var not in self.required:
+ raise errors.TypeDefinitionError(
+ '%s: %r is in positionals but not in required' %
+ (self.callable, var))
+
+
+class LazySectionRef(object):
+
+ """Abstract base class for lazy-loaded section references."""
+
+ def __init__(self, central, typename):
+ self.central = central
+ self.typename = typename.split(':', 1)[1]
+ self.cached_config = None
+
+ def _collapse(self):
+ """Override this in a subclass."""
+ raise NotImplementedError(self._collapse)
+
+ def collapse(self):
+ """@returns: L{pkgcore.config.central.CollapsedConfig}."""
+ if self.cached_config is None:
+ config = self.cached_config = self._collapse()
+ if self.typename is not None and config.type.name != self.typename:
+ raise errors.ConfigurationError(
+ 'reference should be of type %r, got %r' % (
+ self.typename, config.type.name))
+ return self.cached_config
+
+ def instantiate(self):
+ """Convenience method returning the instantiated section."""
+ return self.collapse().instantiate()
+
+
+class LazyNamedSectionRef(LazySectionRef):
+
+ def __init__(self, central, typename, name):
+ LazySectionRef.__init__(self, central, typename)
+ self.name = name
+
+ def _collapse(self):
+ return self.central.collapse_named_section(self.name)
+
+
+class LazyUnnamedSectionRef(LazySectionRef):
+
+ def __init__(self, central, typename, section):
+ LazySectionRef.__init__(self, central, typename)
+ self.section = section
+
+ def _collapse(self):
+ return self.central.collapse_section(self.section)
+
+
+class ConfigSection(object):
+
+ """
+ Single Config section, returning typed values from a key.
+
+ Not much of an object this, if we were using zope.interface it'd
+ be an Interface.
+ """
+
+ def __contains__(self, name):
+ """Check if a key is in this section."""
+ raise NotImplementedError(self.__contains__)
+
+ def keys(self):
+ """Return a list of keys."""
+ raise NotImplementedError(self.keys)
+
+ def get_value(self, central, name, arg_type):
+ """Return a setting, converted to the requested type."""
+ raise NotImplementedError(self.get_value)
+
+
+class DictConfigSection(ConfigSection):
+
+ """Turns a dict and a conversion function into a ConfigSection."""
+
+ def __init__(self, conversion_func, source_dict):
+ """Initialize.
+
+ @type conversion_func: callable.
+ @param conversion_func: called with a ConfigManager, a value from
+ the dict and a type name.
+ @type source_dict: dict with string keys and arbitrary values.
+ """
+ ConfigSection.__init__(self)
+ self.func = conversion_func
+ self.dict = source_dict
+
+ def __contains__(self, name):
+ return name in self.dict
+
+ def keys(self):
+ return self.dict.keys()
+
+ def get_value(self, central, name, arg_type):
+ try:
+ return self.func(central, self.dict[name], arg_type)
+ except errors.ConfigurationError, e:
+ e.stack.append('Converting argument %r to %s' % (name, arg_type))
+ raise
+
+
+class FakeIncrementalDictConfigSection(ConfigSection):
+
+ """Turns a dict and a conversion function into a ConfigSection."""
+
+ def __init__(self, conversion_func, source_dict):
+ """Initialize.
+
+ A request for a section of a list type will look for
+ name.prepend and name.append keys too, using those for values
+ prepended/appended to the inherited values. The conversion
+ func should return a single sequence for list types and a
+ (typename, value) pair when called in 'repr' mode.
+
+ @type conversion_func: callable.
+ @param conversion_func: called with a ConfigManager, a value from
+ the dict and a type name.
+ @type source_dict: dict with string keys and arbitrary values.
+ """
+ ConfigSection.__init__(self)
+ self.func = conversion_func
+ self.dict = source_dict
+
+ def __contains__(self, name):
+ return name in self.dict or name + '.append' in self.dict or \
+ name + '.prepend' in self.dict
+
+ def keys(self):
+ keys = set()
+ for key in self.dict:
+ if key.endswith('.append'):
+ keys.add(key[:-7])
+ elif key.endswith('.prepend'):
+ keys.add(key[:-8])
+ else:
+ keys.add(key)
+ return list(keys)
+
+ def get_value(self, central, name, arg_type):
+ # Check if we need our special incremental magic.
+ if arg_type in ('list', 'str', 'repr') or arg_type.startswith('refs:'):
+ result = []
+ # Careful: None is a valid dict value, so use something else here.
+ missing = object()
+ for subname in (name + '.prepend', name, name + '.append'):
+ val = self.dict.get(subname, missing)
+ if val is missing:
+ val = None
+ else:
+ try:
+ val = self.func(central, val, arg_type)
+ except errors.ConfigurationError, e:
+ e.stack.append('Converting argument %r to %s' % (
+ subname, arg_type))
+ raise
+ result.append(val)
+ if result[0] is result[1] is result[2] is None:
+ raise KeyError(name)
+ if arg_type != 'repr':
+ # Done.
+ return result
+ # If "kind" is of some incremental-ish kind or we have
+ # .prepend or .append for this key then we need to
+ # convert everything we have to the same kind and
+ # return all three.
+ #
+ # (we do not get called for separate reprs for the
+ # .prepend or .append because those are filtered from
+ # .keys(). If we do not filter those from .keys()
+ # central gets upset because it does not know their
+ # type. Perhaps this means we should have a separate
+ # .keys() used together with repr, not sure yet
+ # --marienz)
+ #
+ # The problem here is that we may get unsuitable for
+ # incremental or differing types for the three reprs
+ # we run, so we need to convert to a suitable common
+ # kind.
+ if result[0] is None and result[2] is None:
+ # Simple case: no extra data, so no need for any
+ # conversions.
+ kind, val = result[1]
+ if kind in ('list', 'str') or kind == 'refs':
+ # Caller expects a three-tuple.
+ return kind, (None, val, None)
+ else:
+ # non-incremental, just return as-is.
+ return kind, val
+ # We have more than one return value. Figure out what
+ # target to convert to. Choices are list, str and refs.
+ kinds = set(v[0] for v in result if v is not None)
+ if 'refs' in kinds or 'ref' in kinds:
+ # If we have any refs we have to convert to refs.
+ target_kind = 'refs'
+ elif kinds == set(['str']):
+ # If we have only str we can just use that.
+ target_kind = 'str'
+ else:
+ # Convert to list. May not make any sense, but is
+ # the best we can do.
+ target_kind = 'list'
+ converted = []
+ for val in result:
+ if val is None:
+ converted.append(None)
+ continue
+ kind, val = val
+ if kind == 'ref':
+ assert target_kind == 'refs', target_kind
+ converted.append([val])
+ elif kind == 'refs':
+ assert target_kind == 'refs', target_kind
+ converted.append(val)
+ elif kind == 'list':
+ assert target_kind != 'str', target_kind
+ converted.append(val)
+ else:
+ # Everything else gets converted to a string first.
+ if kind == 'callable':
+ val = '%s.%s' % (val.__module__, val.__name__)
+ elif kind in ('bool', 'int', 'str'):
+ val = str(val)
+ else:
+ raise errors.ConfigurationError(
+ 'unsupported type %r' % (kind,))
+ # Then convert the str to list if needed.
+ if target_kind == 'str':
+ converted.append(val)
+ else:
+ converted.append([val])
+ return target_kind, converted
+ # Not incremental.
+ try:
+ return self.func(central, self.dict[name], arg_type)
+ except errors.ConfigurationError, e:
+ e.stack.append('Converting argument %r to %s' % (name, arg_type))
+ raise
+
+def convert_string(central, value, arg_type):
+ """Conversion func for a string-based DictConfigSection."""
+ assert isinstance(value, basestring), value
+ if arg_type == 'callable':
+ try:
+ func = modules.load_attribute(value)
+ except modules.FailedImport:
+ raise errors.ConfigurationError('cannot import %r' % (value,))
+ if not callable(func):
+ raise errors.ConfigurationError('%r is not callable' % (value,))
+ return func
+ elif arg_type.startswith('refs:'):
+ return list(LazyNamedSectionRef(central, arg_type, ref)
+ for ref in list_parser(value))
+ elif arg_type.startswith('ref:'):
+ return LazyNamedSectionRef(central, arg_type, str_parser(value))
+ elif arg_type == 'repr':
+ return 'str', value
+ try:
+ func = {
+ 'list': list_parser,
+ 'str': str_parser,
+ 'bool': bool_parser,
+ 'int': int_parser,
+ }[arg_type]
+ except KeyError:
+ raise errors.ConfigurationError('Unknown type %r' % (arg_type,))
+ return func(value)
+
+def convert_asis(central, value, arg_type):
+ """"Conversion" func assuming the types are already correct."""
+ if arg_type == 'callable':
+ if not callable(value):
+ raise errors.ConfigurationError('%r is not callable' % (value,))
+ return value
+ elif arg_type.startswith('ref:'):
+ if not isinstance(value, ConfigSection):
+ raise errors.ConfigurationError('%r is not a config section' %
+ (value,))
+ return LazyUnnamedSectionRef(central, arg_type, value)
+ elif arg_type.startswith('refs:'):
+ l = []
+ for section in value:
+ if not isinstance(section, ConfigSection):
+ raise errors.ConfigurationError('%r is not a config section' %
+ (value,))
+ l.append(LazyUnnamedSectionRef(central, arg_type, section))
+ return l
+ elif arg_type == 'repr':
+ if callable(value):
+ return 'callable', value
+ if isinstance(value, ConfigSection):
+ return 'ref', value
+ if isinstance(value, str):
+ return 'str', value
+ if isinstance(value, bool):
+ return 'bool', value
+ if isinstance(value, (list, tuple)):
+ if not value or isinstance(value[0], str):
+ return 'list', value
+ if isinstance(value[0], ConfigSection):
+ return 'refs', value
+ raise errors.ConfigurationError('unsupported type for %r' % (value,))
+ elif not isinstance(value, {'list': (list, tuple),
+ 'str': str,
+ 'bool': bool}[arg_type]):
+ raise errors.ConfigurationError(
+ '%r does not have type %r' % (value, arg_type))
+ return value
+
+def convert_hybrid(central, value, arg_type):
+ """Automagically switch between L{convert_string} and L{convert_asis}.
+
+ L{convert_asis} is used for arg_type str and if value is not a basestring.
+ L{convert_string} is used for the rest.
+
+ Be careful about handing in escaped strings: they are not
+ unescaped (for arg_type str).
+ """
+ if arg_type != 'str' and isinstance(value, basestring):
+ return convert_string(central, value, arg_type)
+ return convert_asis(central, value, arg_type)
+
+# "Invalid name" (pylint thinks these are module-level constants)
+# pylint: disable-msg=C0103
+HardCodedConfigSection = currying.partial(
+ FakeIncrementalDictConfigSection, convert_asis)
+ConfigSectionFromStringDict = currying.partial(
+ FakeIncrementalDictConfigSection, convert_string)
+AutoConfigSection = currying.partial(
+ FakeIncrementalDictConfigSection, convert_hybrid)
+
+
+def section_alias(target, typename):
+ """Build a ConfigSection that instantiates a named reference.
+
+ Because of central's caching our instantiated value will be
+ identical to our target's.
+ """
+ @configurable({'target': 'ref:' + typename}, typename=typename)
+ def alias(target):
+ return target
+ return AutoConfigSection({'class': alias, 'target': target})
+
+
+def list_parser(string):
+ """split on whitespace honoring quoting for new tokens"""
+ l = []
+ i = 0
+ e = len(string)
+ # check for stringness because we return something interesting if
+ # fed a sequence of strings
+ if not isinstance(string, basestring):
+ raise TypeError('expected a string, got %r' % (string,))
+ while i < e:
+ if not string[i].isspace():
+ if string[i] in ("'", '"'):
+ q = i
+ i += 1
+ res = []
+ while i < e and string[i] != string[q]:
+ if string[i] == '\\':
+ i += 1
+ res.append(string[i])
+ i += 1
+ if i >= e:
+ raise errors.QuoteInterpretationError(string)
+ l.append(''.join(res))
+ else:
+ res = []
+ while i < e and not (string[i].isspace() or
+ string[i] in ("'", '"')):
+ if string[i] == '\\':
+ i += 1
+ res.append(string[i])
+ i += 1
+ if i < e and string[i] in ("'", '"'):
+ raise errors.QuoteInterpretationError(string)
+ l.append(''.join(res))
+ i += 1
+ return l
+
+def str_parser(string):
+ """yank leading/trailing whitespace and quotation, along with newlines"""
+ s = string.strip()
+ if len(s) > 1 and s[0] in '"\'' and s[0] == s[-1]:
+ s = s[1:-1]
+ return s.replace('\n', ' ').replace('\t', ' ')
+
+def bool_parser(string):
+ """convert a string to a boolean"""
+ s = str_parser(string).lower()
+ if s in ("no", "false", "0"):
+ return False
+ if s in ("yes", "true", "1"):
+ return True
+ raise errors.ConfigurationError('%r is not a boolean' % s)
+
+def int_parser(string):
+ """convert a string to a integer"""
+ string = str_parser(string)
+ try:
+ return int(string)
+ except ValueError:
+ raise errors.ConfigurationError('%r is not an integer' % string)
+
+@configurable({'path': 'str', 'parser': 'callable'}, typename='configsection')
+def parse_config_file(path, parser):
+ try:
+ f = open(path, 'r')
+ except (IOError, OSError), e:
+ raise errors.InstantiationError(e.strerror)
+ try:
+ return parser(f)
+ finally:
+ f.close()
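
To make the ConfigType introspection above concrete: a callable only needs sensible defaults (or a ConfigHint via the configurable decorator) for the config machinery to know its argument types. A hypothetical configurable callable, plus the string parsers (the callable and its typename are invented for the example):

    from pkgcore.config import configurable, basics

    @configurable({'path': 'str', 'flags': 'list'}, required=['path'],
                  typename='myrepo')
    def my_repo(path, flags=()):
        # placeholder callable; a real one would build a repository object
        return (path, tuple(flags))

    print basics.list_parser('a "b c" d')   # -> ['a', 'b c', 'd']
    print basics.bool_parser('Yes')         # -> True
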
diff --git a/pkgcore/config/central.py b/pkgcore/config/central.py
new file mode 100644
index 0000000..87f3941
--- /dev/null
+++ b/pkgcore/config/central.py
@@ -0,0 +1,490 @@
+# Copyright: 2005-2006 Marien Zwart <marienz@gentoo.org>
+# Copyright: 2005-2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""Collapse multiple config-sources and instantiate from them.
+
+A lot of extra documentation on this is in dev-notes/config.rst.
+"""
+
+
+from pkgcore.config import errors, basics
+from snakeoil import mappings
+
+
+class _ConfigMapping(mappings.DictMixin):
+
+ """Minimal dict-like wrapper returning config sections by type.
+
+ Similar to L{LazyValDict<mappings.LazyValDict>}, but __getitem__
+ does not call the key func.
+
+ Careful: getting the keys for this mapping will collapse all of
+ central's configs to get at their types, which might be slow if
+ any of them are remote!
+ """
+
+ def __init__(self, manager, typename):
+ mappings.DictMixin.__init__(self)
+ self.manager = manager
+ self.typename = typename
+
+ def __getitem__(self, key):
+ conf = self.manager.collapse_named_section(key, raise_on_missing=False)
+ if conf is None or conf.type.name != self.typename:
+ raise KeyError(key)
+ try:
+ return conf.instantiate()
+ except errors.ConfigurationError, e:
+ e.stack.append('Instantiating named section %r' % (key,))
+ raise
+
+ def iterkeys(self):
+ for config in self.manager.configs:
+ for name in config:
+ try:
+ collapsed = self.manager.collapse_named_section(name)
+ except errors.BaseException:
+ # Cannot be collapsed, ignore it (this is not
+ # an error, it can be used as base for
+ # something that can be collapsed)
+ pass
+ else:
+ if collapsed.type.name == self.typename:
+ yield name
+
+ def __contains__(self, key):
+ conf = self.manager.collapse_named_section(key, raise_on_missing=False)
+ return conf is not None and conf.type.name == self.typename
+
+
+class CollapsedConfig(object):
+
+ """A collapsed config section.
+
+ @type type: L{basics.ConfigType}
+ @ivar type: Our type.
+ @type config: dict
+ @ivar config: The supplied configuration values.
+ @ivar debug: if True exception wrapping is disabled.
+ @ivar default: True if this section is a default.
+ @type name: C{str} or C{None}
+ @ivar name: our section name or C{None} for an anonymous section.
+ """
+
+ def __init__(self, type_obj, config, manager, debug=False, default=False):
+ """Initialize instance vars."""
+ # Check if we got all values required to instantiate.
+ missing = set(type_obj.required) - set(config)
+ if missing:
+ raise errors.ConfigurationError(
+ 'type %s.%s needs settings for %s' % (
+ type_obj.callable.__module__,
+ type_obj.callable.__name__,
+ ', '.join(repr(var) for var in missing)))
+
+ self.name = None
+ self.default = default
+ self.debug = debug
+ self.type = type_obj
+ self.config = config
+ # Cached instance if we have one.
+ self._instance = None
+
+ def instantiate(self):
+ """Call our type's callable, cache and return the result.
+
+ Calling instantiate more than once will return the cached value.
+ """
+ if self._instance is not None:
+ return self._instance
+
+ # Needed because this code can run twice even with instance
+ # caching if we trigger an InstantiationError.
+ config = mappings.ProtectedDict(self.config)
+
+ # Instantiate section refs.
+ # Careful: not everything we have a type for needs to be in the conf dict
+ # (because of default values) and not everything in the conf dict
+ # needs to have a type (because of allow_unknowns).
+ for name, val in config.iteritems():
+ typename = self.type.types.get(name)
+ if typename is None:
+ continue
+ # central already checked the type, no need to repeat that here.
+ if typename.startswith('ref:'):
+ try:
+ config[name] = val.instantiate()
+ except errors.ConfigurationError, e:
+ e.stack.append('Instantiating ref %r' % (name,))
+ raise
+ elif typename.startswith('refs:'):
+ try:
+ config[name] = list(ref.instantiate() for ref in val)
+ except errors.ConfigurationError, e:
+ e.stack.append('Instantiating refs %r' % (name,))
+ raise
+
+ callable_obj = self.type.callable
+
+ pargs = []
+ for var in self.type.positional:
+ pargs.append(config.pop(var))
+ # Python is basically the worst language ever:
+ # TypeError: repo() argument after ** must be a dictionary
+ configdict = dict(config)
+ try:
+ self._instance = callable_obj(*pargs, **configdict)
+ except errors.InstantiationError, e:
+ # This is probably just paranoia, but better safe than sorry.
+ if e.callable is None:
+ e.callable = callable_obj
+ e.pargs = pargs
+ e.kwargs = configdict
+ raise
+ except (RuntimeError, SystemExit, KeyboardInterrupt):
+ raise
+ except Exception, e:
+ if self.debug:
+ raise
+ raise errors.InstantiationError(exception=e,
+ callable_obj=callable_obj,
+ pargs=pargs, kwargs=configdict)
+ if self._instance is None:
+ raise errors.InstantiationError(
+ 'No object returned', callable_obj=callable_obj, pargs=pargs,
+ kwargs=configdict)
+
+ return self._instance
+
+
+class ConfigManager(object):
+
+ """Combine config type definitions and configuration sections.
+
+ Creates instances of a requested type and name by pulling the
+ required data from any number of provided configuration sources.
+
+ The following special type names are recognized:
+ - configsection: instantiated and used the same way as an entry in the
+ configs L{__init__} arg.
+ - remoteconfigsection: instantiated and used the same way as an entry in
+ the remote_configs L{__init__} arg.
+
+ These "magic" typenames are only recognized if they are used by a
+ section with a name starting with "autoload".
+ """
+
+ def __init__(self, configs=(), remote_configs=(), debug=False):
+ """Initialize.
+
+ @type configs: sequence of mappings of string to ConfigSection.
+ @param configs: configuration to use.
+ Can define extra configs that are also loaded.
+ @type remote_configs: sequence of mappings of string to ConfigSection.
+ @param remote_configs: configuration to use.
+ Cannot define extra configs.
+ @param debug: if set to True exception wrapping is disabled.
+ This means things can raise other exceptions than
+ ConfigurationError but tracebacks are complete.
+ """
+ self.original_configs = tuple(configs)
+ self.original_remote_configs = tuple(remote_configs)
+ # Set of encountered section names, used to catch recursive references.
+ self._refs = set()
+ self.debug = debug
+ self.reload()
+
+ def reload(self):
+ """Reinitialize us from the config sources originally passed in.
+
+ This throws away all cached instances and re-executes autoloads.
+ """
+ # "Attribute defined outside __init__"
+ # pylint: disable-msg=W0201
+ self.configs = (list(self.original_configs) +
+ list(self.original_remote_configs))
+ # Cache mapping confname to CollapsedConfig.
+ self.collapsed_configs = {}
+ self._exec_configs(self.original_configs)
+
+ __getattr__ = _ConfigMapping
+
+ def _exec_configs(self, configs):
+ """Pull extra type and config sections from configs and use them.
+
+ Things loaded this way are added after already loaded things
+ (meaning the config containing the autoload section overrides
+ the config(s) added by that section).
+ """
+ new_configs = []
+ for config in configs:
+ for name in config:
+ # Do not even touch the ConfigSection if it's not an autoload.
+ if not name.startswith('autoload'):
+ continue
+ # If this matches something we previously instantiated
+ # we should probably blow up to prevent massive
+ # amounts of confusion (and recursive autoloads)
+ if name in self.collapsed_configs:
+ raise errors.ConfigurationError(
+ 'section %r from autoload is already collapsed!' % (
+ name,))
+ try:
+ collapsed = self.collapse_named_section(name)
+ except errors.ConfigurationError, e:
+ e.stack.append('Collapsing autoload %r' % (name,))
+ raise
+ if collapsed.type.name not in (
+ 'configsection', 'remoteconfigsection'):
+ raise errors.ConfigurationError(
+ 'Section %r is marked as autoload but type is %s, not '
+ '(remote)configsection' % (name, collapsed.type.name))
+ try:
+ instance = collapsed.instantiate()
+ except errors.ConfigurationError, e:
+ e.stack.append('Instantiating autoload %r' % (name,))
+ raise
+ if collapsed.type.name == 'configsection':
+ new_configs.append(instance)
+ elif collapsed.type.name == 'remoteconfigsection':
+ self.configs.append(instance)
+ if new_configs:
+ self.configs.extend(new_configs)
+ self._exec_configs(new_configs)
+
+ def sections(self):
+ """Return an iterator of all section names."""
+ for config in self.configs:
+ for name in config:
+ yield name
+
+ def collapse_named_section(self, name, raise_on_missing=True):
+ """Collapse a config by name, possibly returning a cached instance.
+
+ @returns: L{CollapsedConfig}.
+
+ If there is no section with this name a ConfigurationError is raised,
+ unless raise_on_missing is False in which case None is returned.
+ """
+ if name in self._refs:
+ raise errors.ConfigurationError(
+ 'Reference to %r is recursive' % (name,))
+ self._refs.add(name)
+ try:
+ result = self.collapsed_configs.get(name)
+ if result is not None:
+ return result
+ for source_index, config in enumerate(self.configs):
+ if name in config:
+ section = config[name]
+ break
+ else:
+ if raise_on_missing:
+ raise errors.ConfigurationError(
+ 'no section called %r' % (name,))
+ return None
+ try:
+ result = self.collapse_section(section, name, source_index)
+ result.name = name
+ except errors.ConfigurationError, e:
+ e.stack.append('Collapsing section named %r' % (name,))
+ raise
+ self.collapsed_configs[name] = result
+ return result
+ finally:
+ self._refs.remove(name)
+
+ def collapse_section(self, section, _name=None, _index=None):
+ """Collapse a ConfigSection to a L{CollapsedConfig}."""
+
+ # Bail if this is an inherit-only (uncollapsable) section.
+ try:
+ inherit_only = section.get_value(self, 'inherit-only', 'bool')
+ except KeyError:
+ pass
+ else:
+ if inherit_only:
+ raise errors.CollapseInheritOnly(
+ 'cannot collapse inherit-only section')
+
+ # List of (name, ConfigSection, index) tuples, most specific first.
+ slist = [(_name, section, _index)]
+
+ # first map out inherits.
+ inherit_names = set([_name])
+ for current_section, current_conf, index in slist:
+ if 'inherit' not in current_conf:
+ continue
+ prepend, inherits, append = current_conf.get_value(
+ self, 'inherit', 'list')
+ if prepend is not None or append is not None:
+ raise errors.ConfigurationError(
+ 'Prepending or appending to the inherit list makes no '
+ 'sense')
+ for inherit in inherits:
+ if inherit == current_section:
+ # Self-inherit is a bit special.
+ for i, config in enumerate(self.configs[index + 1:]):
+ if inherit in config:
+ slist.append((inherit, config[inherit],
+ index + i + 1))
+ break
+ else:
+ raise errors.ConfigurationError(
+ 'Self-inherit %r cannot be found' % (inherit,))
+ else:
+ if inherit in inherit_names:
+ raise errors.ConfigurationError(
+ 'Inherit %r is recursive' % (inherit,))
+ inherit_names.add(inherit)
+ for i, config in enumerate(self.configs):
+ if inherit in config:
+ slist.append((inherit, config[inherit], i))
+ break
+ else:
+ raise errors.ConfigurationError(
+ 'inherit target %r cannot be found' % (inherit,))
+
+ # Grab the "class" setting first (we need it to get a type obj
+ # to collapse to the right type in the more general loop)
+ for inherit_name, inherit_conf, index in slist:
+ if "class" in inherit_conf:
+ break
+ else:
+ raise errors.ConfigurationError('no class specified')
+
+ type_obj = basics.ConfigType(inherit_conf.get_value(self, 'class',
+ 'callable'))
+
+ conf = {}
+ for section_nr, (inherit_name, inherit_conf, index) in \
+ enumerate(reversed(slist)):
+ for key in inherit_conf.keys():
+ if key in ('class', 'inherit', 'inherit-only'):
+ continue
+ typename = type_obj.types.get(key)
+ if typename is None:
+ if key == 'default':
+ typename = 'bool'
+ elif not type_obj.allow_unknowns:
+ if section_nr != len(slist) - 1:
+ raise errors.ConfigurationError(
+ 'Type of %r inherited from %r unknown' % (
+ key, inherit_name))
+ raise errors.ConfigurationError(
+ 'Type of %r unknown' % (key,))
+ else:
+ typename = 'str'
+ is_ref = typename.startswith('ref:')
+ is_refs = typename.startswith('refs:')
+ # The sections do not care about lazy vs nonlazy.
+ if typename.startswith('lazy_'):
+ typename = typename[5:]
+ result = inherit_conf.get_value(self, key, typename)
+ if is_ref:
+ try:
+ result = result.collapse()
+ except errors.ConfigurationError, e:
+ e.stack.append(
+ 'Collapsing section ref %r' % (key,))
+ raise
+ elif is_refs:
+ try:
+ result = list(
+ list(ref.collapse() for ref in subresult or ())
+ for subresult in result)
+ except errors.ConfigurationError, e:
+ e.stack.append(
+ 'Collapsing section refs %r' % (key,))
+ raise
+ if typename == 'list' or typename.startswith('refs:'):
+ prepend, result, append = result
+ if result is None:
+ if key in conf:
+ result = conf[key]
+ else:
+ result = []
+ if prepend:
+ result = prepend + result
+ if append:
+ result += append
+ elif typename == 'str':
+ prepend, result, append = result
+ if result is None and key in conf:
+ result = conf[key]
+ result = ' '.join(
+ v for v in (prepend, result, append) if v)
+ conf[key] = result
+ default = conf.pop('default', False)
+ return CollapsedConfig(
+ type_obj, conf, self, debug=self.debug, default=default)
+
+ def get_default(self, type_name):
+ """Finds the configuration specified default obj of type_name.
+
+ Returns C{None} if no defaults.
+ """
+ # The name of the "winning" default or None if there is none.
+ default_name = None
+ # The collapsed default section or None.
+ default = None
+ for source in self.configs:
+ for name, section in source.iteritems():
+ collapsed = None
+ try:
+ is_default = section.get_value(self, 'default', 'bool')
+ except KeyError:
+ is_default = False
+ if not is_default:
+ continue
+ # We need to know the type name of this section, for
+ # which we need the class. Try to grab this from the
+ # section directly:
+ try:
+ klass = section.get_value(self, 'class', 'callable')
+ except errors.ConfigurationError:
+ # There is a class setting but it is not valid.
+ # This means it is definitely not the one we are
+ # interested in, so just skip this.
+ continue
+ except KeyError:
+ # There is no class value on the section. Collapse
+ # it to see if it inherits one:
+ try:
+ collapsed = self.collapse_named_section(name)
+ except errors.ConfigurationError:
+ # Uncollapsable. Just ignore this, since we
+ # have no clean way of determining if this
+ # would be an "interesting" section if it
+ # could be collapsed (and complaining about
+ # every uncollapsable section with
+ # default=true would be too much).
+ continue
+ type_obj = collapsed.type
+ else:
+ # Grabbed the class directly from the section.
+ type_obj = basics.ConfigType(klass)
+ if type_obj.name != type_name:
+ continue
+ if default_name is not None:
+ raise errors.ConfigurationError(
+ 'both %r and %r are default for %r' % (
+ default_name, name, type_name))
+ default_name = name
+ default = collapsed
+ if default_name is not None:
+ if default is None:
+ try:
+ default = self.collapse_named_section(default_name)
+ except errors.ConfigurationError, e:
+ e.stack.append('Collapsing default %s %r' % (
+ type_name, default_name))
+ raise
+ try:
+ return default.instantiate()
+ except errors.ConfigurationError, e:
+ e.stack.append('Instantiating default %s %r' %
+ (type_name, default_name))
+ raise
+ return None
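+
+# Illustrative sketch of the default machinery above (section and class
+# names here are hypothetical, not part of pkgcore):
+#
+#     [mydomain]
+#     class = example.module.domain_callable
+#     default = true
+#
+# get_default() scans every section with default=true, determines its type
+# from the 'class' setting (collapsing the section if needed), and
+# instantiates the single match; two defaults of the same type raise
+# ConfigurationError.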
diff --git a/pkgcore/config/cparser.py b/pkgcore/config/cparser.py
new file mode 100644
index 0000000..5ba5817
--- /dev/null
+++ b/pkgcore/config/cparser.py
@@ -0,0 +1,29 @@
+# Copyright: 2005 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+"""
+ini based configuration format
+"""
+
+from ConfigParser import ConfigParser
+
+from pkgcore.config import basics
+from snakeoil import mappings
+
+class CaseSensitiveConfigParser(ConfigParser):
+ def optionxform(self, val):
+ return val
+
+
+def config_from_file(file_obj):
+ """
+ generate a config dict
+
+ @param file_obj: file protocol instance
+ @return: L{snakeoil.mappings.LazyValDict} instance
+ """
+ cparser = CaseSensitiveConfigParser()
+ cparser.readfp(file_obj)
+ def get_section(section):
+ return basics.ConfigSectionFromStringDict(dict(cparser.items(section)))
+ return mappings.LazyValDict(cparser.sections, get_section)
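+
+# Minimal usage sketch (assumed; the section and key names are arbitrary):
+#
+#     from StringIO import StringIO
+#     sections = config_from_file(StringIO("[repo]\nclass = example.callable\n"))
+#     sections.keys()      # ['repo']
+#     sections['repo']     # ConfigSectionFromStringDict wrapping that section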
diff --git a/pkgcore/config/dhcpformat.py b/pkgcore/config/dhcpformat.py
new file mode 100644
index 0000000..357596f
--- /dev/null
+++ b/pkgcore/config/dhcpformat.py
@@ -0,0 +1,169 @@
+# Copyright: 2005 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+"""Parse a dhcpd.conf(5) style configuration file.
+
+Example of the supported format (not a complete config)::
+
+ # this is a comment.
+ # this is a config section.
+ metadata {
+ # strings may be quoted or unquoted. semicolon terminator.
+ type cache;
+ class pkgcore.cache.metadata.database;
+ # true, yes or 1 for boolean True (case-insensitive).
+ readonly true;
+ location /usr/portage/;
+ }
+
+ # this needs to be quoted because it has a space in it
+ "livefs domain" {
+ # this could be unquoted.
+ type "domain";
+ package.keywords "/etc/portage/package.keywords";
+ default yes;
+ # this is a section reference, with a nested anonymous section.
+ repositories {
+ type repo;
+ class pkgcore.ebuild.repository.tree;
+ location /usr/portage;
+ # this is also a section reference, but instead of a nested section
+ # we refer to the named metadata section above
+ cache metadata;
+ };
+ fetcher {
+ type fetcher;
+ distdir /usr/portage/distfiles;
+ };
+ }
+"""
+
+from pkgcore.config import basics, errors
+from snakeoil import mappings, modules, demandload
+demandload.demandload(globals(), 'snakeoil.compatibility:all')
+
+import pyparsing as pyp
+
+# this is based on the 'BIND named.conf parser' on pyparsing's webpage
+
+_section = pyp.Forward()
+_value = (pyp.Word(pyp.alphanums + './_') |
+ pyp.quotedString.copy().setParseAction(pyp.removeQuotes))
+
+_section_contents = pyp.dictOf(
+ _value, pyp.Group(pyp.OneOrMore(_value | _section)) + pyp.Suppress(';'))
+
+# "statement seems to have no effect"
+# pylint: disable-msg=W0104
+_section << pyp.Group(pyp.Suppress('{') + _section_contents +
+ pyp.Suppress('}'))
+
+parser = (
+ pyp.stringStart +
+ pyp.dictOf(_value, _section).ignore(pyp.pythonStyleComment) +
+ pyp.stringEnd)
+
+
+class ConfigSection(basics.ConfigSection):
+
+ """Expose a section_contents from pyparsing as a ConfigSection.
+
+ mke2fsformat also uses this.
+ """
+
+ def __init__(self, section):
+ basics.ConfigSection.__init__(self)
+ self.section = section
+
+ def __contains__(self, name):
+ return name in self.section
+
+ def keys(self):
+ return self.section.keys()
+
+ def get_value(self, central, name, arg_type):
+ value = self.section[name]
+ if arg_type == 'callable':
+ if len(value) != 1:
+ raise errors.ConfigurationError('only one argument required')
+ value = value[0]
+ if not isinstance(value, basestring):
+ raise errors.ConfigurationError(
+ 'need a callable, not a section')
+ try:
+ value = modules.load_attribute(value)
+ except modules.FailedImport:
+ raise errors.ConfigurationError('cannot import %r' % (value,))
+ if not callable(value):
+ raise errors.ConfigurationError('%r is not callable' % value)
+ return value
+ elif arg_type.startswith('ref:'):
+ if len(value) != 1:
+ raise errors.ConfigurationError('only one argument required')
+ value = value[0]
+ if isinstance(value, basestring):
+ # it's a section ref
+ return basics.LazyNamedSectionRef(central, arg_type, value)
+ else:
+ # it's an anonymous inline section
+ return basics.LazyUnnamedSectionRef(central, arg_type,
+ ConfigSection(value))
+ elif arg_type.startswith('refs:'):
+ result = []
+ for ref in value:
+ if isinstance(ref, basestring):
+ # it's a section ref
+ result.append(basics.LazyNamedSectionRef(
+ central, arg_type, ref))
+ else:
+ # it's an anonymous inline section
+ result.append(basics.LazyUnnamedSectionRef(
+ central, arg_type, ConfigSection(ref)))
+ return result
+ elif arg_type == 'list':
+ if not isinstance(value, basestring):
+ # sequence
+ value = ' '.join(value)
+ return None, basics.list_parser(value), None
+ elif arg_type == 'repr':
+ if len(value) == 1:
+ value = value[0]
+ if isinstance(value, basestring):
+ return 'str', value
+ else:
+ return 'ref', ConfigSection(value)
+ else:
+ if all(isinstance(v, basestring) for v in value):
+ return 'list', list(value)
+ result = []
+ for v in value:
+ if isinstance(v, basestring):
+ result.append(v)
+ else:
+ result.append(ConfigSection(v))
+ return 'refs', result
+ assert False, 'unreachable'
+ else:
+ if len(value) != 1:
+ raise errors.ConfigurationError('only one argument required')
+ if not isinstance(value[0], basestring):
+ raise errors.ConfigurationError(
+ '%r should be a string' % value)
+ if arg_type == 'str':
+ return [None, basics.str_parser(value[0]), None]
+ elif arg_type == 'bool':
+ return basics.bool_parser(value[0])
+ else:
+ raise errors.ConfigurationError(
+ 'unsupported type %r' % (arg_type,))
+
+
+def config_from_file(file_obj):
+ try:
+ config = parser.parseFile(file_obj)
+ except pyp.ParseException, e:
+ name = getattr(file_obj, 'name', file_obj)
+ raise errors.ConfigurationError('%s: %s' % (name, e))
+ def build_section(name):
+ return ConfigSection(config[name])
+ return mappings.LazyValDict(config.keys, build_section)
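+
+# Minimal usage sketch (assumed), parsing the dhcpd-style syntax shown in
+# the module docstring from any file-like object:
+#
+#     from StringIO import StringIO
+#     conf = config_from_file(StringIO("metadata {\n    type cache;\n}\n"))
+#     section = conf['metadata']
+#     section.get_value(None, 'type', 'str')   # [None, 'cache', None]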
diff --git a/pkgcore/config/domain.py b/pkgcore/config/domain.py
new file mode 100644
index 0000000..f9c13df
--- /dev/null
+++ b/pkgcore/config/domain.py
@@ -0,0 +1,30 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+base class to derive from for domain objects
+
+Bit empty at the moment
+"""
+from snakeoil.demandload import demandload
+demandload(globals(), "pkgcore.repository:multiplex")
+
+# yes this is basically empty. will fill it out as the base is better
+# identified.
+
+class domain(object):
+
+ def __getattr__(self, attr):
+ if attr == "all_repos":
+ if len(self.repos) == 1:
+ a = self.all_repos = self.repos[0]
+ else:
+ a = self.all_repos = multiplex.tree(*self.repos)
+ elif attr == "all_vdbs":
+ if len(self.vdb) == 1:
+ a = self.all_vdbs = self.vdb[0]
+ else:
+ a = self.all_vdbs = multiplex.tree(*self.vdb)
+ else:
+ raise AttributeError(attr)
+ return a
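+
+# Illustrative behaviour (assumed): a subclass sets self.repos and self.vdb;
+# the first access to all_repos/all_vdbs builds the (possibly multiplexed)
+# tree and caches it as an instance attribute, so __getattr__ only runs once:
+#
+#     d.repos = [repo1, repo2]
+#     d.all_repos          # multiplex.tree(repo1, repo2), cached on d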
diff --git a/pkgcore/config/errors.py b/pkgcore/config/errors.py
new file mode 100644
index 0000000..9dd0e2a
--- /dev/null
+++ b/pkgcore/config/errors.py
@@ -0,0 +1,104 @@
+# Copyright: 2005 Marien Zwart <marienz@gentoo.org>
+# Copyright: 2005 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+# potentially use an intermediate base for user config errors,
+# separate base for instantiation?
+
+
+"""Exceptions raised by the config code."""
+
+
+class BaseException(Exception):
+ pass
+
+
+class TypeDefinitionError(BaseException):
+ """Fatal error in type construction."""
+
+
+class ConfigurationError(BaseException):
+
+ """Fatal error in parsing a config section.
+
+ @type stack: sequence of strings.
+ @ivar stack: messages describing where this ConfigurationError originated.
+ configuration-related code catching ConfigurationError that wants to
+ raise its own ConfigurationError should modify (usually append to)
+ the stack and then re-raise the original exception (this makes sure
+ the traceback is preserved).
+ """
+
+ def __init__(self, message):
+ BaseException.__init__(self, message)
+ self.stack = [message]
+
+ def __str__(self):
+ return ':\n'.join(reversed(self.stack))
+
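+# Illustrative rendering of the stack (hypothetical messages):
+#
+#     e = ConfigurationError('frobbing failed')
+#     e.stack.append("Collapsing section ref 'cache'")
+#     str(e)   # "Collapsing section ref 'cache':\nfrobbing failed"
+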
+
+class CollapseInheritOnly(ConfigurationError):
+ """Attempt was made to collapse an uncollapsable section.
+
+ Separate exception because pconfig catches it separately.
+ """
+
+
+class InstantiationError(ConfigurationError):
+
+ """Exception occured during instantiation.
+
+ @ivar callable: callable object which failed during instantiation.
+ @ivar pargs: positional args passed to callable.
+ @ivar kwargs: keyword args passed to callable.
+ @ivar exc: Original exception object or None.
+
+ A well-behaved configurable callable should raise this exception
+ if instantiation failed, providing one or both of message and
+ exception. The other fields will be filled in by central.
+
+ If the callable raises something else central will wrap it in
+ this, but that will lose the traceback.
+ """
+
+ def __init__(self, message=None, exception=None, callable_obj=None,
+ pargs=None, kwargs=None):
+ if message is not None:
+ ConfigurationError.__init__(self, message)
+ elif exception is not None:
+ ConfigurationError.__init__(self, str(exception))
+ else:
+ raise ValueError('specify at least one of message and exception')
+ self.message = message
+ self.callable = callable_obj
+ self.pargs = pargs
+ self.kwargs = kwargs
+ self.exc = exception
+
+ def __str__(self):
+ # self.callable should be set here (nothing should try to catch
+ # and str() this before central had a chance to fill it in)
+ if self.message is not None:
+ if self.callable is None:
+ message = '%r, callable unset!' % (self.message,)
+ else:
+ message = '%r instantiating %s.%s' % (
+ self.message, self.callable.__module__,
+ self.callable.__name__)
+ # The weird repr(str(exc)) used here quotes the message nicely.
+ elif self.callable is not None:
+ message = "Caught exception %r instantiating %s.%s" % (
+ str(self.exc), self.callable.__module__,
+ self.callable.__name__)
+ else:
+ message = "Caught exception %r, callable unset!" % (str(self.exc),)
+ return ':\n'.join(reversed([message] + self.stack[1:]))
+
+
+class QuoteInterpretationError(ConfigurationError):
+
+ """Quoting of a var was screwed up."""
+
+ def __init__(self, string):
+ ConfigurationError.__init__(self, "Parsing of %r failed" % (string,))
+ self.str = string
diff --git a/pkgcore/config/mke2fsformat.py b/pkgcore/config/mke2fsformat.py
new file mode 100644
index 0000000..212d597
--- /dev/null
+++ b/pkgcore/config/mke2fsformat.py
@@ -0,0 +1,77 @@
+# Copyright: 2005 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+"""Parse a mke2fs.conf(5) style configuration file.
+
+Example of the supported format (not a complete config)::
+
+ # this is a comment.
+ # this is a config section.
+ [metadata]
+ # strings may be quoted or unquoted. values end at the end of the line.
+ type = cache
+ class = pkgcore.cache.metadata.database
+ # true, yes or 1 for boolean True (case-insensitive).
+ readonly = true
+ location = /usr/portage/
+
+
+ # this needs to be quoted because it has a space in it
+ ["livefs domain"]
+ # this could be unquoted.
+ type = "domain"
+ package.keywords = "/etc/portage/package.keywords"
+ default = yes
+ # this is a section reference, with a nested anonymous section.
+ repositories = {
+ type = repo
+ class = pkgcore.ebuild.repository.tree
+ location = /usr/portage
+ # this is also a section reference, but instead of a nested section
+ # we refer to the named metadata section above
+ cache = metadata
+ }
+ fetcher = {
+ type = fetcher
+ distdir = /usr/portage/distfiles
+ }
+"""
+
+# The tests for this are in test_dhcpformat.
+
+from pkgcore.config import dhcpformat, errors
+from snakeoil import mappings
+import pyparsing as pyp
+
+
+_section_contents = pyp.Forward()
+_value = (pyp.Word(pyp.alphanums + './_').setWhitespaceChars(' \t') |
+ pyp.quotedString.copy().setParseAction(pyp.removeQuotes))
+
+_section = pyp.Group(
+ pyp.Suppress('{' + pyp.lineEnd) + _section_contents + pyp.Suppress('}'))
+
+# "statement seems to have no effect"
+# pylint: disable-msg=W0104
+_section_contents << pyp.dictOf(
+ _value + pyp.Suppress('='),
+ pyp.Group(pyp.OneOrMore((_value | _section).setWhitespaceChars(' \t'))) +
+ pyp.Suppress(pyp.lineEnd))
+
+parser = (
+ pyp.stringStart +
+ pyp.dictOf(
+ pyp.Suppress('[') + _value + pyp.Suppress(']' + pyp.lineEnd),
+ _section_contents).ignore(pyp.pythonStyleComment) +
+ pyp.stringEnd)
+
+
+def config_from_file(file_obj):
+ try:
+ config = parser.parseFile(file_obj)
+ except pyp.ParseException, e:
+ name = getattr(file_obj, 'name', file_obj)
+ raise errors.ConfigurationError('%s: %s' % (name, e))
+ def build_section(name):
+ return dhcpformat.ConfigSection(config[name])
+ return mappings.LazyValDict(config.keys, build_section)
diff --git a/pkgcore/const.py b/pkgcore/const.py
new file mode 100644
index 0000000..c92ea49
--- /dev/null
+++ b/pkgcore/const.py
@@ -0,0 +1,65 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# Copyright: 2000-2005 Gentoo Foundation
+# License: GPL2
+
+
+"""
+Internal constants.
+
+Future of this module is debatable- for the most part you likely don't
+want to be using this. As soon as is possible, most of these defines
+will be shifted to wherever they're best situated.
+"""
+
+# note this is lifted out of portage 2. so... it's held onto for the
+# sake of having stuff we still need, but it does need cleanup.
+
+import os.path as osp
+
+
+# the pkgcore package directory
+PORTAGE_BASE_PATH = osp.dirname(osp.abspath(__file__))
+PKGCORE_BIN_PATH = osp.join(PORTAGE_BASE_PATH, 'bin')
+SYSTEM_CONF_FILE = '/etc/pkgcore.conf'
+USER_CONF_FILE = osp.expanduser('~/.pkgcore.conf')
+
+#PORTAGE_PYM_PATH = PORTAGE_BASE_PATH+"/pym"
+#PROFILE_PATH = "/etc/make.profile"
+LOCALE_DATA_PATH = PORTAGE_BASE_PATH+"/locale"
+
+EBUILD_DAEMON_PATH = PKGCORE_BIN_PATH+"/ebuild-env/ebuild-daemon.sh"
+
+SANDBOX_BINARY = "/usr/bin/sandbox"
+
+DEPSCAN_SH_BINARY = "/sbin/depscan.sh"
+BASH_BINARY = "/bin/bash"
+MOVE_BINARY = "/bin/mv"
+COPY_BINARY = "/bin/cp"
+PRELINK_BINARY = "/usr/sbin/prelink"
+depends_phase_path = PKGCORE_BIN_PATH+"/ebuild-env/:/bin:/usr/bin"
+EBUILD_ENV_PATH = [PKGCORE_BIN_PATH+"/"+x for x in [
+ "ebuild-env", "ebuild-helpers"]] \
+ + ["/sbin", "/bin", "/usr/sbin", "/usr/bin"]
+EBD_ENV_PATH = PKGCORE_BIN_PATH+"/ebuild-env"
+
+# XXX this is out of place
+WORLD_FILE = '/var/lib/portage/world'
+#MAKE_CONF_FILE = "/etc/make.conf"
+#MAKE_DEFAULTS_FILE = PROFILE_PATH + "/make.defaults"
+
+# XXX this is out of place
+CUSTOM_MIRRORS_FILE = "/etc/portage/mirrors"
+SANDBOX_PIDS_FILE = "/tmp/sandboxpids.tmp"
+
+#CONFCACHE_FILE = CACHE_PATH+"/confcache"
+#CONFCACHE_LIST = CACHE_PATH+"/confcache_files.anydbm"
+
+LIBFAKEROOT_PATH = "/usr/lib/libfakeroot.so"
+FAKED_PATH = "/usr/bin/faked"
+
+RSYNC_BIN = "/usr/bin/rsync"
+RSYNC_HOST = "rsync.gentoo.org/gentoo-portage"
+
+CVS_BIN = "/usr/bin/cvs"
+
+VERSION = '0.3.1'
diff --git a/pkgcore/ebuild/__init__.py b/pkgcore/ebuild/__init__.py
new file mode 100644
index 0000000..c3c11f3
--- /dev/null
+++ b/pkgcore/ebuild/__init__.py
@@ -0,0 +1,6 @@
+# Copyright: 2005 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+gentoo ebuild support
+"""
diff --git a/pkgcore/ebuild/_atom.so b/pkgcore/ebuild/_atom.so
new file mode 100755
index 0000000..8c746a1
--- /dev/null
+++ b/pkgcore/ebuild/_atom.so
Binary files differ
diff --git a/pkgcore/ebuild/_cpv.so b/pkgcore/ebuild/_cpv.so
new file mode 100755
index 0000000..6550d22
--- /dev/null
+++ b/pkgcore/ebuild/_cpv.so
Binary files differ
diff --git a/pkgcore/ebuild/_depset.so b/pkgcore/ebuild/_depset.so
new file mode 100755
index 0000000..73ef49e
--- /dev/null
+++ b/pkgcore/ebuild/_depset.so
Binary files differ
diff --git a/pkgcore/ebuild/_filter_env.so b/pkgcore/ebuild/_filter_env.so
new file mode 100755
index 0000000..c551c16
--- /dev/null
+++ b/pkgcore/ebuild/_filter_env.so
Binary files differ
diff --git a/pkgcore/ebuild/atom.py b/pkgcore/ebuild/atom.py
new file mode 100644
index 0000000..ca56898
--- /dev/null
+++ b/pkgcore/ebuild/atom.py
@@ -0,0 +1,504 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+# "More than one statement on a single line"
+# pylint: disable-msg=C0321
+
+"""
+gentoo ebuild atom, should be generalized into an agnostic base
+"""
+
+from pkgcore.restrictions import values, packages, boolean
+from pkgcore.ebuild import cpv, errors
+from pkgcore.ebuild.atom_restricts import VersionMatch
+from snakeoil.compatibility import all
+from snakeoil.klass import generic_equality
+from snakeoil.demandload import demandload
+demandload(globals(),
+ "pkgcore.restrictions.delegated:delegate",
+ "snakeoil.currying:partial",
+)
+
+# namespace compatibility...
+MalformedAtom = errors.MalformedAtom
+
+valid_use_chars = set(str(x) for x in xrange(10))
+valid_use_chars.update(chr(x) for x in xrange(ord("a"), ord("z") + 1))
+valid_use_chars.update(chr(x) for x in xrange(ord("A"), ord("Z") + 1))
+valid_use_chars.update(["_", ".", "+", "-"])
+valid_use_chars = frozenset(valid_use_chars)
+
+def native_init(self, atom, negate_vers=False):
+ """
+ @param atom: string, see gentoo ebuild atom syntax
+ """
+ sf = object.__setattr__
+
+ orig_atom = atom
+
+ u = atom.find("[")
+ if u != -1:
+ # use dep
+ u2 = atom.find("]", u)
+ if u2 == -1:
+ raise errors.MalformedAtom(atom,
+ "use restriction isn't completed")
+ sf(self, "use", tuple(sorted(atom[u+1:u2].split(','))))
+ for x in self.use:
+ if not all(y in valid_use_chars for y in x):
+ raise errors.MalformedAtom(atom,
+ "invalid char spotted in use dep")
+ if not all(x.rstrip("-") for x in self.use):
+ raise errors.MalformedAtom(
+ atom, "cannot have empty use deps in use restriction")
+ atom = atom[0:u]+atom[u2 + 1:]
+ else:
+ sf(self, "use", None)
+ s = atom.find(":")
+ if s != -1:
+ i2 = atom.find(":", s + 1)
+ if i2 != -1:
+ repo_id = atom[i2 + 1:]
+ if not repo_id:
+ raise errors.MalformedAtom(atom,
+ "repo_id must not be empty")
+ elif ":" in repo_id:
+ raise errors.MalformedAtom(atom,
+ "repo_id may contain only [a-Z0-9_.-+/]")
+ atom = atom[:i2]
+ sf(self, "repo_id", repo_id)
+ else:
+ sf(self, "repo_id", None)
+ # slot dep.
+ slots = tuple(sorted(atom[s+1:].split(",")))
+ if not all(slots):
+ # if the slot char came in only due to repo_id, force slots to None
+ if len(slots) == 1 and i2 != -1:
+ slots = None
+ else:
+ raise errors.MalformedAtom(atom,
+ "empty slots aren't allowed")
+ sf(self, "slot", slots)
+ atom = atom[:s]
+ else:
+ sf(self, "slot", None)
+ sf(self, "repo_id", None)
+ del u, s
+
+ sf(self, "blocks", atom[0] == "!")
+ if self.blocks:
+ atom = atom[1:]
+
+ if atom[0] in ('<', '>'):
+ if atom[1] == '=':
+ sf(self, 'op', atom[:2])
+ atom = atom[2:]
+ else:
+ sf(self, 'op', atom[0])
+ atom = atom[1:]
+ elif atom[0] == '=':
+ if atom[-1] == '*':
+ sf(self, 'op', '=*')
+ atom = atom[1:-1]
+ else:
+ atom = atom[1:]
+ sf(self, 'op', '=')
+ elif atom[0] == '~':
+ sf(self, 'op', '~')
+ atom = atom[1:]
+ else:
+ sf(self, 'op', '')
+ sf(self, 'cpvstr', atom)
+
+ try:
+ c = cpv.CPV(self.cpvstr)
+ except errors.InvalidCPV, e:
+ raise errors.MalformedAtom(orig_atom, str(e))
+ sf(self, "key", c.key)
+ sf(self, "package", c.package)
+ sf(self, "category", c.category)
+ sf(self, "version", c.version)
+ sf(self, "fullver", c.fullver)
+ sf(self, "revision", c.revision)
+
+ if self.op:
+ if self.version is None:
+ raise errors.MalformedAtom(orig_atom,
+ "operator requires a version")
+ elif self.version is not None:
+ raise errors.MalformedAtom(orig_atom,
+ 'versioned atom requires an operator')
+ sf(self, "hash", hash(orig_atom))
+ sf(self, "negate_vers", negate_vers)
+
+def native__getattr__(self, attr):
+ if attr != "restrictions":
+ raise AttributeError(attr)
+
+ # ordering here matters; against 24702 ebuilds for
+ # a non matchable atom with package as the first restriction
+ # 10 loops, best of 3: 206 msec per loop
+ # with category as the first(the obvious ordering)
+ # 10 loops, best of 3: 209 msec per loop
+ # why? because category is more likely to collide;
+ # at the time of this profiling, there were 151 categories.
+ # over 11k packages however.
+ r = [packages.PackageRestriction(
+ "package", values.StrExactMatch(self.package)),
+ packages.PackageRestriction(
+ "category", values.StrExactMatch(self.category))]
+
+ if self.repo_id is not None:
+ r.insert(0, packages.PackageRestriction("repo.repo_id",
+ values.StrExactMatch(self.repo_id)))
+
+ if self.fullver is not None:
+ if self.op == '=*':
+ r.append(packages.PackageRestriction(
+ "fullver", values.StrGlobMatch(self.fullver)))
+ else:
+ r.append(VersionMatch(self.op, self.version, self.revision,
+ negate=self.negate_vers))
+
+ if self.slot is not None:
+ if len(self.slot) == 1:
+ v = values.StrExactMatch(self.slot[0])
+ else:
+ v = values.OrRestriction(*map(values.StrExactMatch,
+ self.slot))
+ r.append(packages.PackageRestriction("slot", v))
+
+ if self.use is not None:
+ false_use = [x[1:] for x in self.use if x[0] == "-"]
+ true_use = [x for x in self.use if x[0] != "-"]
+ v = []
+ if false_use:
+ v.append(values.ContainmentMatch(negate=True,
+ all=True, *false_use))
+
+ if true_use:
+ v.append(values.ContainmentMatch(all=True, *true_use))
+ if len(v) == 1:
+ v = v[0]
+ else:
+ v = values.AndRestriction(*v)
+ r.append(packages.PackageRestriction("use", v))
+
+ r = tuple(r)
+ object.__setattr__(self, attr, r)
+ return r
+
+
+native_atom_overrides = {"__init__":native_init,
+ "__getattr__":native__getattr__}
+
+try:
+ from pkgcore.ebuild._atom import overrides as atom_overrides
+except ImportError:
+ atom_overrides = native_atom_overrides
+
+
+class atom(boolean.AndRestriction):
+
+ """Currently implements gentoo ebuild atom parsing.
+
+ Should be converted into an agnostic dependency base.
+ """
+
+ __slots__ = (
+ "blocks", "op", "negate_vers", "cpvstr", "use",
+ "slot", "hash", "category", "version", "revision", "fullver",
+ "package", "key", "repo_id")
+
+ type = packages.package_type
+ negate = False
+
+ __attr_comparison__ = ("cpvstr", "op", "blocks", "negate_vers",
+ "use", "slot")
+
+ __metaclass__ = generic_equality
+ __inst_caching__ = True
+
+ locals().update(atom_overrides.iteritems())
+
+ def __repr__(self):
+ if self.op == '=*':
+ atom = "=%s*" % self.cpvstr
+ else:
+ atom = self.op + self.cpvstr
+ if self.blocks:
+ atom = '!' + atom
+ attrs = [atom]
+ if self.use:
+ attrs.append('use=' + repr(self.use))
+ if self.slot:
+ attrs.append('slot=' + repr(self.slot))
+ return '<%s %s @#%x>' % (
+ self.__class__.__name__, ' '.join(attrs), id(self))
+
+ def __reduce__(self):
+ return (atom, (str(self), self.negate_vers))
+
+ def iter_dnf_solutions(self, full_solution_expansion=False):
+ if full_solution_expansion:
+ return boolean.AndRestriction.iter_dnf_solutions(
+ self, full_solution_expansion=True)
+ return iter([[self]])
+
+ def cnf_solutions(self, full_solution_expansion=False):
+ if full_solution_expansion:
+ return boolean.AndRestriction.cnf_solutions(
+ self, full_solution_expansion=True)
+ return [[self]]
+
+ def __str__(self):
+ if self.op == '=*':
+ s = "=%s*" % self.cpvstr
+ else:
+ s = self.op + self.cpvstr
+ if self.blocks:
+ s = "!" + s
+ if self.use:
+ s += "[%s]" % ",".join(self.use)
+ if self.slot:
+ s += ":%s" % ",".join(self.slot)
+ if self.repo_id:
+ s += ":%s" % self.repo_id
+ elif self.repo_id:
+ s += "::%s" % self.repo_id
+ return s
+
+ def __hash__(self):
+ return self.hash
+
+ def __iter__(self):
+ return iter(self.restrictions)
+
+ def __getitem__(self, index):
+ return self.restrictions[index]
+
+ def __cmp__(self, other):
+ if not isinstance(other, self.__class__):
+ raise TypeError("other isn't of %s type, is %s" %
+ (self.__class__, other.__class__))
+
+ c = cmp(self.category, other.category)
+ if c:
+ return c
+
+ c = cmp(self.package, other.package)
+ if c:
+ return c
+
+ c = cmp(self.op, other.op)
+ if c:
+ return c
+
+ c = cpv.ver_cmp(self.version, self.revision,
+ other.version, other.revision)
+ if c:
+ return c
+
+ c = cmp(self.blocks, other.blocks)
+ if c:
+ # invert it; cmp(True, False) == 1
+ # want non blockers then blockers.
+ return -c
+
+ c = cmp(self.negate_vers, other.negate_vers)
+ if c:
+ return c
+
+ c = cmp(self.slot, other.slot)
+ if c:
+ return c
+
+ return cmp(self.use, other.use)
+
+ def intersects(self, other):
+ """Check if a passed in atom "intersects" this restriction's atom.
+
+ Two atoms "intersect" if a package can be constructed that
+ matches both:
+ - if you query for just "dev-lang/python" it "intersects" both
+ "dev-lang/python" and ">=dev-lang/python-2.4"
+ - if you query for "=dev-lang/python-2.4" it "intersects"
+ ">=dev-lang/python-2.4" and "dev-lang/python" but not
+ "<dev-lang/python-2.3"
+
+ USE and slot deps are also taken into account.
+
+ The block/nonblock state of the atom is ignored.
+ """
+ # Our "key" (cat/pkg) must match exactly:
+ if self.key != other.key:
+ return False
+ # Slot dep only matters if we both have one. If we do they
+ # must be identical:
+ if (self.slot is not None and other.slot is not None and
+ self.slot != other.slot):
+ return False
+
+ if (self.repo_id is not None and other.repo_id is not None and
+ self.repo_id != other.repo_id):
+ return False
+
+ # Use deps are similar: if one of us forces a flag on and the
+ # other forces it off we do not intersect. If only one of us
+ # cares about a flag it is irrelevant.
+
+ # Skip the (very common) case of one of us not having use deps:
+ if self.use and other.use:
+ # Set of flags we do not have in common:
+ flags = set(self.use) ^ set(other.use)
+ for flag in flags:
+ # If this is unset and we also have the set version we fail:
+ if flag[0] == '-' and flag[1:] in flags:
+ return False
+
+ # Remaining thing to check is version restrictions. Get the
+ # ones we can check without actual version comparisons out of
+ # the way first.
+
+ # If one of us is unversioned we intersect:
+ if not self.op or not other.op:
+ return True
+
+ # If we are both "unbounded" in the same direction we intersect:
+ if (('<' in self.op and '<' in other.op) or
+ ('>' in self.op and '>' in other.op)):
+ return True
+
+ # Trick used here: just use the atoms as sufficiently
+ # package-like object to pass to these functions (all that is
+ # needed is a version and revision attr).
+
+ # If one of us is an exact match we intersect if the other matches it:
+ if self.op == '=':
+ if other.op == '=*':
+ return self.fullver.startswith(other.fullver)
+ return VersionMatch(
+ other.op, other.version, other.revision).match(self)
+ if other.op == '=':
+ if self.op == '=*':
+ return other.fullver.startswith(self.fullver)
+ return VersionMatch(
+ self.op, self.version, self.revision).match(other)
+
+ # If we are both ~ matches we match if we are identical:
+ if self.op == other.op == '~':
+ return (self.version == other.version and
+ self.revision == other.revision)
+
+ # If we are both glob matches we match if one of us matches the other.
+ if self.op == other.op == '=*':
+ return (self.fullver.startswith(other.fullver) or
+ other.fullver.startswith(self.fullver))
+
+ # If one of us is a glob match and the other a ~ we match if the glob
+ # matches the ~ (ignoring a revision on the glob):
+ if self.op == '=*' and other.op == '~':
+ return other.fullver.startswith(self.version)
+ if other.op == '=*' and self.op == '~':
+ return self.fullver.startswith(other.version)
+
+ # If we get here at least one of us is a <, <=, > or >=:
+ if self.op in ('<', '<=', '>', '>='):
+ ranged, other = self, other
+ else:
+ ranged, other = other, self
+
+ if '<' in other.op or '>' in other.op:
+ # We are both ranged, and in the opposite "direction" (or
+ # we would have matched above). We intersect if we both
+ # match the other's endpoint (just checking one endpoint
+ # is not enough, it would give a false positive on <=2 vs >2)
+ return (
+ VersionMatch(
+ other.op, other.version, other.revision).match(ranged) and
+ VersionMatch(
+ ranged.op, ranged.version, ranged.revision).match(other))
+
+ if other.op == '~':
+ # Other definitely matches its own version. If ranged also
+ # does we're done:
+ if VersionMatch(
+ ranged.op, ranged.version, ranged.revision).match(other):
+ return True
+ # The only other case where we intersect is if ranged is a
+ # > or >= on other's version and a nonzero revision. In
+ # that case other will match ranged. Be careful not to
+ # give a false positive for ~2 vs <2 here:
+ return ranged.op in ('>', '>=') and VersionMatch(
+ other.op, other.version, other.revision).match(ranged)
+
+ if other.op == '=*':
+ # The fun one, since glob matches do not correspond to a
+ # single contiguous region of versions.
+
+ # a glob match definitely matches its own version, so if
+ # ranged does too we're done:
+ if VersionMatch(
+ ranged.op, ranged.version, ranged.revision).match(other):
+ return True
+ if '<' in ranged.op:
+ # Remaining cases where this intersects: there is a
+ # package smaller than ranged.fullver and
+ # other.fullver that they both match.
+
+ # If other.revision is not None then other does not
+ # match anything smaller than its own fullver:
+ if other.revision is not None:
+ return False
+
+ # If other.revision is None then we can always
+ # construct a package smaller than other.fullver by
+ # tagging e.g. an _alpha1 on, since
+ # cat/pkg_beta2_alpha1_alpha1 is a valid version.
+ # (Yes, really. Try it if you don't believe me.)
+ # If and only if other also matches ranged then
+ # ranged will also match one of those smaller packages.
+ # XXX (I think, need to try harder to verify this.)
+ return ranged.fullver.startswith(other.version)
+ else:
+ # Remaining cases where this intersects: there is a
+ # package greater than ranged.fullver and
+ # other.fullver that they both match.
+
+ # We can always construct a package greater than
+ # other.fullver by adding a digit to it.
+ # If and only if other also matches ranged then
+ # ranged will match such a larger package
+ # XXX (I think, need to try harder to verify this.)
+ return ranged.fullver.startswith(other.version)
+
+ # Handled all possible ops.
+ raise NotImplementedError(
+ 'Someone added an op to atom without adding it to intersects')
+
+
+def split_atom(inst):
+ if len(inst.restrictions) > 3:
+ a = packages.AndRestriction(*inst.restrictions[2:])
+ elif len(inst.restrictions) == 3:
+ a = inst.restrictions[2]
+ else:
+ a = []
+ return inst.category + "/" + inst.package, a
+
+def _collapsed_restrict_match(data, pkg, mode):
+ # mode is ignored; non applicable.
+ for r in data.get(pkg.key, ()):
+ if r.match(pkg):
+ return True
+ return False
+
+def generate_collapsed_restriction(atoms, negate=False):
+ d = {}
+ for a in atoms:
+ k = a.key
+ if k not in d:
+ d[k] = [a]
+ else:
+ d[k].append(a)
+ return delegate(partial(_collapsed_restrict_match, d), negate=negate)
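+
+# Illustrative parse results (derived from the parsing code above; the atom
+# itself is just an example):
+#
+#     a = atom(">=dev-lang/python-2.4[foo,-bar]:2")
+#     a.op, a.key, a.version      # '>=', 'dev-lang/python', '2.4'
+#     a.use, a.slot               # ('-bar', 'foo'), ('2',)
+#     a.intersects(atom("dev-lang/python"))    # True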
diff --git a/pkgcore/ebuild/atom_restricts.py b/pkgcore/ebuild/atom_restricts.py
new file mode 100644
index 0000000..c58fdcf
--- /dev/null
+++ b/pkgcore/ebuild/atom_restricts.py
@@ -0,0 +1,116 @@
+# Copyright: 2005-2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+# "More than one statement on a single line"
+# pylint: disable-msg=C0321
+
+"""
+atom version restrict
+"""
+
+from pkgcore.restrictions import packages, restriction
+from pkgcore.ebuild import cpv, errors
+from snakeoil.klass import generic_equality
+
+# TODO: change values.EqualityMatch so it supports le, lt, gt, ge, eq,
+# ne ops, and convert this to it.
+
+class VersionMatch(restriction.base):
+
+ """
+ package restriction implementing gentoo ebuild version comparison rules
+
+ any overriding of this class *must* maintain numerical order of
+ self.vals, see intersect for reason why. vals also must be a tuple.
+ """
+
+ __slots__ = ("ver", "rev", "vals", "droprev", "negate")
+
+ __inst_caching__ = True
+ __metaclass__ = generic_equality
+ __attr_comparison__ = ('negate', 'rev', 'droprev', 'vals')
+
+ type = packages.package_type
+ attr = "fullver"
+
+ _convert_op2str = {(-1,):"<", (-1, 0): "<=", (0,):"=",
+ (0, 1):">=", (1,):">"}
+
+ _convert_str2op = dict([(v, k) for k, v in _convert_op2str.iteritems()])
+ del k, v
+
+ def __init__(self, operator, ver, rev=None, negate=False, **kwd):
+ """
+ @param operator: version comparison to do,
+ valid operators are ('<', '<=', '=', '>=', '>', '~')
+ @type operator: string
+ @param ver: version to base comparison on
+ @type ver: string
+ @param rev: revision to base comparison on
+ @type rev: None (no rev), or an int
+ @param negate: should the restriction results be negated;
+ currently forced to False
+ """
+
+ kwd["negate"] = False
+ super(self.__class__, self).__init__(**kwd)
+ sf = object.__setattr__
+ sf(self, "ver", ver)
+ sf(self, "rev", rev)
+ if operator != "~" and operator not in self._convert_str2op:
+ raise errors.InvalidVersion(self.ver, self.rev,
+ "invalid operator, '%s'" % operator)
+
+ sf(self, "negate", negate)
+ if operator == "~":
+ if ver is None:
+ raise ValueError(
+ "for ~ op, version must be something other then None")
+ sf(self, "droprev", True)
+ sf(self, "vals", (0,))
+ else:
+ sf(self, "droprev", False)
+ sf(self, "vals", self._convert_str2op[operator])
+
+ def match(self, pkginst):
+ if self.droprev:
+ r1, r2 = None, None
+ else:
+ r1, r2 = self.rev, pkginst.revision
+
+ return (cpv.ver_cmp(pkginst.version, r2, self.ver, r1) in self.vals) \
+ != self.negate
+
+ def __str__(self):
+ s = self._convert_op2str[self.vals]
+
+ if self.negate:
+ n = "not "
+ else:
+ n = ''
+
+ if self.droprev or self.rev is None:
+ return "ver %s%s %s" % (n, s, self.ver)
+ return "ver-rev %s%s %s-r%s" % (n, s, self.ver, self.rev)
+
+ @staticmethod
+ def _convert_ops(inst):
+ if inst.negate:
+ if inst.droprev:
+ return inst.vals
+ return tuple(sorted(set((-1, 0, 1)).difference(inst.vals)))
+ return inst.vals
+
+ def __eq__(self, other):
+ if self is other:
+ return True
+ if isinstance(other, self.__class__):
+ if self.droprev != other.droprev or self.ver != other.ver \
+ or self.rev != other.rev:
+ return False
+ return self._convert_ops(self) == self._convert_ops(other)
+
+ return False
+
+ def __hash__(self):
+ return hash((self.droprev, self.ver, self.rev, self.negate, self.vals))
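+
+# Illustrative sketch (assumed usage): match() only needs an object exposing
+# 'version' and 'revision' attributes, e.g. a CPV instance:
+#
+#     from pkgcore.ebuild.cpv import CPV
+#     VersionMatch('>=', '2.4').match(CPV('dev-lang/python-2.5'))   # True
+#     VersionMatch('<', '2.4').match(CPV('dev-lang/python-2.5'))    # False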
diff --git a/pkgcore/ebuild/conditionals.py b/pkgcore/ebuild/conditionals.py
new file mode 100644
index 0000000..a1dd74b
--- /dev/null
+++ b/pkgcore/ebuild/conditionals.py
@@ -0,0 +1,342 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""DepSet parsing.
+
+Turns a DepSet (depends, rdepends, SRC_URI, license, etc) into
+appropriate conditionals.
+"""
+
+# TODO: move exceptions elsewhere, bind them to a base exception for pkgcore
+
+from pkgcore.restrictions import packages, values, boolean
+from snakeoil.iterables import expandable_chain
+from snakeoil.lists import iflatten_instance
+from pkgcore.ebuild.atom import atom
+from pkgcore.ebuild.errors import ParseError
+
+try:
+ from pkgcore.ebuild._depset import parse_depset
+except ImportError:
+ parse_depset = None
+
+class DepSet(boolean.AndRestriction):
+
+ """
+ gentoo DepSet syntax parser
+ """
+
+ __slots__ = ("has_conditionals", "element_class", "_node_conds",
+ "restrictions", "_known_conditionals")
+ type = packages.package_type
+ negate = False
+
+ __inst_caching__ = False
+ parse_depset = parse_depset
+ if parse_depset is not None:
+ parse_depset = staticmethod(parse_depset)
+
+ def __init__(self, dep_str, element_class, \
+ operators=None,
+ element_func=None):
+
+ """
+ @param dep_str: string abiding by DepSet syntax
+ @param operators: mapping of node -> callable for special operators
+ in DepSet syntax
+ @param element_func: callable used to generate elements; if None,
+ element_class itself is used.
+ Mainly useful when you need to curry a few args for instance
+ generation, since element_class _must_ be a class
+ @param element_class: class of generated elements
+ """
+
+ if not isinstance(element_class, type):
+ # yes, this blocks non new-style classes. tough cookies.
+ raise ValueError("element_class must be a new style class")
+
+ sf = object.__setattr__
+ sf(self, "_known_conditionals", None)
+ sf(self, "element_class", element_class)
+ if element_func is None:
+ element_func = element_class
+
+ if self.parse_depset is not None:
+ restrictions = None
+ if operators is None:
+ has_conditionals, restrictions = self.parse_depset(dep_str,
+ element_func, boolean.AndRestriction,
+ boolean.OrRestriction)
+ else:
+ for x in operators:
+ if x not in ("", "||"):
+ break
+ else:
+ has_conditionals, restrictions = self.parse_depset(dep_str,
+ element_func, operators.get(""), operators.get("||"))
+
+ if restrictions is not None:
+ sf(self, "_node_conds", has_conditionals)
+ sf(self, "restrictions", restrictions)
+ return
+
+ sf(self, "restrictions", [])
+ if operators is None:
+ operators = {"||":boolean.OrRestriction, "":boolean.AndRestriction}
+
+ raw_conditionals = []
+ depsets = [self.restrictions]
+
+ node_conds = False
+ words = iter(dep_str.split())
+ k = None
+ try:
+ for k in words:
+ if ")" in k:
+ if ")" != k:
+ raise ParseError(dep_str, k)
+ # no elements == error. if closures don't match up,
+ # an IndexError would be raised from trying to pop
+ # the frame, so that is addressed.
+ if not depsets[-1]:
+ raise ParseError(dep_str)
+ elif raw_conditionals[-1].endswith('?'):
+ node_conds = True
+ c = raw_conditionals[-1]
+ if c[0] == "!":
+ c = values.ContainmentMatch(c[1:-1], negate=True)
+ else:
+ c = values.ContainmentMatch(c[:-1])
+
+ depsets[-2].append(
+ packages.Conditional("use", c, tuple(depsets[-1])))
+
+ else:
+ if len(depsets[-1]) == 1:
+ depsets[-2].append(depsets[-1][0])
+ elif raw_conditionals[-1] == '' and (len(raw_conditionals) == 1 or ('' == raw_conditionals[-2])):
+ # if the frame is an and and the parent is an and, collapse it in.
+ depsets[-2].extend(depsets[-1])
+ else:
+ depsets[-2].append(
+ operators[raw_conditionals[-1]](finalize=True,
+ *depsets[-1]))
+
+ raw_conditionals.pop()
+ depsets.pop()
+
+ elif "(" in k:
+ if k != "(":
+ raise ParseError(dep_str, k)
+
+ k = ''
+ # push another frame on
+ depsets.append([])
+ raw_conditionals.append(k)
+
+ elif k[-1] == '?' or k in operators:
+ # use conditional or custom op.
+ # no tokens left == bad dep_str.
+ k2 = words.next()
+
+ if k2 != "(":
+ raise ParseError(dep_str, k2)
+
+ # push another frame on
+ depsets.append([])
+ raw_conditionals.append(k)
+
+ elif "|" in k:
+ raise ParseError(dep_str, k)
+ else:
+ # node/element.
+ depsets[-1].append(element_func(k))
+
+
+ except (RuntimeError, SystemExit, KeyboardInterrupt):
+ raise
+ except IndexError:
+ # [][-1] for a frame access, which means it was a parse error.
+ raise
+ except StopIteration:
+ if k is None:
+ raise
+ raise ParseError(dep_str, k)
+ except Exception, e:
+ raise ParseError(dep_str, e)
+
+ # check if any closures required
+ if len(depsets) != 1:
+ raise ParseError(dep_str)
+
+ sf(self, "_node_conds", node_conds)
+ sf(self, "restrictions", tuple(self.restrictions))
+
+
+ def evaluate_depset(self, cond_dict, tristate_filter=None):
+ """
+ @param cond_dict: container to be used for conditional collapsing,
+ typically is a use list
+ @param tristate_filter: a control; if specified, must be a container
+ of conditionals to lock to cond_dict.
+ During processing, a conditional whose flag is not in
+ tristate_filter automatically enables its payload
+ (regardless of the conditional's negation).
+ """
+
+ if not self.has_conditionals:
+ return self
+
+ flat_deps = self.__class__("", self.element_class)
+
+ stack = [boolean.AndRestriction, iter(self.restrictions)]
+ base_restrict = []
+ restricts = [base_restrict]
+ count = 1
+ while count:
+ for node in stack[-1]:
+ if isinstance(node, self.element_class):
+ restricts[-1].append(node)
+ continue
+ if isinstance(node, packages.Conditional):
+ if not node.payload:
+ continue
+ elif tristate_filter is not None:
+ assert len(node.restriction.vals) == 1
+ val = list(node.restriction.vals)[0]
+ if val in tristate_filter:
+ # if val is forced true, but the check is
+ # negation ignore it
+ # if !mips != mips
+ if (val in cond_dict) == node.restriction.negate:
+ continue
+ elif not node.restriction.match(cond_dict):
+ continue
+ if not isinstance(node.payload, tuple):
+ stack += [boolean.AndRestriction, iter((node.payload))]
+ else:
+ stack += [boolean.AndRestriction, iter(node.payload)]
+ else:
+ stack += [node.__class__,
+ iter(node.restrictions)]
+ count += 1
+ restricts.append([])
+ break
+ else:
+ stack.pop()
+ l = len(restricts)
+ if l != 1:
+ if restricts[-1]:
+ # optimization to avoid unnecessary frames.
+ if len(restricts[-1]) == 1:
+ restricts[-2].append(restricts[-1][0])
+ elif stack[-1] is stack[-3] is boolean.AndRestriction:
+ restricts[-2].extend(restricts[-1])
+ else:
+ restricts[-2].append(stack[-1](*restricts[-1]))
+ stack.pop()
+ count -= 1
+ restricts.pop()
+
+ object.__setattr__(flat_deps, "restrictions", tuple(base_restrict))
+ return flat_deps
+
+ @staticmethod
+ def find_cond_nodes(restriction_set, yield_non_conditionals=False):
+ conditions_stack = []
+ new_set = expandable_chain(restriction_set)
+ for cur_node in new_set:
+ if isinstance(cur_node, packages.Conditional):
+ conditions_stack.append(cur_node.restriction)
+ new_set.appendleft(list(cur_node.payload) + [None])
+ elif (isinstance(cur_node, boolean.base)
+ and not isinstance(cur_node, atom)):
+ new_set.appendleft(cur_node.restrictions)
+ elif cur_node is None:
+ conditions_stack.pop()
+ elif conditions_stack or yield_non_conditionals: # leaf
+ yield (cur_node, conditions_stack[:])
+
+ @property
+ def node_conds(self):
+ if self._node_conds is False:
+ object.__setattr__(self, "_node_conds", {})
+ elif self._node_conds is True:
+ nc = {}
+
+ always_required = set()
+
+ for payload, restrictions in self.find_cond_nodes(
+ self.restrictions, True):
+ if not restrictions:
+ always_required.add(payload)
+ else:
+ if len(restrictions) == 1:
+ current = restrictions[0]
+ else:
+ current = values.AndRestriction(finalize=True,
+ *restrictions)
+
+ nc.setdefault(payload, []).append(current)
+
+ for k in always_required:
+ if k in nc:
+ del nc[k]
+ for k in nc:
+ nc[k] = tuple(nc[k])
+
+ object.__setattr__(self, "_node_conds", nc)
+
+ return self._node_conds
+
+ @property
+ def has_conditionals(self):
+ return bool(self._node_conds)
+
+ @property
+ def known_conditionals(self):
+ if self._node_conds is False:
+ return frozenset()
+ if self._known_conditionals is None:
+ kc = set()
+ for payload, restrictions in self.find_cond_nodes(
+ self.restrictions):
+ kc.update(iflatten_instance(x.vals for x in restrictions))
+ kc = frozenset(kc)
+ object.__setattr__(self, "_known_conditionals", kc)
+ return kc
+ return self._known_conditionals
+
+ def match(self, *a):
+ raise NotImplementedError
+
+ force_False = force_True = match
+
+ def __str__(self):
+ return ' '.join(stringify_boolean(x) for x in self.restrictions)
+
+ def __iter__(self):
+ return iter(self.restrictions)
+
+ def __getitem__(self, key):
+ return self.restrictions[key]
+
+
+def stringify_boolean(node, func=str):
+ """func is used to stringify the actual content. Useful for fetchables."""
+ if isinstance(node, boolean.OrRestriction):
+ return "|| ( %s )" % " ".join(stringify_boolean(x, func)
+ for x in node.restrictions)
+ elif isinstance(node, DepSet):
+ return ' '.join(stringify_boolean(x, func) for x in node.restrictions)
+ elif isinstance(node, boolean.AndRestriction) and \
+ not isinstance(node, atom):
+ return "( %s )" % " ".join(stringify_boolean(x, func)
+ for x in node.restrictions)
+ elif isinstance(node, packages.Conditional):
+ assert len(node.restriction.vals) == 1
+ return "%s%s? ( %s )" % (
+ node.restriction.negate and "!" or "",
+ list(node.restriction.vals)[0],
+ " ".join(stringify_boolean(x, func) for x in node.payload))
+ return func(node)
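+
+# Illustrative sketch (assumed usage) of parsing and conditional evaluation,
+# using the ebuild atom class as the element type:
+#
+#     from pkgcore.ebuild.atom import atom
+#     d = DepSet("x? ( dev-util/diffball ) dev-lang/python", atom)
+#     str(d.evaluate_depset(["x"]))    # 'dev-util/diffball dev-lang/python'
+#     str(d.evaluate_depset([]))       # 'dev-lang/python'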
diff --git a/pkgcore/ebuild/const.py b/pkgcore/ebuild/const.py
new file mode 100644
index 0000000..8815d79
--- /dev/null
+++ b/pkgcore/ebuild/const.py
@@ -0,0 +1,22 @@
+# Copyright: 2005 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+
+"""
+ebuild internal constants
+"""
+
+eapi_capable = (0, 1)
+unknown_eapi = 2
+
+incrementals = (
+ "USE", "FEATURES", "ACCEPT_KEYWORDS", "ACCEPT_LICENSE",
+ "CONFIG_PROTECT_MASK", "CONFIG_PROTECT", "PRELINK_PATH",
+ "PRELINK_PATH_MASK")
+
+metadata_keys = (
+ 'DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI', 'RESTRICT', 'HOMEPAGE', 'LICENSE',
+ 'DESCRIPTION', 'KEYWORDS', 'INHERITED', 'IUSE', 'PDEPEND', 'PROVIDE',
+ 'EAPI', '_mtime_', '_eclasses_')
+
+ACCEPT_LICENSE = ()
diff --git a/pkgcore/ebuild/cpv.py b/pkgcore/ebuild/cpv.py
new file mode 100644
index 0000000..a3c95ba
--- /dev/null
+++ b/pkgcore/ebuild/cpv.py
@@ -0,0 +1,311 @@
+# Copyright: 2005 Jason Stubbs <jstubbs@gentoo.org>
+# Copyright: 2005-2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+
+"""gentoo ebuild specific base package class"""
+
+from pkgcore.ebuild.errors import InvalidCPV
+
+from pkgcore.package import base
+# do this to break the cycle.
+from snakeoil.demandload import demandload, demand_compile_regexp
+demandload(globals(), "pkgcore.ebuild:atom")
+
+suffix_regexp = demand_compile_regexp(
+ globals(), 'suffix_regexp', '^(alpha|beta|rc|pre|p)(\\d*)$')
+suffix_value = {"pre": -2, "p": 1, "alpha": -4, "beta": -3, "rc": -1}
+
+# while the package section looks fugly, there is a reason for it-
+# to prevent version chunks from showing up in the package
+
+valid_cat = "[a-zA-Z0-9][-a-zA-Z0-9+._]*"
+parser = demand_compile_regexp(
+ globals(), 'parser',
+ "^(?P<key>(?P<category>(?:%s)(?:/%s)*)/"
+ "(?P<package>[a-zA-Z0-9+][a-zA-Z0-9_+]*"
+ "(?:-(?:[0-9]+[a-zA-Z+]{2,}[_+a-zA-Z0-9]*|[a-zA-Z+][a-zA-Z0-9+_]*))*))"
+ "(?:-(?P<fullver>(?P<version>(?:cvs\\.)?(?:\\d+)(?:\\.\\d+)*[a-z]?"
+ "(?:_(p(?:re)?|beta|alpha|rc)\\d*)*)"
+ "(?:-r(?P<revision>\\d+))?))?$" %
+ (valid_cat, valid_cat))
+
+
+class native_CPV(object):
+
+ """
+ base ebuild package class
+
+ @ivar category: str category
+ @ivar package: str package
+ @ivar key: str key (cat/pkg)
+ @ivar version: str version
+ @ivar revision: int revision
+ @ivar versioned_atom: atom matching this exact version
+ @ivar unversioned_atom: atom matching all versions of this package
+ @cvar _get_attr: mapping of attr:callable to generate attributes on the fly
+ """
+
+ __slots__ = ("__weakref__", "cpvstr", "key", "category", "package",
+ "version", "revision", "fullver")
+
+ # if native is being used, forget trying to reuse strings.
+ def __init__(self, *a):
+ """
+ Can be called with one string or with three string args.
+
+ If called with one arg that is the cpv string. (See L{parser}
+ for allowed syntax).
+
+ If called with three args they are the category, package and
+ version components of the cpv string respectively.
+ """
+ l = len(a)
+ if l == 1:
+ cpvstr = a[0]
+ elif l == 3:
+ for x in a:
+ if not isinstance(x, basestring):
+ raise TypeError("all args must be strings, got %r" % (a,))
+ cpvstr = "%s/%s-%s" % a
+ else:
+ raise TypeError("CPV takes 1 arg (cpvstr), or 3 (cat, pkg, ver):"
+ " got %r" % (a,))
+ if not isinstance(cpvstr, basestring):
+ raise TypeError(self.cpvstr)
+ m = parser.match(cpvstr)
+ if not m:
+ raise InvalidCPV(cpvstr)
+ object.__setattr__(self, "cpvstr", cpvstr)
+ for k, v in m.groupdict().iteritems():
+ object.__setattr__(self, k, v)
+ r = self.revision
+ if r is not None:
+ object.__setattr__(self, "revision", int(r))
+
+ def __hash__(self):
+ return hash(self.cpvstr)
+
+ def __repr__(self):
+ return '<%s cpvstr=%s @%#8x>' % (
+ self.__class__.__name__, getattr(self, 'cpvstr', None), id(self))
+
+ def __str__(self):
+ return getattr(self, 'cpvstr', 'None')
+
+ def __cmp__(self, other):
+ try:
+ if self.cpvstr == other.cpvstr:
+ return 0
+
+ if (self.category and other.category and
+ self.category != other.category):
+ return cmp(self.category, other.category)
+
+ if self.package and other.package and self.package != other.package:
+ return cmp(self.package, other.package)
+
+ # note I chucked out valueerror, none checks on versions
+ # passed in. I suck, I know.
+ # ~harring
+ # fails in doing comparison of unversioned atoms against
+ # versioned atoms
+ return native_ver_cmp(self.version, self.revision, other.version,
+ other.revision)
+ except AttributeError:
+ return 1
+
+
+def native_ver_cmp(ver1, rev1, ver2, rev2):
+
+ # If the versions are the same, comparing revisions will suffice.
+ if ver1 == ver2:
+ return cmp(rev1, rev2)
+
+ # Split up the versions into dotted strings and lists of suffixes.
+ parts1 = ver1.split("_")
+ parts2 = ver2.split("_")
+
+ # If the dotted strings are equal, we can skip doing a detailed comparison.
+ if parts1[0] != parts2[0]:
+
+ # First split up the dotted strings into their components.
+ ver_parts1 = parts1[0].split(".")
+ ver_parts2 = parts2[0].split(".")
+
+ # And check if CVS ebuilds come into play. If there is only
+ # one it wins by default. Otherwise any CVS component can
+ # be ignored.
+ if ver_parts1[0] == "cvs" and ver_parts2[0] != "cvs":
+ return 1
+ elif ver_parts1[0] != "cvs" and ver_parts2[0] == "cvs":
+ return -1
+ elif ver_parts1[0] == "cvs":
+ del ver_parts1[0]
+ del ver_parts2[0]
+
+ # Pull out any letter suffix on the final components and keep
+ # them for later.
+ letters = []
+ for ver_parts in (ver_parts1, ver_parts2):
+ if ver_parts[-1][-1].isalpha():
+ letters.append(ord(ver_parts[-1][-1]))
+ ver_parts[-1] = ver_parts[-1][:-1]
+ else:
+ # Using -1 simplifies comparisons later
+ letters.append(-1)
+
+ # OPT: Pull length calculation out of the loop
+ ver_parts1_len = len(ver_parts1)
+ ver_parts2_len = len(ver_parts2)
+ len_list = (ver_parts1_len, ver_parts2_len)
+
+ # Iterate through the components
+ for x in xrange(max(len_list)):
+
+ # If we've run out of components, we can figure out who wins
+ # now. If the version that ran out of components has a
+ # letter suffix, it wins. Otherwise, the other version wins.
+ if x in len_list:
+ if x == ver_parts1_len:
+ return cmp(letters[0], 0)
+ else:
+ return cmp(0, letters[1])
+
+ # If the string components are equal, the numerical
+ # components will be equal too.
+ if ver_parts1[x] == ver_parts2[x]:
+ continue
+
+ # If one of the components begins with a "0" then they
+ # are compared as floats so that 1.1 > 1.02.
+ if ver_parts1[x][0] == "0" or ver_parts2[x][0] == "0":
+ v1 = ver_parts1[x]
+ v2 = ver_parts2[x]
+ else:
+ v1 = int(ver_parts1[x])
+ v2 = int(ver_parts2[x])
+
+ # If they are not equal, the higher value wins.
+ c = cmp(v1, v2)
+ if c:
+ return c
+
+ # The dotted components were equal. Let's compare any single
+ # letter suffixes.
+ if letters[0] != letters[1]:
+ return cmp(letters[0], letters[1])
+
+ # The dotted components were equal, so remove them from our lists
+ # leaving only suffixes.
+ del parts1[0]
+ del parts2[0]
+
+ # OPT: Pull length calculation out of the loop
+ parts1_len = len(parts1)
+ parts2_len = len(parts2)
+
+ # Iterate through the suffixes
+ for x in xrange(max(parts1_len, parts2_len)):
+
+ # If we're at the end of one of our lists, we need to use
+ # the next suffix from the other list to decide who wins.
+ if x == parts1_len:
+ match = suffix_regexp.match(parts2[x])
+ val = suffix_value[match.group(1)]
+ if val:
+ return cmp(0, val)
+ return cmp(0, int("0"+match.group(2)))
+ if x == parts2_len:
+ match = suffix_regexp.match(parts1[x])
+ val = suffix_value[match.group(1)]
+ if val:
+ return cmp(val, 0)
+ return cmp(int("0"+match.group(2)), 0)
+
+ # If the string values are equal, no need to parse them.
+ # Continue on to the next.
+ if parts1[x] == parts2[x]:
+ continue
+
+ # Match against our regular expression to make a split between
+ # "beta" and "1" in "beta1"
+ match1 = suffix_regexp.match(parts1[x])
+ match2 = suffix_regexp.match(parts2[x])
+
+ # If our int'ified suffix names are different, use that as the basis
+ # for comparison.
+ c = cmp(suffix_value[match1.group(1)], suffix_value[match2.group(1)])
+ if c:
+ return c
+
+ # Otherwise use the digit as the basis for comparison.
+ c = cmp(int("0"+match1.group(2)), int("0"+match2.group(2)))
+ if c:
+ return c
+
+ # Our versions had different strings but ended up being equal.
+ # The revision holds the final difference.
+ return cmp(rev1, rev2)
+
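+# A couple of illustrative comparisons (per the rules implemented above):
+#
+#     native_ver_cmp("1.1", None, "1.02", None)        # 1  (1.1 > 1.02)
+#     native_ver_cmp("1.0_beta1", None, "1.0", None)   # -1 (_beta sorts before the release)
+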
+fake_cat = "fake"
+fake_pkg = "pkg"
+def cpy_ver_cmp(ver1, rev1, ver2, rev2):
+ if ver1 == ver2:
+ return cmp(rev1, rev2)
+ if ver1 is None:
+ ver1 = ''
+ if ver2 is None:
+ ver2 = ''
+ c = cmp(cpy_CPV(fake_cat, fake_pkg, ver1),
+ cpy_CPV(fake_cat, fake_pkg, ver2))
+ if c != 0:
+ return c
+ return cmp(rev1, rev2)
+
+
+try:
+ # No name in module
+ # pylint: disable-msg=E0611
+ from pkgcore.ebuild._cpv import CPV as cpy_CPV
+ base_CPV = cpy_CPV
+ ver_cmp = cpy_ver_cmp
+ cpy_builtin = True
+except ImportError:
+ base_CPV = native_CPV
+ ver_cmp = native_ver_cmp
+ cpy_builtin = False
+
+
+class CPV(base.base, base_CPV):
+
+ """
+ base ebuild package class
+
+ @ivar category: str category
+ @ivar package: str package
+ @ivar key: str key (cat/pkg)
+ @ivar version: str version
+ @ivar revision: int revision
+ @ivar versioned_atom: atom matching this exact version
+ @ivar unversioned_atom: atom matching all versions of this package
+ @cvar _get_attr: mapping of attr:callable to generate attributes on the fly
+ """
+
+ __slots__ = ()
+
+# __metaclass__ = WeakInstMeta
+
+# __inst_caching__ = True
+
+ def __repr__(self):
+ return '<%s cpvstr=%s @%#8x>' % (
+ self.__class__.__name__, self.cpvstr, id(self))
+
+ @property
+ def versioned_atom(self):
+ return atom.atom("=%s" % self.cpvstr)
+
+ @property
+ def unversioned_atom(self):
+ return atom.atom(self.key)
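+
+# Illustrative attribute values (derived from the parser above):
+#
+#     c = CPV("dev-lang/python-2.4.3-r1")
+#     c.category, c.package     # 'dev-lang', 'python'
+#     c.version, c.revision     # '2.4.3', 1
+#     str(c.versioned_atom)     # '=dev-lang/python-2.4.3-r1'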
diff --git a/pkgcore/ebuild/digest.py b/pkgcore/ebuild/digest.py
new file mode 100644
index 0000000..22a995f
--- /dev/null
+++ b/pkgcore/ebuild/digest.py
@@ -0,0 +1,228 @@
+# Copyright: 2005-2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+ebuild tree manifest/digest support
+"""
+
+from itertools import izip
+from os.path import basename, dirname, sep
+
+from pkgcore.chksum import errors, gpg, get_handler
+from pkgcore.fs.livefs import iter_scan
+from pkgcore.fs.fs import fsFile, fsDir
+
+from snakeoil.obj import make_SlottedDict_kls
+from snakeoil.compatibility import any
+from snakeoil.demandload import demandload
+demandload(globals(),
+ "pkgcore:fetch",
+ "snakeoil.lists:iflatten_instance",
+)
+
+def parse_digest(source, throw_errors=True):
+ d = {}
+ chf_keys = set(["size"])
+ try:
+ f = None
+ try:
+ if isinstance(source, basestring):
+ f = open(source, "r", 32768)
+ else:
+ f = source.get_fileobj()
+ for line in f:
+ l = line.split()
+ if not l:
+ continue
+ if len(l) != 4:
+ if throw_errors:
+ raise errors.ParseChksumError(
+ source, "line count was not 4, was %i: '%s'" % (
+ len(l), line))
+ continue
+ chf = l[0].lower()
+ #MD5 c08f3a71a51fff523d2cfa00f14fa939 diffball-0.6.2.tar.bz2 305567
+ d2 = d.get(l[2])
+ if d2 is None:
+ d[l[2]] = {chf:long(l[1], 16), "size":long(l[3])}
+ else:
+ d2[chf] = long(l[1], 16)
+ chf_keys.add(chf)
+ except (OSError, IOError), e:
+ raise errors.MissingChksum(source)
+ except TypeError, e:
+ raise errors.ParseChksumError("%r" % source, e)
+ finally:
+ if f is not None and f.close:
+ f.close()
+
+ kls = make_SlottedDict_kls(chf_keys)
+ for k, v in d.items():
+ d[k] = kls(v.iteritems())
+ return d
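
Each digest line is `CHF <hex checksum> <filename> <size>` (see the MD5 example in the comment above); parse_digest folds those into a per-file mapping of checksum name to value plus the size. A simplified standalone illustration of the per-line conversion (digest_line_to_entry is a hypothetical helper, and plain dicts stand in for the slotted-dict class):

def digest_line_to_entry(line):
    # "MD5 c08f... diffball-0.6.2.tar.bz2 305567" -> (filename, checksum dict)
    chf, value, filename, size = line.split()
    return filename, {chf.lower(): int(value, 16), "size": int(size)}

name, entry = digest_line_to_entry(
    "MD5 c08f3a71a51fff523d2cfa00f14fa939 diffball-0.6.2.tar.bz2 305567")
assert name == "diffball-0.6.2.tar.bz2"
assert entry["size"] == 305567
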
+
+def serialize_digest(handle, fetchables):
+ """
+ write out a digest entry for a fetchable
+
+ throws KeyError if needed chksums are missing. Requires at least md5
+ and size chksums per fetchable.
+
+ @param handle: file object to write to
+ @param fetchables: list of L{pkgcore.fetch.fetchable} instances
+ """
+ for fetchable in iflatten_instance(fetchables, fetch.fetchable):
+ d = dict(fetchable.chksums)
+ size = d.pop("size")
+ try:
+ md5 = d.pop("md5")
+ handle.write("MD5 %s %s %i\n" % (get_handler('md5').long2str(md5), fetchable.filename, size))
+ except KeyError:
+ pass
+ for chf, sum in d.iteritems():
+ handle.write("%s %s %s %i\n" % (chf.upper(), get_handler(chf).long2str(sum),
+ fetchable.filename, size))
+
+def serialize_manifest(pkgdir, fetchables):
+ """
+ Write a Manifest for the package residing in pkgdir.
+
+ @param pkgdir: directory of the package; its files become the
+ AUX/EBUILD/MISC entries, and the Manifest is written here
+ @param fetchables: list of L{pkgcore.fetch.fetchable} instances used
+ for the DIST entries
+ """
+ handle = open(pkgdir + '/Manifest', 'w')
+ for file in [x for x in iter_scan(pkgdir) if isinstance(x, fsFile)]:
+ excludes=set(["CVS", ".svn", "Manifest"])
+ if any(True for x in file.location.split(sep) if x in excludes):
+ continue
+ type = 'misc'
+ if 'files' in dirname(file.location):
+ type = 'aux'
+ elif basename(file.location)[-7:] == '.ebuild':
+ type = 'ebuild'
+ _write_manifest(handle, type, basename(file.location), dict(file.chksums))
+ type = 'dist'
+ for fetchable in iflatten_instance(fetchables, fetch.fetchable):
+ _write_manifest(handle, type, basename(fetchable.filename), dict(fetchable.chksums))
+
+def _write_manifest(handle, type, filename, chksums):
+ """Convenient, internal method for writing manifests"""
+ size = chksums.pop("size")
+ handle.write("%s %s %i" % (type.upper(), filename, size))
+ for chf, sum in chksums.iteritems():
+ handle.write(" %s %s" %(chf.upper(), get_handler(chf).long2str(sum)))
+ handle.write('\n')
+
+def convert_chksums(iterable):
+ for chf, sum in iterable:
+ chf = chf.lower()
+ if chf == 'size':
+ # explicit size entries are stupid, format has implicit size
+ continue
+ else:
+ yield chf, long(sum, 16)
+
+
+def parse_manifest(source, throw_errors=True, ignore_gpg=True,
+ kls_override=None):
+ d = {}
+ dist, aux, ebuild, misc = {}, {}, {}, {}
+ types = (("DIST", dist), ("AUX", aux), ("EBUILD", ebuild), ("MISC", misc))
+ files = {}
+ # type format (see glep 44 for exact rules)
+ # TYPE filename size (CHF sum)+
+ # example 'type' entry, all one line
+ #MISC metadata.xml 219 RMD160 613195ece366b33606e71ff1753be048f2507841 SHA1 d162fb909241ef50b95a3539bdfcde95429bdf81 SHA256 cbd3a20e5c89a48a842f7132fe705bf39959f02c1025052efce8aad8a8baa8dc
+ # old style manifest
+ # CHF sum filename size
+ chf_types = set(["size"])
+ manifest_type = 1
+ try:
+ f = None
+ try:
+ if isinstance(source, basestring):
+ i = f = open(source, "r", 32768)
+ else:
+ i = f = source.get_fileobj()
+ if ignore_gpg:
+ i = gpg.skip_signatures(f)
+ for data in i:
+ line = data.split()
+ if not line:
+ continue
+ for t, d in types:
+ if line[0] != t:
+ continue
+ if len(line) % 2 != 1:
+ if throw_errors:
+ raise errors.ParseChksumError(source,
+ "manifest 2 entry doesn't have right "
+ "number of tokens, %i: %r" %
+ (len(line), line))
+ else:
+ chf_types.update(line[3::2])
+ # this is a trick to do pairwise collapsing;
+ # [size, 1] becomes [(size, 1)]
+ i = iter(line[3:])
+ d[line[1]] = [("size", long(line[2]))] + \
+ list(convert_chksums(izip(i, i)))
+ manifest_type = 2
+ break
+ else:
+ if len(line) != 4:
+ if throw_errors:
+ raise errors.ParseChksumError(source,
+ "line count was not 4, was %i: %r" %
+ (len(line), line))
+ continue
+ chf_types.add(line[0])
+ files.setdefault(line[2], []).append(
+ [long(line[3]), line[0].lower(), long(line[1], 16)])
+
+ except (OSError, IOError, TypeError), e:
+ raise errors.ParseChksumError("failed parsing %r" % source, e)
+ finally:
+ if f is not None and f.close:
+ f.close()
+
+ # collapse files into 4 types, convert to lower mem dicts
+ # doesn't handle files sublists correctly yet
+ for fname, data in files.iteritems():
+ for t, d in types:
+ existing = d.get(fname)
+ if existing is None:
+ continue
+ break
+ else:
+ # work around portage_manifest sucking and not
+ # specifying all files in the manifest.
+ if fname.endswith(".ebuild"):
+ existing = ebuild.setdefault(fname, [])
+ else:
+ existing = misc.setdefault(fname, [])
+
+ for chksum in data:
+ if existing:
+ if existing[0][1] != chksum[0]:
+ if throw_errors:
+ raise errors.ParseChksumError(source,
+ "size collision for file %s" % fname)
+ else:
+ existing.append(chksum[1:])
+ else:
+ existing.append(("size", chksum[0]))
+ existing.append(chksum[1:])
+
+ del files
+
+ # finally convert it to slotted dict for memory savings.
+ kls = make_SlottedDict_kls(x.lower() for x in chf_types)
+ ret = []
+ for t, d in types:
+ if kls_override is None:
+ for k, v in d.items():
+ d[k] = kls(v)
+ else:
+ d = kls_override((k, kls(v)) for k, v in d.iteritems())
+ ret.append(d)
+ return ret, manifest_type
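
The `izip(i, i)` construction used for manifest-2 entries above is the usual pairwise-collapsing trick: each step pulls two values from one shared iterator, turning the flat `CHF sum CHF sum ...` tail into (CHF, sum) pairs. A tiny standalone version (pairwise is a hypothetical helper; zip stands in for izip):

def pairwise(flat):
    it = iter(flat)
    # each zip step advances the same iterator twice
    return list(zip(it, it))

expected = [("SHA1", "d162fb90"), ("SHA256", "cbd3a20e")]
assert pairwise(["SHA1", "d162fb90", "SHA256", "cbd3a20e"]) == expected
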
diff --git a/pkgcore/ebuild/domain.py b/pkgcore/ebuild/domain.py
new file mode 100644
index 0000000..7747bfd
--- /dev/null
+++ b/pkgcore/ebuild/domain.py
@@ -0,0 +1,436 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+gentoo configuration domain
+"""
+
+# XXX doc this up better...
+
+from itertools import izip
+from os.path import isfile
+
+import pkgcore.config.domain
+from pkgcore.config import ConfigHint
+from pkgcore.restrictions.delegated import delegate
+from pkgcore.restrictions import packages, values
+from pkgcore.ebuild.atom import generate_collapsed_restriction
+from pkgcore.repository import multiplex, visibility
+from pkgcore.interfaces.data_source import local_source
+from pkgcore.config.errors import BaseException
+from pkgcore.ebuild import const
+from pkgcore.ebuild.profiles import incremental_expansion
+from pkgcore.ebuild.misc import (collapsed_restrict_to_data,
+ non_incremental_collapsed_restrict_to_data)
+from pkgcore.util.parserestrict import parse_match
+
+from snakeoil.lists import stable_unique, unstable_unique
+from snakeoil.compatibility import any
+from snakeoil.mappings import ProtectedDict
+from snakeoil.fileutils import iter_read_bash
+from snakeoil.currying import partial
+from snakeoil.demandload import demandload
+demandload(globals(),
+ 'errno',
+ 'pkgcore.fs.livefs:iter_scan',
+ 'pkgcore.fs.fs:fsFile',
+)
+
+class MissingFile(BaseException):
+ def __init__(self, filename, setting):
+ BaseException.__init__(self,
+ "setting %s points at %s, which doesn't exist."
+ % (setting, filename))
+ self.file, self.setting = filename, setting
+
+class Failure(BaseException):
+ def __init__(self, text):
+ BaseException.__init__(self, "domain failure: %s" % (text,))
+ self.text = text
+
+
+def package_keywords_splitter(val):
+ v = val.split()
+ return parse_match(v[0]), stable_unique(v[1:])
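
A package.keywords (or package.license / package.use) line is an atom followed by a list of tokens; the splitter returns the parsed restriction plus the tokens deduplicated in first-seen order. Hedged usage sketch, assuming a pkgcore install so parse_match and stable_unique resolve:

from pkgcore.ebuild.domain import package_keywords_splitter

restriction, keywords = package_keywords_splitter(
    "dev-util/diffball ~amd64 ~x86 ~amd64")
assert list(keywords) == ["~amd64", "~x86"]
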
+
+
+# ow ow ow ow ow ow....
+# this manages a *lot* of crap. so... this is fun.
+#
+# note also, that this is rather ebuild centric. it shouldn't be, and
+# should be redesigned to be a separation of configuration
+# instantiation manglers, and then the ebuild specific chunk (which is
+# selected by config)
+# ~harring
+
+
+def generate_masking_restrict(masks):
+ # if it's masked, it's not a match
+ return generate_collapsed_restriction(masks, negate=True)
+
+def generate_unmasking_restrict(unmasks):
+ return generate_collapsed_restriction(unmasks)
+
+
+class domain(pkgcore.config.domain.domain):
+
+ # XXX ouch, verify this crap and add defaults and stuff
+ _types = {
+ 'profile': 'ref:profile', 'fetcher': 'ref:fetcher',
+ 'repositories': 'lazy_refs:repo', 'vdb': 'lazy_refs:repo',
+ 'name': 'str',
+ }
+ for _thing in list(const.incrementals) + ['bashrc']:
+ _types[_thing] = 'list'
+ for _thing in [
+ 'package.mask', 'package.keywords', 'package.license', 'package.use',
+ 'package.unmask']:
+ _types[_thing] = 'list'
+ for _thing in ['root', 'CHOST', 'CFLAGS', 'PATH', 'PORTAGE_TMPDIR',
+ 'DISTCC_PATH', 'DISTCC_DIR', 'CCACHE_DIR']:
+ _types[_thing] = 'str'
+
+ # TODO this is missing defaults
+ pkgcore_config_type = ConfigHint(
+ _types, typename='domain',
+ required=['repositories', 'profile', 'vdb', 'fetcher', 'name'],
+ allow_unknowns=True)
+
+ del _types, _thing
+
+ def __init__(self, profile, repositories, vdb, name=None,
+ root='/', incrementals=const.incrementals, **settings):
+ # voodoo, unfortunately (so it goes)
+ # break this up into chunks once it's stabilized (most of the code
+ # here already has been, but there is still more to add)
+ settings.setdefault('ACCEPT_LICENSE', const.ACCEPT_LICENSE)
+
+ # map out sectionname -> config manager immediately.
+ repositories_collapsed = [r.collapse() for r in repositories]
+ repositories = [r.instantiate() for r in repositories_collapsed]
+ vdb_collapsed = [r.collapse() for r in vdb]
+ vdb = [r.instantiate() for r in vdb_collapsed]
+ self.named_repos = dict(
+ (collapsed.name, repo) for (collapsed, repo) in izip(
+ repositories_collapsed, repositories))
+ self.named_repos.update(
+ (collapsed.name, repo) for (collapsed, repo) in izip(
+ vdb_collapsed, vdb))
+ self.named_repos.pop(None, None)
+
+ pkg_maskers = set(profile.masks)
+ for r in repositories:
+ pkg_maskers.update(r.default_visibility_limiters)
+ pkg_maskers = list(pkg_maskers)
+ pkg_unmaskers, pkg_keywords, pkg_license = [], [], []
+ pkg_use = []
+
+ for key, val, action in (
+ ("package.mask", pkg_maskers, parse_match),
+ ("package.unmask", pkg_unmaskers, parse_match),
+ ("package.keywords", pkg_keywords, package_keywords_splitter),
+ ("package.license", pkg_license, package_keywords_splitter),
+ ("package.use", pkg_use, package_keywords_splitter)):
+
+ for fp in settings.pop(key, ()):
+ try:
+ if isfile(fp):
+ val.extend(action(x) for x in iter_read_bash(fp))
+ else:
+ # Ok, so it might not be a dir, but iter_scan'ing it
+ # means we get a nice exception w/o having to set it
+ # ourselves.
+ for file in iter_scan(fp):
+ if (not isinstance(file, fsFile) or
+ any(True for thing in file.location.split('/')
+ if thing.startswith('.'))):
+ continue
+ val.extend(action(x) for x in iter_read_bash(file.location))
+ except (IOError, OSError), e:
+ if e.errno == errno.ENOENT:
+ raise MissingFile(fp, key)
+ raise Failure("failed reading '%s': %s" % (fp, e))
+ except ValueError, e:
+ raise Failure("failed reading '%s': %s" % (fp, e))
+
+ self.name = name
+ settings.setdefault("PKGCORE_DOMAIN", name)
+ for x in incrementals:
+ if isinstance(settings.get(x), basestring):
+ settings[x] = set(settings[x].split())
+
+ for x, v in profile.default_env.iteritems():
+ if x in settings:
+ if x in incrementals:
+ if isinstance(v, basestring):
+ v = set(v.split())
+ else:
+ v = set(v)
+ incremental_expansion(v, settings[x])
+ settings[x] = v
+ else:
+ if x in incrementals:
+ if isinstance(v, basestring):
+ v = set(v.split())
+ settings[x] = v
+ else:
+ settings[x] = v
+
+ # use is collapsed; now stack use_expand.
+ use = settings.setdefault("USE", set())
+
+ # hackish implementation; if test is on, flip on the flag
+ if "test" in settings.get("FEATURES", ()):
+ use.add("test")
+
+ self.use_expand = frozenset(profile.use_expand)
+ self.use_expand_hidden = frozenset(profile.use_expand_hidden)
+ for u in profile.use_expand:
+ v = settings.get(u)
+ if v is None:
+ continue
+ u2 = u.lower()+"_"
+ use.update(u2 + x for x in settings[u].split())
+
+ # visibility mask...
+ # if ((package.mask or visibility) and not package.unmask)
+ # or not (package.keywords or accept_keywords)
+
+ vfilter = packages.AndRestriction(finalize=False,
+ disable_inst_caching=False)
+ r = None
+ if pkg_maskers:
+ r = generate_masking_restrict(pkg_maskers)
+ if pkg_unmaskers:
+ if r is None:
+ # unmasking without masking... 'k (wtf?)
+ r = generate_unmasking_restrict(pkg_unmaskers)
+ else:
+ r = packages.OrRestriction(
+ r, generate_unmasking_restrict(pkg_unmaskers),
+ disable_inst_caching=True)
+ if r:
+ vfilter.add_restriction(r)
+ del pkg_unmaskers, pkg_maskers
+
+ license, default_keywords = [], []
+ master_license = []
+ for k, v in (("ACCEPT_KEYWORDS", default_keywords),
+ ("ACCEPT_LICENSE", master_license)):
+ if k not in settings:
+ raise Failure("No %s setting detected from profile, "
+ "or user config" % k)
+ s = set()
+ incremental_expansion(s, settings[k], "while expanding %s: " % k)
+ v.extend(s)
+ settings[k] = v
+
+
+ self.use = use
+
+ if "ARCH" not in settings:
+ raise Failure(
+ "No ARCH setting detected from profile, or user config")
+
+ self.arch = settings["ARCH"]
+
+ # ~amd64 -> [amd64, ~amd64]
+ for x in default_keywords[:]:
+ if x.startswith("~"):
+ default_keywords.append(x.lstrip("~"))
+ default_keywords = unstable_unique(default_keywords + [self.arch])
+
+ vfilter.add_restriction(self.make_keywords_filter(
+ self.arch, default_keywords, pkg_keywords,
+ incremental="package.keywords" in incrementals))
+
+ del default_keywords
+ # we can finally close that fricking
+ # "DISALLOW NON FOSS LICENSES" bug via this >:)
+ if master_license:
+ vfilter.add_restriction(self.make_license_filter(
+ master_license, license))
+
+ del master_license, license
+
+ # if it's made it this far...
+
+ self.root = settings["ROOT"] = root
+ self.settings = settings
+
+ bashrc = list(profile.bashrc)
+
+ if "bashrc" in self.settings:
+ for data in self.settings['bashrc']:
+ source = local_source(data)
+ # this is currently local-only so a get_path check is ok
+ # TODO make this more general
+ if source.get_path() is None:
+ raise Failure(
+ 'user-specified bashrc %r does not exist' % (data,))
+ bashrc.append(source)
+
+ # stack use stuff first, then profile.
+ # could do an intersect up front to pull out the forced disabled
+ # also, although that code would be fugly
+ self.enabled_use = collapsed_restrict_to_data(
+ ((packages.AlwaysTrue, self.use),
+ (packages.AlwaysTrue, [self.arch])),
+ profile.pkg_use.iteritems(),
+ pkg_use)
+ self.forced_use = collapsed_restrict_to_data(
+ profile.forced_use.iteritems(),
+ ((packages.AlwaysTrue, [self.arch]),))
+ self.disabled_use = collapsed_restrict_to_data(
+ profile.masked_use.iteritems())
+
+ self.settings["bashrc"] = bashrc
+ self.repos = []
+ self.vdb = []
+ self.configured_named_repos = {}
+ self.filtered_named_repos = {}
+
+ rev_names = dict((repo, name) for name, repo in self.named_repos.iteritems())
+
+
+ for l, repos, filtered in ((self.repos, repositories, True),
+ (self.vdb, vdb, False)):
+
+ for repo in repos:
+ if not repo.configured:
+ pargs = [repo]
+ try:
+ for x in repo.configurables:
+ if x == "domain":
+ pargs.append(self)
+ elif x == "settings":
+ pargs.append(ProtectedDict(settings))
+ elif x == "profile":
+ pargs.append(profile)
+ else:
+ pargs.append(getattr(self, x))
+ except AttributeError, ae:
+ raise Failure("failed configuring repo '%s': "
+ "configurable missing: %s" % (repo, ae))
+ wrapped_repo = repo.configure(*pargs)
+ else:
+ wrapped_repo = repo
+ key = rev_names.get(repo)
+ self.configured_named_repos[key] = wrapped_repo
+ if filtered:
+ wrapped_repo = visibility.filterTree(wrapped_repo,
+ vfilter, True)
+ self.filtered_named_repos[key] = wrapped_repo
+ l.append(wrapped_repo)
+
+ if profile.virtuals:
+ l = [x for x in (getattr(v, 'old_style_virtuals', None)
+ for v in self.vdb) if x is not None]
+ profile_repo = profile.make_virtuals_repo(
+ multiplex.tree(*repositories), *l)
+ self.named_repos["profile virtuals"] = profile_repo
+ self.filtered_named_repos["profile virtuals"] = profile_repo
+ self.configured_named_repos["profile virtuals"] = profile_repo
+ self.repos = [profile_repo] + self.repos
+
+ def make_license_filter(self, master_license, pkg_licenses):
+ data = collapsed_restrict_to_data(
+ ((packages.AlwaysTrue, master_license),),
+ pkg_licenses)
+ return delegate(partial(self.apply_license_filter, data))
+
+ def apply_license_filter(self, data, pkg, mode):
+ # note we're not honoring mode; it's always match.
+ # reason is that we don't want to turn on use flags just to get
+ # acceptable license pairs.
+ # maybe change this down the line?
+ allowed_licenses = data.pull_data(pkg)
+ for and_pair in pkg.license.dnf_solutions():
+ for license in and_pair:
+ if license not in allowed_licenses:
+ break
+ else:
+ # tiz fine.
+ return True
+ return False
+
+ def make_keywords_filter(self, arch, default_keys, pkg_keywords,
+ incremental=False):
+ """Generates a restrict that matches iff the keywords are allowed."""
+ if not pkg_keywords:
+ return packages.PackageRestriction(
+ "keywords", values.ContainmentMatch(*default_keys))
+
+ if "~" + arch.lstrip("~") not in default_keys:
+ # stable; thus empty entries == ~arch
+ unstable = "~" + arch
+ def f(r, v):
+ if not v:
+ return r, unstable
+ return r, v
+ data = collapsed_restrict_to_data(
+ ((packages.AlwaysTrue, default_keys),),
+ (f(*i) for i in pkg_keywords))
+ else:
+ if incremental:
+ f = collapsed_restrict_to_data
+ else:
+ f = non_incremental_collapsed_restrict_to_data
+ data = f(((packages.AlwaysTrue, default_keys),),
+ pkg_keywords)
+
+ if incremental:
+ raise NotImplementedError(self.incremental_apply_keywords_filter)
+ #f = self.incremental_apply_keywords_filter
+ else:
+ f = self.apply_keywords_filter
+ return delegate(partial(f, data))
+
+ @staticmethod
+ def incremental_apply_keywords_filter(data, pkg, mode):
+ # note we ignore mode; keywords aren't influenced by conditionals.
+ # note also, we're not using a restriction here. this is faster.
+ allowed = data.pull_data(pkg)
+ return any(True for x in pkg.keywords if x in allowed)
+
+ @staticmethod
+ def apply_keywords_filter(data, pkg, mode):
+ # note we ignore mode; keywords aren't influenced by conditionals.
+ # note also, we're not using a restriction here. this is faster.
+ allowed = data.pull_data(pkg)
+ if '**' in allowed:
+ return True
+ if "*" in allowed:
+ for k in pkg.keywords:
+ if k[0] not in "-~":
+ return True
+ if "~*" in allowed:
+ for k in pkg.keywords:
+ if k[0] == "~":
+ return True
+ return any(True for x in pkg.keywords if x in allowed)
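
The acceptance rules above boil down to: '**' accepts anything, '*' accepts any package carrying a stable keyword, '~*' accepts any package carrying a ~arch keyword, and otherwise there must be a direct overlap. A standalone sketch of the same decision, without the pkg/delegate machinery (keywords_accepted is a hypothetical helper):

def keywords_accepted(pkg_keywords, allowed):
    if "**" in allowed:
        return True
    if "*" in allowed and any(k[0] not in "-~" for k in pkg_keywords):
        return True
    if "~*" in allowed and any(k.startswith("~") for k in pkg_keywords):
        return True
    return any(k in allowed for k in pkg_keywords)

assert keywords_accepted(["~amd64"], ["amd64", "~amd64"])
assert not keywords_accepted(["~amd64"], ["amd64"])
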
+
+ def make_per_package_use(self, default_use, pkg_use):
+ if not pkg_use:
+ return default_use, ((), {})
+ return collapsed_restrict_to_data(default_use, pkg_use)
+
+ def get_package_use(self, pkg):
+ disabled = self.disabled_use.pull_data(pkg)
+ enabled = self.enabled_use.pull_data(pkg)
+ immutable = self.forced_use.pull_data(pkg, False)
+ if disabled:
+ if enabled is self.enabled_use.defaults:
+ enabled = set(enabled)
+ if immutable is self.forced_use.defaults:
+ immutable = set(immutable)
+ elif immutable:
+ if enabled is self.enabled_use.defaults:
+ enabled = set(enabled)
+ else:
+ return immutable, enabled
+ enabled.update(immutable)
+ enabled.difference_update(disabled)
+ immutable.update(disabled)
+
+ return immutable, enabled
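
get_package_use combines three per-package sets: forced flags are added to the enabled set, masked flags are removed from it, and the immutable set (flags the ebuild may not toggle) ends up holding the union of forced and masked. A simplified standalone version of that set arithmetic, glossing over the copy-on-write handling of the default sets (resolve_use is a hypothetical helper):

def resolve_use(enabled, forced, masked):
    enabled = (set(enabled) | set(forced)) - set(masked)
    immutable = set(forced) | set(masked)
    return immutable, enabled

immutable, enabled = resolve_use({"ssl", "test"}, {"ssl"}, {"doc", "test"})
assert enabled == {"ssl"}
assert immutable == {"ssl", "doc", "test"}
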
diff --git a/pkgcore/ebuild/ebd.py b/pkgcore/ebuild/ebd.py
new file mode 100644
index 0000000..df8519f
--- /dev/null
+++ b/pkgcore/ebuild/ebd.py
@@ -0,0 +1,666 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+EBuild Daemon (ebd), main high level interface to ebuild execution env
+(ebuild.sh being part of it).
+
+Wraps L{pkgcore.ebuild.processor} functionality into a higher level
+api, per phase methods for example
+"""
+
+
+import os, errno, shutil
+
+from pkgcore.interfaces import format, data_source
+from pkgcore.ebuild.processor import \
+ request_ebuild_processor, release_ebuild_processor, \
+ expected_ebuild_env, chuck_UnhandledCommand
+from pkgcore.os_data import portage_gid, portage_uid
+from pkgcore.spawn import (
+ spawn_bash, spawn, is_sandbox_capable, is_fakeroot_capable)
+from pkgcore.os_data import xargs
+from pkgcore.ebuild.const import eapi_capable
+from pkgcore.interfaces import observer
+from pkgcore.ebuild.ebuild_built import fake_package_factory, package
+from snakeoil.currying import post_curry, pretty_docs
+from snakeoil.osutils import ensure_dirs, normpath, join as pjoin
+
+from snakeoil.demandload import demandload
+demandload(globals(),
+ "pkgcore.log:logger",
+ "pkgcore.package.mutated:MutatedPkg",
+)
+
+
+def _reset_env_data_source(method):
+ return method
+
+ # unreachable code. --charlie
+ #def store_env_data_wrapper(self, *args, **kwds):
+ # try:
+ # return method(self, *args, **kwds)
+ # finally:
+ # # note that we're *not* return'ing anything ourselves.
+ # # we want the original val to slide back
+ # if self.env_data_source is None:
+ # try:
+ # fp = self.env["PORT_ENV_FILE"]
+ # f = self.env_data.get_fileobj()
+ # f.seek(0, 0)
+ # f.truncate(0)
+ # f.write(open(fp, "r").read())
+ # del f, fp
+ # except (IOError, OSError), oe:
+ # if oe.errno != errno.ENOENT:
+ # raise
+
+ #store_env_data_wrapper.__doc__ = method.__doc__
+ #return store_env_data_wrapper
+
+
+class ebd(object):
+
+ def __init__(self, pkg, initial_env=None, env_data_source=None,
+ features=None, observer=None, clean=False, tmp_offset=None):
+ """
+ @param pkg:
+ L{ebuild package instance<pkgcore.ebuild.ebuild_src.package>}
+ instance this env is being setup for
+ @param initial_env: initial environment to use for this ebuild
+ @param env_data_source: a L{pkgcore.interfaces.data_source} instance
+ to restore the environment from- used for restoring the
+ state of an ebuild processing, whether for unmerging, or
+ walking phases during building
+ @param features: ebuild features, a holdover from portage,
+ will be broken down at some point
+ """
+
+ if not hasattr(self, "observer"):
+ self.observer = observer
+ if pkg.eapi not in eapi_capable:
+ raise TypeError(
+ "pkg isn't of a supported eapi!, %i not in %s for %s" % (
+ pkg.eapi, eapi_capable, pkg))
+
+ if initial_env is not None:
+ # copy.
+ self.env = dict(initial_env)
+ for x in ("USE", "ACCEPT_LICENSE"):
+ if x in self.env:
+ del self.env[x]
+ else:
+ self.env = {}
+
+ # temp hack.
+ if "PYTHONPATH" in os.environ:
+ self.env["PYTHONPATH"] = os.environ["PYTHONPATH"]
+ if "PKGCORE_DEBUG" in os.environ:
+ self.env["PKGCORE_DEBUG"] = str(int(os.environ["PKGCORE_DEBUG"]))
+
+ self.env.setdefault("ROOT", "/")
+ self.env_data_source = env_data_source
+ if env_data_source is not None and \
+ not isinstance(env_data_source, data_source.base):
+ raise TypeError(
+ "env_data_source must be None, or a pkgcore.data_source.base "
+ "derivative: %s: %s" % (
+ env_data_source.__class__, env_data_source))
+
+ if features is None:
+ features = self.env.get("FEATURES", ())
+
+ self.features = set(x.lower() for x in features)
+
+ self.env["FEATURES"] = ' '.join(sorted(self.features))
+
+ expected_ebuild_env(pkg, self.env, env_source_override=self.env_data_source)
+
+ self.env["USE"] = ' '.join(str(x) for x in pkg.use)
+ self.env["INHERITED"] = ' '.join(pkg.data.get("_eclasses_", ()))
+ self.env["SLOT"] = pkg.slot
+ self.env["FINALIZED_RESTRICT"] = ' '.join(str(x) for x in pkg.restrict)
+
+ self.restrict = pkg.restrict
+
+ for x in ("sandbox", "userpriv", "fakeroot"):
+ setattr(self, x, self.feat_or_bool(x) and not (x in self.restrict))
+ if self.fakeroot:
+ logger.warn("disabling fakeroot; unusable till coreutils/fakeroot" +
+ " interaction is fixed")
+ self.fakeroot = False
+ if self.userpriv and os.getuid() != 0:
+ self.userpriv = False
+
+ if "PORT_LOGDIR" in self.env:
+ self.logging = pjoin(self.env["PORT_LOGDIR"],
+ pkg.cpvstr+".log")
+ del self.env["PORT_LOGDIR"]
+ else:
+ self.logging = False
+
+ self.env["XARGS"] = xargs
+
+ self.bashrc = self.env.get("bashrc", ())
+ if self.bashrc:
+ del self.env["bashrc"]
+
+ self.pkg = pkg
+ self.eapi = pkg.eapi
+ wipes = [k for k, v in self.env.iteritems()
+ if not isinstance(v, basestring)]
+ for k in wipes:
+ del self.env[k]
+ del wipes, k, v
+
+ self.set_op_vars(tmp_offset)
+ self.clean_at_start = clean
+ self.clean_needed = False
+
+ def start(self):
+ if self.clean_at_start:
+ self.clean_needed = True
+ if not self.cleanup():
+ return False
+ self.setup_workdir()
+ self.setup_env_data_source()
+ self.clean_needed = True
+ return True
+
+ def set_op_vars(self, tmp_offset):
+ # don't fool with this, without fooling with setup.
+ self.base_tmpdir = self.env["PORTAGE_TMPDIR"]
+ self.tmpdir = normpath(pjoin(self.base_tmpdir, "portage"))
+ if tmp_offset:
+ self.tmpdir = pjoin(self.tmpdir,
+ tmp_offset.strip(os.path.sep))
+
+ self.builddir = pjoin(self.tmpdir, self.env["CATEGORY"],
+ self.env["PF"])
+ for x, y in (("T", "temp"), ("WORKDIR", "work"), ("D", "image"),
+ ("HOME", "homedir")):
+ self.env[x] = pjoin(self.builddir, y) +"/"
+
+ self.env["IMAGE"] = self.env["D"]
+
+ def get_env_source(self):
+ return data_source.data_source(
+ open(pjoin(self.env["T"], "environment"), "r").read())
+
+ def setup_env_data_source(self):
+ if not ensure_dirs(self.env["T"], mode=0770, gid=portage_gid,
+ minimal=True):
+ raise format.FailedDirectory(self.env['T'],
+ "%s doesn't fulfill minimum mode %o and gid %i" % (
+ self.env['T'], 0770, portage_gid))
+
+ if self.env_data_source is not None:
+ fp = pjoin(self.env["T"], "environment")
+ # load data first (might be a local_source), *then* write;
+ # if it's a src_ebuild being installed, trying to do two steps
+ # stomps the local_source's data.
+ data = self.env_data_source.get_fileobj().read()
+ open(fp, "w").write(data)
+ del data
+
+ def setup_logging(self):
+ if self.logging and not ensure_dirs(os.path.dirname(self.logging),
+ mode=02770, gid=portage_gid):
+ raise format.FailedDirectory(
+ os.path.dirname(self.logging),
+ "PORT_LOGDIR, desired mode 02770 and gid %i" % portage_gid)
+
+ def setup_workdir(self):
+ # ensure dirs.
+ for k in ("HOME", "T", "WORKDIR", "D"):
+ if not ensure_dirs(self.env[k], mode=04770,
+ gid=portage_gid, minimal=True):
+ raise format.FailedDirectory(
+ self.env[k],
+ "%s doesn't fulfill minimum mode %o and gid %i" % (
+ k, 0770, portage_gid))
+ # XXX hack, just 'til pkgcore controls these directories
+ if (os.stat(self.env[k]).st_mode & 02000):
+ logger.warn("%s ( %s ) is setgid" % (self.env[k], k))
+
+
+ @_reset_env_data_source
+ def _generic_phase(self, phase, userpriv, sandbox, fakeroot,
+ extra_handlers=None):
+ """
+ @param phase: phase to execute
+ @param userpriv: will we drop to
+ L{portage_uid<pkgcore.os_data.portage_uid>} and
+ L{portage_gid<pkgcore.os_data.portage_gid>} access for this phase?
+ @param sandbox: should this phase be sandboxed?
+ @param fakeroot: should the phase be fakeroot'd? Only really useful
+ for install phase, and is mutually exclusive with sandbox
+ """
+ ebd = request_ebuild_processor(userpriv=(self.userpriv and userpriv),
+ sandbox=(self.sandbox and sandbox and is_sandbox_capable()),
+ fakeroot=(self.fakeroot and fakeroot and is_fakeroot_capable()))
+ try:
+ ebd.prep_phase(phase, self.env, sandbox=self.sandbox,
+ logging=self.logging)
+ ebd.write("start_processing")
+ if not ebd.generic_handler(additional_commands=extra_handlers):
+ raise format.GenericBuildError(
+ phase + ": Failed building (False/0 return from handler)")
+
+ except Exception, e:
+ ebd.shutdown_processor()
+ release_ebuild_processor(ebd)
+ if isinstance(e, (SystemExit, format.GenericBuildError)):
+ raise
+ raise format.GenericBuildError(
+ phase + ": Caught exception while building: %s" % e)
+
+ release_ebuild_processor(ebd)
+ return True
+
+ def cleanup(self, disable_observer=False):
+ if not self.clean_needed or not os.path.exists(self.builddir):
+ return True
+ if disable_observer:
+ return self.do_cleanup(disable_observer=disable_observer)
+ return self.do_cleanup()
+
+ @observer.decorate_build_method("cleanup")
+ def do_cleanup(self):
+ try:
+ shutil.rmtree(self.builddir)
+ # try to wipe the cat dir; if not empty, ignore it
+ try:
+ os.rmdir(os.path.dirname(self.builddir))
+ except OSError, e:
+ if e.errno != errno.ENOTEMPTY:
+ raise
+ except OSError, oe:
+ raise format.GenericBuildError(
+ "clean: Caught exception while cleansing: %s" % oe)
+ return True
+
+ def feat_or_bool(self, name, extra_env=None):
+ if name in self.env:
+ v = bool(self.env[name])
+ del self.env[name]
+ name = name.lower()
+ if v:
+ self.features.add(name)
+ else:
+ if name in self.features:
+ self.features.remove(name)
+ elif extra_env is not None and name in extra_env:
+ v = bool(extra_env[name])
+ if v:
+ self.features.add(name.lower())
+ else:
+ self.features.remove(name.lower())
+ else:
+ v = name.lower() in self.features
+ return v
+
+
+class setup_mixin(object):
+
+ setup_is_for_src = True
+
+ def setup(self):
+ self.setup_logging()
+
+ additional_commands = {}
+ phase_name = "setup-binpkg"
+ if self.setup_is_for_src:
+ phase_name = "setup"
+
+ ebdp = request_ebuild_processor(userpriv=False, sandbox=False)
+ if self.setup_is_for_src:
+ additional_commands["request_inherit"] = \
+ post_curry(ebdp.__class__._inherit, self.eclass_cache)
+ additional_commands["request_profiles"] = self._request_bashrcs
+
+ try:
+ ebdp.prep_phase(phase_name, self.env, sandbox=self.sandbox,
+ logging=self.logging)
+ ebdp.write("start_processing")
+ if not ebdp.generic_handler(
+ additional_commands=additional_commands):
+ raise format.GenericBuildError(
+ "setup: Failed building (False/0 return from handler)")
+
+ except Exception, e:
+ # regardless of what occurred, we kill the processor.
+ ebdp.shutdown_processor()
+ release_ebuild_processor(ebdp)
+ # either we know what it is, or it's a shutdown. re-raise
+ if isinstance(e, (SystemExit, format.GenericBuildError)):
+ raise
+ # wrap.
+ raise format.GenericBuildError(
+ "setup: Caught exception while building: " + str(e))
+
+ release_ebuild_processor(ebdp)
+ return True
+
+ def _request_bashrcs(self, ebd, a):
+ if a is not None:
+ chuck_UnhandledCommand(ebd, "bashrc request with arg"+str(a))
+ for source in self.bashrc:
+ if source.get_path is not None:
+ ebd.write("path\n%s" % source.get_path())
+ elif source.get_data is not None:
+ raise NotImplementedError
+ else:
+ chuck_UnhandledCommand(
+ ebd, "bashrc request: unable to process bashrc "
+ "due to source '%s' due to lacking usable get_*" % (
+ source,))
+ if not ebd.expect("next"):
+ chuck_UnhandledCommand(
+ ebd, "bashrc transfer, didn't receive 'next' response. "
+ "failure?")
+ ebd.write("end_request")
+
+
+class install_op(ebd, format.install):
+ """
+ phase operations and steps for install execution
+ """
+
+ preinst = pretty_docs(
+ observer.decorate_build_method("preinst")(
+ post_curry(
+ ebd._generic_phase, "preinst", False, False, False)),
+ "run the postinst phase")
+ postinst = pretty_docs(
+ observer.decorate_build_method("postinst")(
+ post_curry(
+ ebd._generic_phase, "postinst", False, False, False)),
+ "run the postinst phase")
+
+
+class uninstall_op(ebd, format.uninstall):
+ """
+ phase operations and steps for uninstall execution
+ """
+
+ def __init__(self, *args, **kwargs):
+ kwargs["tmp_offset"] = "unmerge"
+ ebd.__init__(self, *args, **kwargs)
+
+ prerm = pretty_docs(
+ observer.decorate_build_method("prerm")(
+ post_curry(
+ ebd._generic_phase, "prerm", False, False, False)),
+ "run the prerm phase")
+ postrm = pretty_docs(
+ observer.decorate_build_method("postrm")(
+ post_curry(
+ ebd._generic_phase, "postrm", False, False, False)),
+ "run the postrm phase")
+
+
+class replace_op(format.replace, install_op, uninstall_op):
+ def __init__(self, *args, **kwargs):
+ ebd.__init__(self, *args, **kwargs)
+
+
+class buildable(ebd, setup_mixin, format.build):
+
+ """
+ build operation
+ """
+
+ _built_class = package
+
+ # XXX this is unclean- should be handing in strictly what is build
+ # env, rather than dumping domain settings as env.
+ def __init__(self, pkg, domain_settings, eclass_cache, fetcher,
+ observer=None, **kwargs):
+
+ """
+ @param pkg: L{pkgcore.ebuild.ebuild_src.package} instance we'll be
+ building
+ @param domain_settings: dict bled down from the domain configuration;
+ basically initial env
+ @param eclass_cache: the L{eclass_cache<pkgcore.ebuild.eclass_cache>}
+ we'll be using
+ @param fetcher: a L{pkgcore.fetch.base.fetcher} instance to use to
+ access our required files for building
+ """
+
+ format.build.__init__(self, observer=observer)
+ ebd.__init__(self, pkg, initial_env=domain_settings,
+ features=domain_settings["FEATURES"], **kwargs)
+
+ self.env["FILESDIR"] = pjoin(os.path.dirname(pkg.ebuild.get_path()), "files")
+ self.eclass_cache = eclass_cache
+ self.env["ECLASSDIR"] = eclass_cache.eclassdir
+ self.env["PORTDIR"] = eclass_cache.portdir
+
+ self.fetcher = fetcher
+
+ self.run_test = self.feat_or_bool("test", domain_settings)
+ if "test" in self.restrict:
+ self.run_test = False
+ elif "test" not in pkg.use:
+ if self.run_test:
+ logger.warn("disabling test for %s due to test use flag being disabled")
+ self.run_test = False
+
+ # XXX minor hack
+ path = self.env["PATH"].split(":")
+
+ for s, default in (("DISTCC", ".distcc"), ("CCACHE", "ccache")):
+ b = (self.feat_or_bool(s, domain_settings)
+ and not s in self.restrict)
+ setattr(self, s.lower(), b)
+ if b:
+ # looks weird I realize, but
+ # pjoin("/foor/bar", "/barr/foo") == "/barr/foo"
+ # and pjoin("/foo/bar",".asdf") == "/foo/bar/.asdf"
+ self.env.setdefault(s+"_DIR", pjoin(self.tmpdir, default))
+ path.insert(0, "/usr/lib/%s/bin" % s.lower())
+ else:
+ for y in ("_PATH", "_DIR"):
+ if s+y in self.env:
+ del self.env[s+y]
+ path = [piece for piece in path if piece]
+ self.env["PATH"] = ":".join(path)
+ self.fetchables = pkg.fetchables[:]
+ self.env["A"] = ' '.join(set(x.filename
+ for x in self.fetchables))
+
+ if self.setup_is_for_src:
+ self.init_distfiles_env()
+
+ def init_distfiles_env(self):
+ # cvs/svn ebuilds need to die.
+ distdir_write = self.fetcher.get_storage_path()
+ if distdir_write is None:
+ raise format.GenericBuildError("no usable distdir was found "
+ "for PORTAGE_ACTUAL_DISTDIR from fetcher %s" % self.fetcher)
+ self.env["PORTAGE_ACTUAL_DISTDIR"] = distdir_write
+ self.env["DISTDIR"] = normpath(
+ pjoin(self.builddir, "distdir"))
+ for x in ("PORTAGE_ACTUAL_DISTDIR", "DISTDIR"):
+ self.env[x] = os.path.realpath(self.env[x]).rstrip("/") + "/"
+
+ def setup_distfiles(self):
+ # added to protect against no-auto usage in pebuild.
+ if not hasattr(self, 'files'):
+ self.fetch()
+
+ if self.files:
+ try:
+ if os.path.exists(self.env["DISTDIR"]):
+ if (os.path.isdir(self.env["DISTDIR"])
+ and not os.path.islink(self.env["DISTDIR"])):
+ shutil.rmtree(self.env["DISTDIR"])
+ else:
+ os.unlink(self.env["DISTDIR"])
+
+ except OSError, oe:
+ raise format.FailedDirectory(
+ self.env["DISTDIR"],
+ "failed removing existing file/dir/link at: exception %s"
+ % oe)
+
+ if not ensure_dirs(self.env["DISTDIR"], mode=0770,
+ gid=portage_gid):
+ raise format.FailedDirectory(
+ self.env["DISTDIR"],
+ "failed creating distdir symlink directory")
+
+ try:
+ for src, dest in [
+ (k, pjoin(self.env["DISTDIR"], v.filename))
+ for (k, v) in self.files.items()]:
+ os.symlink(src, dest)
+
+ except OSError, oe:
+ raise format.GenericBuildError(
+ "Failed symlinking in distfiles for src %s -> %s: %s" % (
+ src, dest, str(oe)))
+
+ @observer.decorate_build_method("setup")
+ def setup(self):
+ """
+ execute the setup phase, mapping out to pkg_setup in the ebuild
+
+ necessary dirs are created as required, and the build env is
+ initialized at this point
+ """
+ if self.distcc:
+ for p in ("", "/lock", "/state"):
+ if not ensure_dirs(pjoin(self.env["DISTCC_DIR"], p),
+ mode=02775, gid=portage_gid):
+ raise format.FailedDirectory(
+ pjoin(self.env["DISTCC_DIR"], p),
+ "failed creating needed distcc directory")
+ if self.ccache:
+ # yuck.
+ st = None
+ try:
+ st = os.stat(self.env["CCACHE_DIR"])
+ except OSError:
+ st = None
+ if not ensure_dirs(self.env["CCACHE_DIR"], mode=02775,
+ gid=portage_gid):
+ raise format.FailedDirectory(
+ self.env["CCACHE_DIR"],
+ "failed creation of ccache dir")
+
+ # XXX this is more than mildly stupid.
+ st = os.stat(self.env["CCACHE_DIR"])
+ try:
+ if st.st_gid != portage_gid or (st.st_mode & 02775) != 02775:
+ try:
+ cwd = os.getcwd()
+ except OSError:
+ cwd = "/"
+ try:
+ # crap.
+ os.chmod(self.env["CCACHE_DIR"], 02775)
+ os.chown(self.env["CCACHE_DIR"], -1, portage_gid)
+ os.chdir(cwd)
+ if 0 != spawn(["chgrp", "-R", str(portage_gid),
+ self.env["CCACHE_DIR"]]):
+ raise format.FailedDirectory(
+ self.env["CCACHE_DIR"],
+ "failed changing ownership for CCACHE_DIR")
+ if 0 != spawn_bash(
+ "find '%s' -type d -print0 | %s --null chmod 02775"
+ % (self.env["CCACHE_DIR"], xargs)):
+ raise format.FailedDirectory(
+ self.env["CCACHE_DIR"],
+ "failed correcting perms for CCACHE_DIR")
+
+ if 0 != spawn_bash(
+ "find '%s' -type f -print0 | %s --null chmod 0775"
+ % (self.env["CCACHE_DIR"], xargs)):
+ raise format.FailedDirectory(
+ self.env["CCACHE_DIR"],
+ "failed correcting perms for CCACHE_DIR")
+ finally:
+ os.chdir(cwd)
+ except OSError:
+ raise format.FailedDirectory(
+ self.env["CCACHE_DIR"],
+ "failed ensuring perms/group owner for CCACHE_DIR")
+ return setup_mixin.setup(self)
+
+ def configure(self):
+ """
+ execute the configure phase.
+
+ does nothing if the pkg is EAPI=0 (that spec lacks a separate
+ configure phase).
+ """
+ if self.eapi > 0:
+ return self._generic_phase("configure", True, True, False)
+ return True
+
+ def unpack(self):
+ """
+ execute the unpack phase.
+ """
+ if self.setup_is_for_src:
+ self.setup_distfiles()
+ if self.userpriv:
+ try:
+ os.chown(self.env["WORKDIR"], portage_uid, -1)
+ except OSError, oe:
+ raise format.GenericBuildError(
+ "failed forcing %i uid for WORKDIR: %s" %
+ (portage_uid, str(oe)))
+ return self._generic_phase("unpack", True, True, False)
+
+ compile = pretty_docs(
+ observer.decorate_build_method("compile")(
+ post_curry(
+ ebd._generic_phase, "compile", True, True, False)),
+ "run the compile phase (maps to src_compile)")
+
+ @observer.decorate_build_method("install")
+ @_reset_env_data_source
+ def install(self):
+ """run the install phase (maps to src_install)"""
+ if self.fakeroot:
+ return self._generic_phase("install", True, False, True)
+ else:
+ return self._generic_phase("install", False, True, False)
+
+ @observer.decorate_build_method("test")
+ @_reset_env_data_source
+ def test(self):
+ """run the test phase (if enabled), maps to src_test"""
+ if not self.run_test:
+ return True
+ return self._generic_phase("test", True, True, False)
+
+ def finalize(self):
+ """
+ finalize the operation.
+
+ this yields a built package, but the package's
+ metadata/contents are bound to the workdir. In other words,
+ install the package somewhere prior to executing clean if you
+ intend on installing it.
+
+ @return: L{pkgcore.ebuild.ebuild_built.package} instance
+ """
+ return fake_package_factory(self._built_class).new_package(self.pkg,
+ self.env["IMAGE"], pjoin(self.env["T"], "environment"))
+
+
+class binpkg_buildable(ebd, setup_mixin, format.build):
+
+ stage_depends = {"finalize":"setup", "setup":"start"}
+ setup_is_for_src = False
+
+ def __init__(self, *args, **kwargs):
+ ebd.__init__(self, *args, **kwargs)
+
+ def finalize(self):
+ return MutatedPkg(self.pkg, {"environment":self.get_env_source()})
diff --git a/pkgcore/ebuild/ebuild_built.py b/pkgcore/ebuild/ebuild_built.py
new file mode 100644
index 0000000..b91e5c5
--- /dev/null
+++ b/pkgcore/ebuild/ebuild_built.py
@@ -0,0 +1,224 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+built ebuild packages (vdb packages and binpkgs are derivatives of this)
+"""
+
+from pkgcore.ebuild import ebuild_src
+from pkgcore.package import metadata
+from pkgcore.interfaces.data_source import local_source
+
+from snakeoil.mappings import IndeterminantDict
+from snakeoil.currying import post_curry
+from snakeoil.obj import DelayedInstantiation
+
+from snakeoil.demandload import demandload
+demandload(globals(),
+ 'pkgcore.merge:engine',
+ 'pkgcore.ebuild:triggers',
+ 're',
+ 'pkgcore.ebuild:ebd',
+ 'pkgcore.fs.livefs:scan',
+)
+
+
+def passthrough(inst, attr, rename=None):
+ if rename is None:
+ rename = attr
+ return inst.data[rename]
+
+def flatten_depset(inst, conditionals):
+ return inst.evaluate_depset(conditionals)
+
+default_pkg_preinst_re = None
+
+def pkg_uses_default_preinst(pkg):
+ global default_pkg_preinst_re
+ if default_pkg_preinst_re is None:
+ default_pkg_preinst_re = re.compile(
+ "(?:^|\n)pkg_preinst *\(\)\s*{\s*return;?\s*}[ \t]*(?:\n|$)")
+
+ data = pkg.environment.get_fileobj().read()
+ m = default_pkg_preinst_re.search(data)
+
+ # second check. make sure there aren't two matches- if so, that
+ # means we should not guess it.
+ return m is not None and \
+ default_pkg_preinst_re.search(data[m.end():]) is None
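
The regex above is meant to match an environment dump whose pkg_preinst is just the empty bash stub; a second match anywhere else makes the guess unsafe, so it is rejected. A standalone check of the pattern against both cases (same expression as above, written here as a raw string):

import re

default_re = re.compile(
    r"(?:^|\n)pkg_preinst *\(\)\s*{\s*return;?\s*}[ \t]*(?:\n|$)")

assert default_re.search("pkg_preinst() {\n    return\n}\n")
assert not default_re.search("pkg_preinst() {\n    emake install\n}\n")
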
+
+def wrap_inst(self, wrap, inst):
+ return wrap(inst(self), self.use)
+
+class package(ebuild_src.base):
+
+ """
+ built form of an ebuild
+ """
+
+ immutable = True
+ tracked_attributes = list(ebuild_src.package.tracked_attributes)
+ tracked_attributes.extend(["contents", "use", "environment"])
+ tracked_attributes = tuple(tracked_attributes)
+ allow_regen = False
+
+ built = True
+
+ _get_attr = dict(ebuild_src.package._get_attr)
+
+ del _get_attr["fetchables"]
+
+ _get_attr.update((x, post_curry(passthrough, x))
+ for x in ("contents", "environment", "ebuild"))
+ _get_attr.update(
+ (k, post_curry(wrap_inst,
+ ebuild_src.package._config_wrappables[k],
+ ebuild_src.package._get_attr[k]))
+ for k in ebuild_src.package._config_wrappables
+ if k in ebuild_src.package.tracked_attributes)
+
+ _get_attr["use"] = lambda s:DelayedInstantiation(frozenset,
+ lambda: frozenset(s.data["USE"].split()))
+
+ def _update_metadata(self, pkg):
+ raise NotImplementedError()
+
+ def _repo_install_op(self, *args, **kwds):
+ return self._parent._generate_format_install_op(self, *args, **kwds)
+
+ def _repo_uninstall_op(self, *args, **kwds):
+ return self._parent._generate_format_uninstall_op(self, *args, **kwds)
+
+ def _repo_replace_op(self, *args, **kwds):
+ return self._parent._generate_format_replace_op(self, *args, **kwds)
+
+ def _fetch_metadata(self):
+ return self._parent._get_metadata(self)
+
+ def __str__(self):
+ return "built ebuild: %s" % (self.cpvstr)
+
+ def build(self, **kwargs):
+ return self.repo._generate_build_op(self)
+
+ def add_format_triggers(self, *args, **kwds):
+ return self._parent._add_format_triggers(self, *args, **kwds)
+
+ @property
+ def ebuild(self):
+ o = self.data.get("ebuild")
+ if o is not None:
+ return o
+ return self._parent._get_ebuild_src(self)
+
+ @property
+ def _mtime_(self):
+ raise AttributeError(self, "_mtime_")
+
+
+def generic_format_triggers(self, pkg, op_inst, format_op_inst, engine_inst):
+ if (engine_inst.mode in (engine.REPLACE_MODE, engine.INSTALL_MODE)
+ and pkg == engine_inst.new and pkg.repo is engine_inst.new.repo):
+ if not pkg_uses_default_preinst(pkg):
+ t = triggers.preinst_contents_reset(format_op_inst)
+ t.register(engine_inst)
+ # for ebuild format, always check the syms.
+ # this isn't perfect for binpkgs since if the binpkg is already
+ # screwed, the target is in place already
+ triggers.FixImageSymlinks(format_op_inst).register(engine_inst)
+
+def _generic_format_install_op(self, pkg, domain_settings, **kwds):
+ return ebd.install_op(pkg, initial_env=domain_settings,
+ env_data_source=pkg.environment, **kwds)
+
+def _generic_format_uninstall_op(self, pkg, domain_settings, **kwds):
+ return ebd.uninstall_op(pkg, initial_env=domain_settings,
+ env_data_source=pkg.environment, **kwds)
+
+def _generic_format_replace_op(self, pkg, domain_settings, **kwds):
+ return ebd.replace_op(pkg, initial_env=domain_settings,
+ env_data_source=pkg.environment, **kwds)
+
+
+class package_factory(metadata.factory):
+ child_class = package
+
+ # For the plugin system.
+ priority = 5
+
+ def _get_metadata(self, pkg):
+ return self._parent_repo._get_metadata(pkg)
+
+ def new_package(self, *args):
+ inst = self._cached_instances.get(args)
+ if inst is None:
+ inst = self._cached_instances[args] = self.child_class(self, *args)
+ return inst
+
+ _generate_format_install_op = _generic_format_install_op
+ _generate_format_uninstall_op = _generic_format_uninstall_op
+ _generate_format_replace_op = _generic_format_replace_op
+ _add_format_triggers = generic_format_triggers
+
+
+class fake_package_factory(package_factory):
+ """
+ a fake package_factory, so that we can reuse the normal get_metadata hooks.
+
+ a factory is generated per package instance, rather than one
+ factory, N packages.
+
+ Do not use this unless you know it's what you're after; this is
+ strictly for transitioning a built ebuild (still in the builddir)
+ over to an actual repo. It literally is a mapping of original
+ package data to the new generated instance's data store.
+ """
+
+ def __init__(self, child_class):
+ self.child_class = child_class
+ self._parent_repo = None
+
+ def __del__(self):
+ pass
+
+ _forced_copy = ebuild_src.package.tracked_attributes
+
+ def new_package(self, pkg, image_root, environment_path):
+ self.pkg = pkg
+ self.image_root = image_root
+ self.environment_path = environment_path
+ # lambda redirects path to environment path
+ obj = self.child_class(self, pkg.cpvstr)
+ for x in self._forced_copy:
+ # bypass setattr restrictions.
+ object.__setattr__(obj, x, getattr(self.pkg, x))
+ object.__setattr__(obj, "use", self.pkg.use)
+ return obj
+
+ def get_ebuild_src(self, pkg):
+ return self.pkg.ebuild
+
+ def scan_contents(self, location):
+ return scan(location, offset=location, mutable=True)
+
+ def _get_metadata(self, pkg):
+ return IndeterminantDict(self.__pull_metadata)
+
+ def __pull_metadata(self, key):
+ if key == "contents":
+ return self.scan_contents(self.image_root)
+ elif key == "environment":
+ return local_source(self.environment_path)
+ else:
+ try:
+ return getattr(self.pkg, key)
+ except AttributeError:
+ raise KeyError
+
+ _generate_format_install_op = _generic_format_install_op
+ _generate_format_uninstall_op = _generic_format_uninstall_op
+ _generate_format_replace_op = _generic_format_replace_op
+ _add_format_triggers = generic_format_triggers
+
+
+generate_new_factory = package_factory
diff --git a/pkgcore/ebuild/ebuild_src.py b/pkgcore/ebuild/ebuild_src.py
new file mode 100644
index 0000000..b892a37
--- /dev/null
+++ b/pkgcore/ebuild/ebuild_src.py
@@ -0,0 +1,367 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+package class for buildable ebuilds
+"""
+
+import os
+from itertools import imap
+
+from pkgcore.package import metadata
+from pkgcore.package import errors as metadata_errors
+from pkgcore.ebuild.cpv import CPV
+from pkgcore.ebuild import conditionals
+from pkgcore.ebuild.atom import atom
+from pkgcore.cache import errors as cache_errors
+from pkgcore.restrictions.packages import AndRestriction
+from pkgcore.restrictions import boolean
+from pkgcore.chksum.errors import MissingChksum
+from pkgcore.fetch.errors import UnknownMirror
+from pkgcore.fetch import fetchable, mirror, uri_list, default_mirror
+from pkgcore.ebuild import const, processor
+
+from snakeoil.mappings import IndeterminantDict
+from snakeoil.currying import alias_class_method, partial
+
+from snakeoil.demandload import demandload
+demandload(globals(), "pkgcore.log:logger")
+
+WeakValCache = metadata.WeakValCache
+
+def generate_depset(c, key, non_package_type, s, **kwds):
+ try:
+ if non_package_type:
+ return conditionals.DepSet(s.data.pop(key, ""), c,
+ operators={"||":boolean.OrRestriction,
+ "":boolean.AndRestriction}, **kwds)
+ return conditionals.DepSet(s.data.pop(key, ""), c, **kwds)
+ except conditionals.ParseError, p:
+ raise metadata_errors.MetadataException(s, str(key), str(p))
+
+def generate_providers(self):
+ rdep = AndRestriction(self.versioned_atom, finalize=True)
+ func = partial(virtual_ebuild, self._parent, self,
+ {"rdepends":rdep, "slot":"%s-%s" % (self.category, self.version)})
+ # re-enable license at some point.
+ #, "license":self.license})
+
+ try:
+ return conditionals.DepSet(
+ self.data.pop("PROVIDE", ""), virtual_ebuild, element_func=func,
+ operators={"":boolean.AndRestriction})
+
+ except conditionals.ParseError, p:
+ raise metadata_errors.MetadataException(self, "provide", str(p))
+
+def generate_fetchables(self):
+ chksums = self.repo._get_digests(self)
+
+ mirrors = getattr(self._parent, "mirrors", {})
+ default_mirrors = getattr(self._parent, "default_mirrors", None)
+ common = {}
+ try:
+ d = conditionals.DepSet(
+ self.data.pop("SRC_URI", ""), fetchable, operators={},
+ element_func=partial(create_fetchable_from_uri, self, chksums,
+ mirrors, default_mirrors, common))
+ for v in common.itervalues():
+ v.uri.finalize()
+ return d
+ except conditionals.ParseError, p:
+ raise metadata_errors.MetadataException(self, "src_uri", str(p))
+
+# utility func.
+def create_fetchable_from_uri(pkg, chksums, mirrors, default_mirrors,
+ common_files, uri):
+
+ filename = os.path.basename(uri)
+
+ preexisting = common_files.get(filename)
+
+ if preexisting is None:
+ if filename not in chksums:
+ raise MissingChksum(filename)
+ uris = uri_list(filename)
+ else:
+ uris = preexisting.uri
+
+ if filename != uri:
+ if preexisting is None:
+ if "primaryuri" not in pkg.restrict:
+ if default_mirrors and "mirror" not in pkg.restrict:
+ uris.add_mirror(default_mirrors)
+
+ if uri.startswith("mirror://"):
+ # mirror:// is 9 chars.
+
+ tier, remaining_uri = uri[9:].split("/", 1)
+
+ if tier not in mirrors:
+ raise UnknownMirror(tier, remaining_uri)
+
+ uris.add_mirror(mirrors[tier], remaining_uri)
+
+ else:
+ uris.add_uri(uri)
+ if preexisting is None and "primaryuri" in pkg.restrict:
+ if default_mirrors and "mirror" not in pkg.restrict:
+ uris.add_mirror(default_mirrors)
+
+ if preexisting is None:
+ common_files[filename] = fetchable(filename, uris, chksums[filename])
+ return common_files[filename]
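
mirror:// URIs are split just above into a mirror tier and the remaining path ('mirror://' itself being nine characters). A minimal illustration of that split:

uri = "mirror://gentoo/distfiles/diffball-0.6.2.tar.bz2"
assert uri.startswith("mirror://")
tier, remaining_uri = uri[9:].split("/", 1)
assert tier == "gentoo"
assert remaining_uri == "distfiles/diffball-0.6.2.tar.bz2"
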
+
+def generate_eapi(self):
+ try:
+ d = self.data.pop("EAPI", 0)
+ if d == "":
+ return 0
+ return int(d)
+ except ValueError:
+ return const.unknown_eapi
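
generate_eapi treats a missing or empty EAPI as 0 and anything non-integer as pkgcore's unknown-EAPI sentinel. The same decision table in isolation; eapi_from_metadata is a hypothetical helper and UNKNOWN merely stands in for const.unknown_eapi:

UNKNOWN = object()   # stand-in for const.unknown_eapi

def eapi_from_metadata(data):
    value = data.get("EAPI", 0)
    if value == "":
        return 0
    try:
        return int(value)
    except ValueError:
        return UNKNOWN

assert eapi_from_metadata({}) == 0
assert eapi_from_metadata({"EAPI": "1"}) == 1
assert eapi_from_metadata({"EAPI": "prefix"}) is UNKNOWN
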
+
+def get_slot(self):
+ o = self.data.pop("SLOT", "0").strip()
+ if not o:
+ raise ValueError(self, "SLOT cannot be unset")
+ return o
+
+def rewrite_restrict(restrict):
+ if restrict[0:2] == 'no':
+ return restrict[2:]
+ return restrict
+
+
+class base(metadata.package):
+
+ """
+ ebuild package
+
+ @cvar tracked_attributes: sequence of attributes that are required to exist
+ in the built version of ebuild-src
+ @cvar _config_wrappables: mapping of attribute to callable for
+ re-evaluating attributes dependent on configuration
+ """
+
+ tracked_attributes = (
+ "depends", "rdepends", "post_rdepends", "provides", "license",
+ "slot", "keywords", "eapi", "restrict", "eapi", "description", "iuse")
+
+ _config_wrappables = dict((x, alias_class_method("evaluate_depset"))
+ for x in ["depends", "rdepends", "post_rdepends", "fetchables",
+ "license", "src_uri", "license", "provides", "restrict"])
+
+ _get_attr = dict(metadata.package._get_attr)
+ _get_attr["provides"] = generate_providers
+ _get_attr["depends"] = partial(generate_depset, atom, "DEPEND", False)
+ _get_attr["rdepends"] = partial(generate_depset, atom, "RDEPEND", False)
+ _get_attr["post_rdepends"] = partial(generate_depset, atom, "PDEPEND",
+ False)
+ _get_attr["license"] = partial(generate_depset, str,
+ "LICENSE", True, element_func=intern)
+ _get_attr["slot"] = get_slot # lambda s: s.data.pop("SLOT", "0").strip()
+ _get_attr["fetchables"] = generate_fetchables
+ _get_attr["description"] = lambda s:s.data.pop("DESCRIPTION", "").strip()
+ _get_attr["keywords"] = lambda s:tuple(map(intern,
+ s.data.pop("KEYWORDS", "").split()))
+ _get_attr["restrict"] = lambda s:conditionals.DepSet(
+ s.data.pop("RESTRICT", ''), str, operators={},
+ element_func=rewrite_restrict)
+ _get_attr["eapi"] = generate_eapi
+ _get_attr["iuse"] = lambda s:frozenset(imap(intern,
+ s.data.pop("IUSE", "").split()))
+ _get_attr["homepage"] = lambda s:s.data.pop("HOMEPAGE", "").strip()
+
+ __slots__ = tuple(_get_attr.keys() + ["_pkg_metadata_shared"])
+
+ @property
+ def P(self):
+ return "%s-%s" % (self.package, self.version)
+
+ @property
+ def PF(self):
+ return "%s-%s" % (self.package, self.fullver)
+
+ @property
+ def PN(self):
+ return self.package
+
+ @property
+ def PR(self):
+ r = self.revision
+ if r is not None:
+ return r
+ return 0
+
+ @property
+ def ebuild(self):
+ return self._parent.get_ebuild_src(self)
+
+ def _fetch_metadata(self):
+ return self._parent._get_metadata(self)
+
+ def __str__(self):
+ return "ebuild src: %s" % self.cpvstr
+
+ def __repr__(self):
+ return "<%s cpv=%r @%#8x>" % (self.__class__, self.cpvstr, id(self))
+
+
+class package(base):
+
+ __slots__ = ("_shared_pkg_data")
+
+ _get_attr = dict(base._get_attr)
+
+ def __init__(self, shared_pkg_data, *args, **kwargs):
+ base.__init__(self, *args, **kwargs)
+ object.__setattr__(self, "_shared_pkg_data", shared_pkg_data)
+
+ @property
+ def maintainers(self):
+ return self._shared_pkg_data.metadata_xml.maintainers
+
+ @property
+ def herds(self):
+ return self._shared_pkg_data.metadata_xml.herds
+
+ @property
+ def longdescription(self):
+ return self._shared_pkg_data.metadata_xml.longdescription
+
+ @property
+ def _mtime_(self):
+ return self._parent._get_ebuild_mtime(self)
+
+ @property
+ def manifest(self):
+ return self._shared_pkg_data.manifest
+
+
+class package_factory(metadata.factory):
+ child_class = package
+
+ # For the plugin system.
+ priority = 5
+
+ def __init__(self, parent, cachedb, eclass_cache, mirrors, default_mirrors,
+ *args, **kwargs):
+ super(package_factory, self).__init__(parent, *args, **kwargs)
+ self._cache = cachedb
+ self._ecache = eclass_cache
+ if mirrors:
+ mirrors = dict((k, mirror(v, k)) for k, v in mirrors.iteritems())
+
+ self.mirrors = mirrors
+ if default_mirrors:
+ self.default_mirrors = default_mirror(default_mirrors,
+ "conf. default mirror")
+ else:
+ self.default_mirrors = None
+
+ def get_ebuild_src(self, pkg):
+ return self._parent_repo._get_ebuild_src(pkg)
+
+ def _get_ebuild_path(self, pkg):
+ return self._parent_repo._get_ebuild_path(pkg)
+
+ def _get_ebuild_mtime(self, pkg):
+ return os.stat(self._get_ebuild_path(pkg)).st_mtime
+
+ def _invalidated_eclasses(self, data, pkg):
+ return (data.get("_eclasses_") is not None and not
+ self._ecache.is_eclass_data_valid(data["_eclasses_"]))
+
+ def _get_metadata(self, pkg):
+ for cache in self._cache:
+ if cache is not None:
+ try:
+ data = cache[pkg.cpvstr]
+ except KeyError:
+ continue
+ except cache_errors.CacheError, ce:
+ logger.warn("caught cache error: %s" % ce)
+ del ce
+ continue
+ if long(data.pop("_mtime_", -1)) != long(pkg._mtime_) or \
+ self._invalidated_eclasses(data, pkg):
+ if not cache.readonly:
+ del cache[pkg.cpvstr]
+ continue
+ return data
+
+ # no cache entries, regen
+ return self._update_metadata(pkg)
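
The lookup above reduces to: take the first cache holding the entry, but only trust it if the recorded ebuild mtime (and eclass data) still matches, otherwise evict it and fall through to regeneration. A compact standalone version of that policy, with caches as plain dicts and the read-only flag ignored (cached_metadata is a hypothetical helper):

def cached_metadata(caches, key, current_mtime):
    for cache in caches:
        data = cache.get(key)
        if data is None:
            continue
        if data.get("_mtime_") != current_mtime:
            del cache[key]        # stale entry; force a regeneration
            continue
        return data
    return None                   # caller regenerates and refills the caches

cache = {"dev-util/diffball-0.6.2": {"_mtime_": 100, "SLOT": "0"}}
assert cached_metadata([cache], "dev-util/diffball-0.6.2", 100)["SLOT"] == "0"
assert cached_metadata([cache], "dev-util/diffball-0.6.2", 999) is None
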
+
+ def _update_metadata(self, pkg):
+ ebp = processor.request_ebuild_processor()
+ try:
+ mydata = ebp.get_keys(pkg, self._ecache)
+ finally:
+ processor.release_ebuild_processor(ebp)
+
+ mydata["_mtime_"] = pkg._mtime_
+ if mydata.get("INHERITED", False):
+ mydata["_eclasses_"] = self._ecache.get_eclass_data(
+ mydata["INHERITED"].split())
+ del mydata["INHERITED"]
+ else:
+ mydata["_eclasses_"] = {}
+
+ if self._cache is not None:
+ for cache in self._cache:
+ if not cache.readonly:
+ cache[pkg.cpvstr] = mydata
+ break
+
+ return mydata
+
+ def new_package(self, *args):
+ inst = self._cached_instances.get(args)
+ if inst is None:
+ # key being cat/pkg
+ mxml = self._parent_repo._get_shared_pkg_data(args[0], args[1])
+ inst = self._cached_instances[args] = self.child_class(
+ mxml, self, *args)
+ return inst
+
+
+generate_new_factory = package_factory
+
+
+class virtual_ebuild(metadata.package):
+
+ """
+ PROVIDES generated fake packages
+ """
+
+ package_is_real = False
+ built = True
+
+ __slots__ = ("_orig_data", "data", "provider")
+
+ def __init__(self, parent_repository, pkg, data, cpvstr):
+ """
+ @param cpvstr: cpv for the new pkg
+ @param parent_repository: actual repository that this pkg should
+ claim it belongs to
+ @param pkg: parent pkg that is generating this pkg
+ @param data: mapping of data to push to use in __getattr__ access
+ """
+ c = CPV(cpvstr)
+ if c.fullver is None:
+ cpvstr = cpvstr + "-" + pkg.fullver
+
+ metadata.package.__init__(self, parent_repository, cpvstr)
+ sfunc = object.__setattr__
+ sfunc(self, "data", IndeterminantDict(lambda *a: str(), data))
+ sfunc(self, "_orig_data", data)
+ sfunc(self, "provider", pkg.versioned_atom)
+
+ def __getattr__(self, attr):
+ if attr in self._orig_data:
+ return self._orig_data[attr]
+ return metadata.package.__getattr__(self, attr)
+
+ _get_attr = package._get_attr.copy()
diff --git a/pkgcore/ebuild/eclass_cache.py b/pkgcore/ebuild/eclass_cache.py
new file mode 100644
index 0000000..523ec80
--- /dev/null
+++ b/pkgcore/ebuild/eclass_cache.py
@@ -0,0 +1,135 @@
+# Copyright: 2005-2006 Brian Harring <ferringb@gmail.com>
+# Based upon eclass_cache from portage; matches api, but was reimplemented.
+# License: GPL2
+
+"""
+in-memory representation of the on-disk eclass stacking order
+"""
+
+from pkgcore.interfaces.data_source import local_source
+from pkgcore.config import ConfigHint
+
+from snakeoil.mappings import ImmutableDict
+from snakeoil.weakrefs import WeakValCache
+from snakeoil.osutils import join as pjoin
+
+from snakeoil.demandload import demandload
+demandload(globals(),
+ "os",
+ "snakeoil.osutils:normpath",
+ "snakeoil.mappings:StackedDict",
+)
+
+class base(object):
+ """
+ Maintains the cache information about eclasses available to an ebuild.
+ """
+
+ def __init__(self, portdir=None, eclassdir=None):
+ self._eclass_data_inst_cache = WeakValCache()
+ # generate this.
+ # self.eclasses = {} # {"Name": ("location","_mtime_")}
+ self.eclasses = {}
+ self.portdir = portdir
+ self.eclassdir = eclassdir
+
+ def is_eclass_data_valid(self, ec_dict):
+ """Check if eclass data is still valid.
+
+ Given a dict as returned by get_eclass_data, walk it comparing
+ it to internal eclass view.
+
+ @return: a boolean representing whether that eclass data is still
+ up to date, or not
+ """
+ ec = self.eclasses
+ for eclass, tup in ec_dict.iteritems():
+ if eclass not in ec:
+ return False
+ elif isinstance(tup, tuple):
+ if tup[1] != ec[eclass][1]:
+ return False
+ elif tup != ec[eclass][1]:
+ return False
+ return True
+
+ def get_eclass_data(self, inherits):
+ """Return the cachable entries from a list of inherited eclasses.
+
+ Only make get_eclass_data calls for data you know came from
+ this eclass_cache, otherwise be ready to catch a KeyError
+ exception for any eclass that was requested, but not known to
+ this cache.
+ """
+
+ keys = tuple(sorted(inherits))
+ o = self._eclass_data_inst_cache.get(keys)
+ if o is None:
+ o = ImmutableDict((k, self.eclasses[k]) for k in keys)
+ self._eclass_data_inst_cache[keys] = o
+ return o
+
+ def get_eclass(self, eclass):
+ o = self.eclasses.get(eclass)
+ if o is None:
+ return None
+ return local_source(pjoin(o[0], eclass+".eclass"))
+
+
+class cache(base):
+
+ pkgcore_config_type = ConfigHint({"path":"str", "portdir":"str"},
+ typename='eclass_cache')
+
+ def __init__(self, path, portdir=None):
+ """
+ @param portdir: ondisk location of the tree we're working with
+ """
+ base.__init__(self, portdir=portdir, eclassdir=normpath(path))
+ self.update_eclasses()
+
+ def update_eclasses(self):
+ """Force an update of the internal view of on disk/remote eclasses."""
+ self.eclasses = {}
+ eclass_len = len(".eclass")
+ if os.path.isdir(self.eclassdir):
+ for y in os.listdir(self.eclassdir):
+ if not y.endswith(".eclass"):
+ continue
+ try:
+ mtime = os.stat(pjoin(self.eclassdir, y)).st_mtime
+ except OSError:
+ continue
+ ys = y[:-eclass_len]
+ self.eclasses[intern(ys)] = (self.eclassdir, long(mtime))
+
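+# A rough usage sketch of the api above (the paths and eclass name are
+# illustrative placeholders, not defaults):
+#
+#   ec = cache("/usr/portage/eclass", portdir="/usr/portage")
+#   src = ec.get_eclass("eutils")            # local_source, or None if unknown
+#   data = ec.get_eclass_data(["eutils"])    # cachable {name: (dir, mtime)} view
+#   ec.is_eclass_data_valid(data)            # True until that eclass' mtime changes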
+
+class StackedCaches(cache):
+
+ """
+ collapse multiple eclass caches into one.
+
+ Does L->R searching for eclass matches.
+ """
+
+ pkgcore_config_type = ConfigHint(
+ {'caches': 'refs:eclass_cache', 'portdir': 'str', 'eclassdir': 'str'},
+ typename='eclass_cache')
+
+ def __init__(self, caches, **kwds):
+ """
+ @param caches: L{cache} instances to stack;
+ ordering should be desired lookup order
+ @keyword eclassdir: override for the master eclass dir, required for
+ eapi0 and idiot eclass usage. defaults to pulling from the first
+ cache.
+ """
+ if len(caches) < 2:
+ raise TypeError(
+ "%s requires at least two eclass_caches" % self.__class__)
+
+ kwds.setdefault("eclassdir", caches[0].eclassdir)
+ kwds.setdefault("portdir",
+ os.path.dirname(kwds["eclassdir"].rstrip(os.path.sep)))
+ base.__init__(self, **kwds)
+ self.eclasses = StackedDict(*[ec.eclasses for ec in caches])
diff --git a/pkgcore/ebuild/errors.py b/pkgcore/ebuild/errors.py
new file mode 100644
index 0000000..17b6a90
--- /dev/null
+++ b/pkgcore/ebuild/errors.py
@@ -0,0 +1,53 @@
+# Copyright: 2005 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+# "More than one statement on a single line"
+# pylint: disable-msg=C0321
+
+"""
+atom exceptions
+"""
+
+from pkgcore.package import errors
+
+class MalformedAtom(errors.InvalidDependency):
+
+ def __init__(self, atom, err=''):
+ errors.InvalidDependency.__init__(
+ self, "atom '%s' is malformed: error %s" % (atom, err))
+ self.atom, self.err = atom, err
+
+
+class InvalidVersion(errors.InvalidDependency):
+
+ def __init__(self, ver, rev, err=''):
+ errors.InvalidDependency.__init__(
+ self,
+ "Version restriction ver='%s', rev='%s', is malformed: error %s" %
+ (ver, rev, err))
+ self.ver, self.rev, self.err = ver, rev, err
+
+
+class InvalidCPV(errors.InvalidPackage):
+ """Raised if an invalid cpv was passed in.
+
+ @ivar args: single-element tuple containing the invalid string.
+ @type args: C{tuple}
+ """
+
+
+class ParseError(errors.InvalidDependency):
+
+ def __init__(self, s, token=None, msg=None):
+ if msg is None:
+ str_msg = ''
+ else:
+ str_msg = ': %s' % msg
+ if token is not None:
+ Exception.__init__(self,
+ "%s is unparseable%s\nflagged token- %s" %
+ (s, str_msg, token))
+ else:
+ Exception.__init__(self,
+ "%s is unparseable%s" % (s, str_msg))
+ self.dep_str, self.token, self.msg = s, token, msg
diff --git a/pkgcore/ebuild/filter_env.py b/pkgcore/ebuild/filter_env.py
new file mode 100644
index 0000000..cc8ca55
--- /dev/null
+++ b/pkgcore/ebuild/filter_env.py
@@ -0,0 +1,418 @@
+# Copyright: 2006 Marien Zwart <marienz@gentoo.org>
+# Copyright: 2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+# Based on filter-env by Brian Harring <ferringb@gmail.com>
+# and Mike Frysinger <spanky@gentoo.org>
+
+
+"""Filter a bash environment dump."""
+
+
+from snakeoil.demandload import demandload
+demandload(globals(),
+ 're',
+ 'pkgcore.log:logger'
+)
+
+
+COMMAND_PARSING, SPACE_PARSING = range(2)
+
+
+def native_run(out, file_buff, vsr, fsr,
+ desired_var_match, desired_func_match):
+ """Print a filtered environment.
+
+ @param out: file-like object to write to.
+ @param file_buff: string containing the environment to filter.
+ Should end in '\0'.
+ @param vsr: result of build_regex_string or C{None}, for variables.
+ @param fsr: result of build_regex_string or C{None}, for functions.
+ @param desired_var_match: boolean indicating vsr should match or not.
+ @param desired_func_match: boolean indicating fsr should match or not.
+ """
+ if fsr is None:
+ func_match = None
+ else:
+ fsr = re.compile(fsr)
+ if desired_func_match:
+ func_match = fsr.match
+ else:
+ def func_match(data):
+ return fsr.match(data) is None
+
+ if vsr is None:
+ var_match = None
+ else:
+ vsr = re.compile(vsr)
+ if desired_var_match:
+ var_match = vsr.match
+ else:
+ def var_match(data):
+ return vsr.match(data) is None
+
+ process_scope(out, file_buff, 0, var_match, func_match, '\0')
+
+
+try:
+ from pkgcore.ebuild._filter_env import run
+except ImportError:
+ cpy_run = None
+ run = native_run
+else:
+ cpy_run = run
+
+
+def build_regex_string(tokens):
+ if not tokens:
+ return None
+ result = []
+ for token in tokens:
+ if not token:
+ continue
+ escaped = False
+ l = []
+ for ch in token:
+ if ch == '.' and not escaped:
+ l.append('[^= ]')
+ else:
+ l.append(ch)
+ if ch == '\\':
+ escaped = not escaped
+ else:
+ escaped = False
+ result.append(''.join(l))
+ if len(result) == 1:
+ return '^%s$' % result[0]
+ return '^(%s)$' % '|'.join(result)
+
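+# A small sketch of how the pieces combine (env_dump here is a placeholder
+# for a saved bash environment string):
+#
+#   import sys
+#   vsr = build_regex_string(["CFLAGS", "LDFLAGS"])   # -> '^(CFLAGS|LDFLAGS)$'
+#   # drop matching variable assignments, keep all functions (fsr is None)
+#   run(sys.stdout, env_dump + '\0', vsr, None, True, True)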
+
+FUNC_LEN = len('function')
+def is_function(buff, pos):
+ """@returns: start, end, pos or None, None, None tuple."""
+ isspace = str.isspace
+ while buff[pos] in ' \t':
+ pos += 1
+ if buff[pos:pos + FUNC_LEN] == 'function':
+ pos += FUNC_LEN
+ while isspace(buff[pos]):
+ pos += 1
+ start = pos
+ while buff[pos] not in '\0 \t\n="\'()':
+ pos += 1
+ end = pos
+ if end == start:
+ return None, None, None
+ while buff[pos] in ' \t':
+ pos += 1
+ if buff[pos] != '(':
+ return None, None, None
+ pos += 1
+ while buff[pos] in ' \t':
+ pos += 1
+ if buff[pos] != ')':
+ return None, None, None
+ pos += 1
+ while isspace(buff[pos]):
+ pos += 1
+ if buff[pos] != '{':
+ return None, None, None
+ return start, end, pos + 1
+
+
+def is_envvar(buff, pos):
+ """@returns: start, end, pos or None, None, None tuple."""
+ while buff[pos] in ' \t':
+ pos += 1
+ start = pos
+ while True:
+ if buff[pos] in '\0"\'()- \t\n':
+ return None, None, None
+ if buff[pos] == '=':
+ if pos == start:
+ return None, None, None
+ return start, pos, pos + 1
+ pos += 1
+
+
+def process_scope(out, buff, pos, var_match, func_match, endchar):
+ window_start = pos
+ window_end = None
+ isspace = str.isspace
+ end = len(buff)
+ while pos < end and buff[pos] != endchar:
+ # Wander forward to the next non space.
+ if window_end is not None:
+ if out is not None:
+ out.write(buff[window_start:window_end])
+ window_start = pos
+ window_end = None
+ com_start = pos
+ ch = buff[pos]
+ if isspace(ch):
+ pos += 1
+ continue
+
+ # Ignore comments.
+ if ch == '#':
+ pos = walk_statement_pound(buff, pos, endchar)
+ continue
+
+ new_start, new_end, new_p = is_function(buff, pos)
+ if new_p is not None:
+ func_name = buff[new_start:new_end]
+ logger.debug('matched func name %r', func_name)
+ new_p = process_scope(None, buff, new_p, None, None, '}')
+ logger.debug('ended processing %r', func_name)
+ if func_match is not None and func_match(func_name):
+ logger.debug('filtering func %r', func_name)
+ window_end = com_start
+ pos = new_p
+ pos += 1
+ continue
+ # Check for env assignment.
+ new_start, new_end, new_p = is_envvar(buff, pos)
+ if new_p is None:
+ # Non env assignment.
+ pos = walk_command_complex(buff, pos, endchar, COMMAND_PARSING)
+ # icky icky icky icky
+ if pos < end and buff[pos] != endchar:
+ pos += 1
+ else:
+ # Env assignment.
+ var_name = buff[new_start:new_end]
+ pos = new_p
+ logger.debug('matched env assign %r', var_name)
+
+ if var_match is not None and var_match(var_name):
+ # This would be filtered.
+ logger.info("filtering var '%s'", var_name)
+ window_end = com_start
+
+ if pos >= end:
+ return pos
+
+ while (pos < end and not isspace(buff[pos])
+ and buff[pos] != ';'):
+ if buff[pos] == "'":
+ pos = walk_statement_no_parsing(buff, pos + 1, "'") + 1
+ elif buff[pos] in '"`':
+ pos = walk_command_escaped_parsing(buff, pos + 1,
+ buff[pos]) + 1
+ elif buff[pos] == '(':
+ pos = walk_command_escaped_parsing(buff, pos + 1, ')') + 1
+ elif buff[pos] == '$':
+ pos += 1
+ if pos >= end:
+ continue
+ pos = walk_dollar_expansion(buff, pos, end, endchar)
+ continue
+ else:
+ # blah=cah ; single word
+ pos = walk_command_complex(buff, pos, ' ', SPACE_PARSING)
+
+ if out is not None:
+ if window_end is None:
+ window_end = pos
+ if window_end > end:
+ window_end = end
+ out.write(buff[window_start:window_end])
+
+ return pos
+
+
+def walk_statement_no_parsing(buff, pos, endchar):
+ pos = buff.find(endchar, pos)
+ if pos == -1:
+ pos = len(buff) - 1
+ return pos
+
+
+def walk_statement_dollared_quote_parsing(buff, pos, endchar):
+ end = len(buff)
+ while pos < end:
+ if buff[pos] == endchar:
+ return pos
+ elif buff[pos] == '\\':
+ pos += 1
+ pos += 1
+ return pos
+
+
+def walk_here_statement(buff, pos):
+ pos += 1
+ logger.debug('starting here processing for COMMAND for level 2 at p == %.10s',
+ pos)
+ if buff[pos] == '<':
+ logger.debug(
+ "correction, it's a third level here. Handing back to command "
+ 'parsing')
+ return pos + 1
+ isspace = str.isspace
+ end = len(buff)
+ while pos < end and (isspace(buff[pos]) or buff[pos] == '-'):
+ pos += 1
+ if buff[pos] in "'\"":
+ end_here = walk_statement_no_parsing(buff, pos + 1, buff[pos])
+ pos += 1
+ else:
+ end_here = walk_command_complex(buff, pos, ' ', SPACE_PARSING)
+ here_word = buff[pos:end_here]
+ logger.debug('matched len(%s)/%r for a here word',
+ len(here_word), here_word)
+ # XXX watch this. Potential for horkage. Need to do the quote
+ # removal thing. This sucks.
+ end_here += 1
+ if end_here >= end:
+ return end_here
+
+ here_len = len(here_word)
+ end_here = buff.find(here_word, end_here)
+ while end_here != -1:
+ i = here_len + end_here
+ if buff[i] in ';\n\r})':
+ i = end_here - 1
+ while i >= 0 and buff[i] in '\t ':
+ i -= 1
+ if i >= 0 and buff[i] == '\n':
+ break
+ end_here = buff.find(here_word, end_here + here_len)
+
+ if end_here == -1:
+ return end
+ return end_here + len(here_word)
+
+
+def walk_statement_pound(buff, pos, endchar=None):
+ if pos and not buff[pos-1].isspace():
+ return pos + 1
+ if endchar == '`':
+ i = buff.find('\n', pos)
+ i2 = buff.find(endchar, pos)
+ if i == -1:
+ if i2 != -1:
+ return i2
+ else:
+ if i2 != -1:
+ return min(i, i2)
+ return i
+ return len(buff) - 1
+
+ pos = buff.find('\n', pos)
+ if pos == -1:
+ pos = len(buff) - 1
+ return pos
+
+
+def walk_command_complex(buff, pos, endchar, interpret_level):
+ start = pos
+ isspace = str.isspace
+ end = len(buff)
+ while pos < end:
+ ch = buff[pos]
+ if ch == endchar:
+ if endchar != '}':
+ return pos
+ if start == pos:
+ return pos
+ if buff[pos - 1] in ";\n":
+ return pos
+ elif (interpret_level == COMMAND_PARSING and ch in ';\n') or \
+ (interpret_level == SPACE_PARSING and isspace(ch)):
+ return pos
+ elif ch == '\\':
+ pos += 1
+ elif ch == '<':
+ if (pos < end - 1 and buff[pos + 1] == '<' and
+ interpret_level == COMMAND_PARSING):
+ pos = walk_here_statement(buff, pos + 1)
+ # we continue immediately; walk_here deposits us at the end
+ # of the here op, not consuming the final delimiting char
+ # since it may be an endchar
+ continue
+ else:
+ logger.debug('noticed <, interpret_level=%s', interpret_level)
+ elif ch == '#':
+ if start == pos or isspace(buff[pos - 1]) or buff[pos - 1] == ';':
+ pos = walk_statement_pound(buff, pos)
+ continue
+ elif ch == '$':
+ pos = walk_dollar_expansion(buff, pos + 1, end, endchar)
+ continue
+ elif ch == '{':
+ pos = walk_command_escaped_parsing(buff, pos + 1, '}')
+ elif ch == '(' and interpret_level == COMMAND_PARSING:
+ pos = walk_command_escaped_parsing(buff, pos + 1, ')')
+ elif ch in '`"':
+ pos = walk_command_escaped_parsing(buff, pos + 1, ch)
+ elif ch == "'" and endchar != '"':
+ pos = walk_statement_no_parsing(buff, pos +1, "'")
+ pos += 1
+ return pos
+
+def raw_walk_command_escaped_parsing(buff, pos, endchar):
+ end = len(buff)
+ while pos < end:
+ ch = buff[pos]
+ if ch == endchar:
+ return pos
+ elif ch == '\\':
+ pos += 1
+ elif ch == '{':
+ if endchar != '"':
+ pos = raw_walk_command_escaped_parsing(
+ buff, pos + 1, '}')
+ elif ch == '(':
+ if endchar != '"':
+ pos = raw_walk_command_escaped_parsing(
+ buff, pos + 1, ')')
+ elif ch in '`"':
+ pos = raw_walk_command_escaped_parsing(buff, pos + 1, ch)
+ elif ch == "'" and endchar != '"':
+ pos = walk_statement_no_parsing(buff, pos + 1, "'")
+ elif ch == '$':
+ pos = walk_dollar_expansion(buff, pos + 1, end, endchar,
+ disable_quote = endchar == '"')
+ continue
+ elif ch == '#' and endchar != '"':
+ pos = walk_statement_pound(buff, pos, endchar)
+ continue
+ pos += 1
+ return pos
+
+walk_command_escaped_parsing = raw_walk_command_escaped_parsing
+
+def walk_dollar_expansion(buff, pos, end, endchar, disable_quote=False):
+ if buff[pos] == '(':
+ return process_scope(None, buff, pos + 1, None, None, ')') + 1
+ if buff[pos] == "'" and not disable_quote:
+ return walk_statement_dollared_quote_parsing(buff, pos +1, "'") + 1
+ if buff[pos] != '{':
+ if buff[pos] == '$':
+ # short circuit it.
+ return pos + 1
+ while pos < end and buff[pos] != endchar:
+ if buff[pos].isspace():
+ return pos
+ if buff[pos] == '$':
+ # shouldn't this be passing disable_quote ?
+ return walk_dollar_expansion(buff, pos + 1, end, endchar)
+ if not buff[pos].isalnum():
+ if buff[pos] != '_':
+ return pos
+ pos += 1
+
+ if pos >= end:
+ return end
+ return pos
+
+ pos += 1
+ # shortcut ${$} to avoid going too deep. ${$a} isn't valid, so no concern
+ if buff[pos] == '$':
+ return pos + 1
+ while pos < end and buff[pos] != '}':
+ if buff[pos] == '$':
+ # disable_quote?
+ pos = walk_dollar_expansion(buff, pos + 1, end, endchar)
+ else:
+ pos += 1
+ return pos + 1
diff --git a/pkgcore/ebuild/formatter.py b/pkgcore/ebuild/formatter.py
new file mode 100644
index 0000000..436706e
--- /dev/null
+++ b/pkgcore/ebuild/formatter.py
@@ -0,0 +1,486 @@
+# Copyright: 2006 Charlie Shepherd <masterdriverz@gentoo.org>
+# License: GPL2
+
+"""PMerge formatting module
+
+To add a new formatter, add the relevant class (which
+should be a subclass of Formatter). Documentation is
+a necessity - things can change/break easily between
+versions. Then add the class name (_not_ an instance) to
+the formatters dictionary - this will instantly make your
+formatter available on the commandline.
+"""
+
+import operator
+
+from pkgcore.config import configurable
+from snakeoil.demandload import demandload
+demandload(globals(), 'errno')
+
+class NoChoice(KeyboardInterrupt):
+ """Raised by L{userquery} if no choice was made.
+
+ HACK: this subclasses KeyboardInterrupt, so if you ignore this it
+ should do something reasonable.
+ """
+
+def userquery(prompt, out, err, responses=None, default_answer=None, limit=3):
+ """Ask the user to choose from a set of options.
+
+ Displays a prompt and a set of responses, then waits for a
+ response which is checked against the responses. If there is an
+ unambiguous match the value is returned.
+
+ If the user does not input a valid response after a number of
+ tries L{NoChoice} is raised. You can catch this if you want to do
+ something special. Because it subclasses C{KeyboardInterrupt}
+ the default behaviour is to abort as if the user hit ctrl+c.
+
+ @type prompt: C{basestring} or a tuple of things to pass to a formatter.
+ XXX this is a crummy api but I cannot think of a better one supporting
+ the very common case of wanting just a string as prompt.
+ @type out: formatter.
+ @type err: formatter.
+ @type responses: mapping with C{basestring} keys and tuple values.
+ @param responses: mapping of user input to function result.
+ The first item in the value tuple is returned, the rest is passed to
+ out.
+ Defaults to::
+ {
+ 'yes': (True, out.fg('green'), 'Yes'),
+ 'no': (False, out.fg('red'), 'No'),
+ }
+ @param default_answer: returned if there is no input
+ (user just hits enter). Defaults to True if responses is unset,
+ unused otherwise.
+ @param limit: number of allowed tries.
+ """
+ if responses is None:
+ responses = {
+ 'yes': (True, out.fg('green'), 'Yes'),
+ 'no': (False, out.fg('red'), 'No'),
+ }
+ if default_answer is None:
+ default_answer = True
+ if default_answer is not None:
+ for val in responses.itervalues():
+ if val[0] == default_answer:
+ default_answer_name = val[1:]
+ for i in xrange(limit):
+ # XXX see docstring about crummyness
+ if isinstance(prompt, tuple):
+ out.write(autoline=False, *prompt)
+ else:
+ out.write(prompt, autoline=False)
+ out.write(' [', autoline=False)
+ prompts = responses.values()
+ for choice in prompts[:-1]:
+ out.write(autoline=False, *choice[1:])
+ out.write(out.reset, '/', autoline=False)
+ out.write(autoline=False, *prompts[-1][1:])
+ out.write(out.reset, ']', autoline=False)
+ if default_answer is not None:
+ out.write(' (default: ', autoline=False)
+ out.write(autoline=False, *default_answer_name)
+ out.write(')', autoline=False)
+ out.write(': ', autoline=False)
+ try:
+ response = raw_input()
+ except EOFError:
+ out.write("\nNot answerable: EOF on STDIN")
+ raise NoChoice()
+ except IOError, e:
+ if e.errno == errno.EBADF:
+ out.write("\nNot answerable: STDIN is either closed, or not readable")
+ raise NoChoice()
+ raise
+ if not response:
+ return default_answer
+ results = set(
+ (key, value) for key, value in responses.iteritems()
+ if key[:len(response)].lower() == response.lower())
+ if not results:
+ err.write('Sorry, response "%s" not understood.' % (response,))
+ elif len(results) > 1:
+ err.write('Response "%s" is ambiguous (%s)' % (
+ response, ', '.join(key for key, val in results)))
+ else:
+ return list(results)[0][1][0]
+
+ raise NoChoice()
+
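+# Minimal usage sketch; out and err are assumed to be pkgcore formatter
+# objects (anything providing write() and fg()):
+#
+#   if userquery("Continue with these changes?", out, err,
+#                default_answer=False):
+#       ...proceed...
+#   # a NoChoice escape (subclassing KeyboardInterrupt) aborts by default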
+
+class use_expand_filter(object):
+
+ def __init__(self, use_expand, use_expand_hidden):
+ """
+ @type use_expand: iterable of strings
+ @param use_expand: names of use-expanded variables.
+ @type use_expand_hidden: set of strings
+ @param use_expand_hidden: names of use-expanded vars that should not
+ be added to the dict.
+ """
+ self.expand_filters = dict((x.lower(), (x not in use_expand_hidden, x))
+ for x in use_expand)
+ self.use_expand = use_expand
+ self.use_expand_hidden = use_expand_hidden
+ self.known_flags = {}
+
+ def __call__(self, use):
+ """Split USE flags up into "normal" flags and use-expanded ones.
+ @type use: iterable of strings
+ @param use: flags that are set.
+ @rtype: sequence of strings, dict mapping a string to a list of strings
+ @return: set of normal flags and a mapping from use_expand name to
+ value (with the use-expanded bit stripped off, so
+ C{"video_cards_alsa"} becomes C{"{'video_cards': ['alsa']}"}).
+ """
+
+ # XXX: note this is fairly slow- actually takes up more time than chunks of
+ # the resolver
+ ue_dict = {}
+ usel = []
+ ef = self.expand_filters
+ kf = self.known_flags
+
+ for flag in use:
+ data = kf.get(flag)
+ if data is None:
+ split_flag = flag.rsplit("_", 1)
+ while len(split_flag) == 2:
+ if split_flag[0] not in ef:
+ split_flag = split_flag[0].rsplit("_", 1)
+ continue
+ expand_state = ef[split_flag[0]]
+ if expand_state[0]:
+ # not hidden
+ kf[flag] = data = (expand_state[1], flag[len(split_flag[0]) + 1:])
+ else:
+ kf[flag] = data = False
+ break
+ else:
+ kf[flag] = data = True
+ if data is True:
+ # straight use flag.
+ usel.append(flag)
+ elif data:
+ # non hidden flag.
+ if data[0] not in ue_dict:
+ ue_dict[data[0]] = set([data[1]])
+ else:
+ ue_dict[data[0]].add(data[1])
+
+ return frozenset(usel), ue_dict
+
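+# For example (the flag names here are only illustrative):
+#
+#   uef = use_expand_filter(["video_cards"], frozenset())
+#   uef(["alsa", "video_cards_radeon"])
+#   # -> (frozenset(["alsa"]), {"video_cards": set(["radeon"])})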
+
+class Formatter(object):
+
+ """Base Formatter class: All formatters should be subclasses of this."""
+
+ def __init__(self, **kwargs):
+ self.__dict__.update(kwargs)
+
+ def format(self, op):
+ """Formats an op. Subclasses must define this method"""
+ raise NotImplementedError(self.format)
+
+ def ask(self, question, responses=None, default_answer=None, limit=3):
+ return userquery(
+ question, self.out, self.err, responses, default_answer, limit)
+
+ def end(self):
+ """Called at the end, normally for summary information"""
+
+
+class BasicFormatter(Formatter):
+ """A basic formatter, intended for scripts"""
+ def format(self, op):
+ self.out.write(op.pkg.key)
+
+
+class PkgcoreFormatter(Formatter):
+ """The original pkgcore output"""
+ def format(self, op):
+ repo = getattr(op.pkg.repo, 'repo_id', None)
+ if not repo:
+ p = str(op.pkg.cpvstr)
+ else:
+ p = "%s::%s" % (op.pkg.cpvstr, repo)
+ if op.desc == "replace":
+ self.out.write("replace %s, %s" % (op.old_pkg.cpvstr, p))
+ else:
+ self.out.write("%s %s" % (op.desc.ljust(7), p))
+
+
+class PortageFormatter(Formatter):
+
+ """Portage formatter
+
+ A Formatter designed to resemble Portage's output
+ as much as possible.
+ """
+
+ def __init__(self, **kwargs):
+ kwargs.setdefault("use_expand", set())
+ kwargs.setdefault("use_expand_hidden", set())
+ kwargs.setdefault("display_repo", False)
+ Formatter.__init__(self, **kwargs)
+ self.use_splitter = use_expand_filter(self.use_expand,
+ self.use_expand_hidden)
+ # Map repo location to an index.
+ self.repos = {}
+
+ def format(self, op):
+ # [<type> NRFDU]
+ # <type> - ebuild, block or nomerge (for --tree)
+ # N - New package
+ # R - Rebuild package
+ # F - Fetch restricted
+ # D - Downgrade
+ # U - Upgrade
+ # Caveats:
+ # - U and D are both displayed to show a downgrade - this is kept
+ # in order to be consistent with existing portage behaviour
+
+
+ out = self.out
+ origautoline = out.autoline
+ out.autoline = False
+
+ # This is for the summary at the end
+ reponr = self.repos.setdefault(
+ getattr(op.pkg.repo, "repo_id", "<unknown>"),
+ len(self.repos) + 1)
+
+ # We don't do blockers or --tree stuff yet
+ out.write('[ebuild ')
+
+ # Order is important here - look at the above diagram
+ type = op.desc
+ if op.desc == "add":
+ out.write(out.fg('green'), ' N')
+ if op.pkg.slot != '0':
+ out.write(out.fg('green'), 'S')
+ else:
+ out.write(' ')
+ elif op.desc == "replace" and op.pkg == op.old_pkg:
+ out.write(out.fg('yellow'), ' R')
+ else:
+ out.write(' ')
+ type = 'upgrade'
+
+ if 'fetch' in op.pkg.restrict:
+ out.write(out.fg('red'), 'F')
+ else:
+ out.write(' ')
+ if type == 'upgrade':
+ if op.pkg.fullver != op.old_pkg.fullver:
+ out.write(out.fg('cyan'), 'U')
+ if op.pkg > op.old_pkg:
+ out.write(' ')
+ else:
+ out.write(out.fg('blue'), 'D')
+ else:
+ out.write(' ')
+ out.write('] ')
+
+ out.write(out.fg('green'), '%s ' % op.pkg.cpvstr)
+
+ if type == 'upgrade':
+ out.write(out.fg('blue'), '[%s] ' % op.old_pkg.fullver)
+
+ # Build a list of (useflags, use_expand_dicts) tuples.
+ # HACK: if we are in "replace" mode we build a list of length
+ # 4, else this is a list of length 2. We then pass this to
+ # format_use which can take either 2 or 4 arguments.
+ if op.desc == 'replace':
+ uses = (op.pkg.iuse, op.pkg.use, op.old_pkg.iuse, op.old_pkg.use)
+ else:
+ uses = (op.pkg.iuse, op.pkg.use)
+ stuff = map(self.use_splitter, uses)
+
+ # Convert the list of tuples to a list of lists and a list of
+ # dicts (both length 2 or 4).
+ uselists, usedicts = zip(*stuff)
+ self.format_use('use', *uselists)
+ for expand in self.use_expand - self.use_expand_hidden:
+ flaglists = [d.get(expand, ()) for d in usedicts]
+ self.format_use(expand, *flaglists)
+
+ if self.display_repo:
+ out.write(out.fg('blue'), " [%d]" % (reponr,))
+
+ out.write('\n')
+ out.autoline = origautoline
+
+ def format_use(self, attr, selectable, choice, oldselectable=None,
+ oldchoice=None):
+ """Write the current selection from a set of flags to a formatter.
+
+ @type attr: string
+ @param attr: the name of the setting.
+ @type selectable: set of strings
+ @param selectable: the possible values.
+ @type choice: set of strings
+ @param choice: the chosen values.
+ @type oldselectable: set of strings
+ @param oldselectable: the values possible in the previous version.
+ @type oldchoice: set of strings
+ @param oldchoice: the previously chosen values.
+ """
+ out = self.out
+ red = out.fg('red')
+ green = out.fg('green')
+ blue = out.fg('blue')
+ yellow = out.fg('yellow')
+
+ flags = []
+ enabled = set(selectable) & set(choice)
+ disabled = set(selectable) - set(choice)
+ if oldselectable is not None and oldchoice is not None:
+ old_enabled = set(oldselectable) & set(oldchoice)
+ old_disabled = set(oldselectable) - set(oldchoice)
+ for flag in sorted(enabled):
+ assert flag
+ if flag in old_enabled:
+ # Unchanged flag.
+ flags.extend((red, flag, ' '))
+ elif flag in old_disabled:
+ # Toggled.
+ # Trailing single space is important, we can pop it below.
+ flags.extend((green, flag, '*', ' '))
+ else:
+ # Flag did not exist earlier.
+ flags.extend((yellow, flag, '%', ' '))
+ for flag in sorted(disabled | (set(oldselectable) - set(selectable))):
+ assert flag
+ if flag not in disabled:
+ # Removed flag.
+ flags.extend((yellow, '(-', flag, '%)', ' '))
+ elif flag in old_disabled:
+ # Unchanged.
+ flags.extend((blue, '-', flag, ' '))
+ elif flag in old_enabled:
+ # Toggled.
+ flags.extend((yellow, '-', flag, '*', ' '))
+ else:
+ # New.
+ flags.extend((yellow, '-', flag, '%', ' '))
+ else:
+ for flag in sorted(enabled):
+ flags.extend((red, flag, ' '))
+ for flag in sorted(disabled):
+ flags.extend((yellow, '-', flag, ' '))
+
+ # Only write this if we have something to write
+ if flags:
+ out.write(attr.upper(), '="')
+ # Omit the final space.
+ out.write(*flags[:-1])
+ out.write('" ')
+
+ def end(self):
+ if self.display_repo:
+ self.out.write()
+ repos = self.repos.items()
+ repos.sort(key=operator.itemgetter(1))
+ for k, v in repos:
+ self.out.write(self.out.fg('blue'), "[%d] %s" % (v, k))
+
+
+class PaludisFormatter(Formatter):
+
+ """Paludis formatter
+
+ A Formatter designed to resemble Paludis' output
+ as much as possible.
+ """
+
+ def __init__(self, **kwargs):
+ Formatter.__init__(self, **kwargs)
+ self.packages = self.new = self.upgrades = self.downgrades = 0
+ self.nslots = 0
+
+ def format(self, op):
+ out = self.out
+ origautoline = out.autoline
+ out.autoline = False
+ self.packages += 1
+
+ out.write('* ')
+ out.write(out.fg('blue'), op.pkg.key)
+ out.write("-%s" % op.pkg.fullver)
+ out.write("::%s " % op.pkg.repo.repo_id)
+ out.write(out.fg('blue'), "{:%s} " % op.pkg.slot)
+ if op.desc == 'add':
+ if op.pkg.slot != '0':
+ suffix = 'S'
+ self.nslots += 1
+ else:
+ suffix = 'N'
+ self.new += 1
+ out.write(out.fg('yellow'), "[%s]" % suffix)
+ elif op.desc == 'replace':
+ if op.pkg != op.old_pkg:
+ if op.pkg > op.old_pkg:
+ suffix = "U"
+ self.upgrades += 1
+ else:
+ suffix = "D"
+ self.downgrades += 1
+ out.write(out.fg('yellow'), "[%s %s]" % (
+ suffix, op.old_pkg.fullver))
+ else:
+ out.write(out.fg('yellow'), "[R]")
+
+ red = out.fg('red')
+ green = out.fg('green')
+ flags = []
+ use = set(op.pkg.use)
+ for flag in sorted(op.pkg.iuse):
+ if flag in use:
+ flags.extend((green, flag, ' '))
+ else:
+ flags.extend((red, '-', flag, ' '))
+ if flags:
+ out.write(' ')
+ # Throw away the final space.
+ out.write(*flags[:-1])
+ out.write('\n')
+ out.autoline = origautoline
+
+ def end(self):
+ self.out.write(
+ 'Total: %d packages '
+ '(%d new, %d upgrades, %d downgrades, %d in new slots)' % (
+ self.packages, self.new, self.upgrades, self.downgrades,
+ self.nslots))
+
+
+def formatter_factory_generator(cls):
+ """Factory for formatter factories that take no further arguments.
+
+ A formatter factory is a subclass of Formatter or a callable
+ taking the same keyword arguments.
+
+ This helper wraps such a subclass in an extra no-argument callable
+ that is usable by the configuration system.
+ """
+ @configurable(typename='pmerge_formatter')
+ def factory():
+ return cls
+ return factory
+
+
+basic_factory = formatter_factory_generator(BasicFormatter)
+pkgcore_factory = formatter_factory_generator(PkgcoreFormatter)
+portage_factory = formatter_factory_generator(PortageFormatter)
+paludis_factory = formatter_factory_generator(PaludisFormatter)
+
+@configurable(typename='pmerge_formatter')
+def portage_verbose_factory():
+ """Version of portage-formatter that is always in verbose mode."""
+ def factory(**kwargs):
+ kwargs['display_repo'] = True
+ return PortageFormatter(**kwargs)
+ return factory
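+
+
+# Per the module docstring, adding a formatter is just subclassing Formatter
+# and exposing a factory; a minimal sketch (the names are illustrative, and
+# the commandline 'formatters' mapping referenced above lives outside this
+# module):
+#
+#   class EchoFormatter(Formatter):
+#       """Write only the cpv of each op."""
+#       def format(self, op):
+#           self.out.write(op.pkg.cpvstr)
+#
+#   echo_factory = formatter_factory_generator(EchoFormatter)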
diff --git a/pkgcore/ebuild/misc.py b/pkgcore/ebuild/misc.py
new file mode 100644
index 0000000..c1e17b3
--- /dev/null
+++ b/pkgcore/ebuild/misc.py
@@ -0,0 +1,131 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+misc. stuff we've not found a spot for yet.
+"""
+
+from pkgcore.restrictions import packages, restriction
+from pkgcore.ebuild.atom import atom
+from pkgcore.ebuild.profiles import incremental_expansion
+
+from snakeoil.lists import iflatten_instance
+from snakeoil.klass import generic_equality
+
+class collapsed_restrict_to_data(object):
+
+ __metaclass__ = generic_equality
+ __attr_comparison__ = ('defaults', 'freeform', 'atoms', '__class__')
+
+ def __init__(self, *restrict_sources):
+ """
+ descriptive, no?
+
+ Basically splits an iterable of restrict:data into
+ level of specificity, repo, cat, pkg, atom (dict) for use
+ in filters
+ """
+
+ always = []
+ repo = []
+ cat = []
+ pkg = []
+ atom_d = {}
+ for restrict_pairs in restrict_sources:
+ for a, data in restrict_pairs:
+ if not data:
+ continue
+ if isinstance(a, restriction.AlwaysBool):
+ # yes, odd attr name, but negate holds the val to return.
+ # note also, we're dropping AlwaysFalse; it'll never match.
+ if a.negate:
+ always.extend(data)
+ elif isinstance(a, atom):
+ atom_d.setdefault(a.key, []).append((a, data))
+ elif isinstance(a, packages.PackageRestriction):
+ if a.attr == "category":
+ cat.append((a, data))
+ elif a.attr == "package":
+ pkg.append((a, data))
+ else:
+ raise ValueError("%r doesn't operate on package/category: "
+ "data %r" % (a, data))
+ elif isinstance(a, restriction.AlwaysBool):
+ repo.append((a, data))
+ else:
+ raise ValueError("%r is not a AlwaysBool, PackageRestriction, "
+ "or atom: data %r" % (a, data))
+
+ if always:
+ s = set()
+ incremental_expansion(s, always)
+ always = s
+ else:
+ always = set()
+ self.defaults = always
+ self.freeform = tuple(x for x in (repo, cat, pkg) if x)
+ self.atoms = atom_d
+
+ def atom_intersects(self, atom):
+ return atom.key in self.atoms
+
+ def pull_data(self, pkg, force_copy=False):
+ l = []
+ for specific in self.freeform:
+ for restrict, data in specific:
+ if restrict.match(pkg):
+ l.append(data)
+ for atom, data in self.atoms.get(pkg.key, ()):
+ if atom.match(pkg):
+ l.append(data)
+ if not l:
+ if force_copy:
+ return set(self.defaults)
+ return self.defaults
+ s = set(self.defaults)
+ incremental_expansion(s, iflatten_instance(l))
+ return s
+
+ def iter_pull_data(self, pkg):
+ for item in self.defaults:
+ yield item
+ for specific in self.freeform:
+ for restrict, data in specific:
+ if restrict.match(pkg):
+ for item in data:
+ yield item
+ for atom, data in self.atoms.get(pkg.key, ()):
+ if atom.match(pkg):
+ for item in data:
+ yield item
+
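+# A small sketch of the intended use (the atom and flags are placeholders):
+#
+#   per_pkg_use = collapsed_restrict_to_data(
+#       [(atom("dev-util/diffball"), ("doc", "-static"))])
+#   per_pkg_use.pull_data(some_pkg)   # defaults plus any matching entries,
+#                                     # merged via incremental_expansion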
+
+class non_incremental_collapsed_restrict_to_data(collapsed_restrict_to_data):
+
+ def pull_data(self, pkg, force_copy=False):
+ l = []
+ for specific in self.freeform:
+ for restrict, data in specific:
+ if restrict.match(pkg):
+ l.append(data)
+ for atom, data in self.atoms.get(pkg.key, ()):
+ if atom.match(pkg):
+ l.append(data)
+ if not l:
+ if force_copy:
+ return set(self.defaults)
+ return self.defaults
+ s = set(self.defaults)
+ s.update(iflatten_instance(l))
+ return s
+
+ def iter_pull_data(self, pkg):
+ l = [self.defaults]
+ for specific in self.freeform:
+ l.extend(data for restrict, data in specific if restrict.match(pkg))
+ for atom, data in self.atoms.get(pkg.key, ()):
+ if atom.match(pkg):
+ l.append(data)
+ if len(l) == 1:
+ return iter(self.defaults)
+ return iflatten_instance(l)
diff --git a/pkgcore/ebuild/overlay_repository.py b/pkgcore/ebuild/overlay_repository.py
new file mode 100644
index 0000000..c19f0c2
--- /dev/null
+++ b/pkgcore/ebuild/overlay_repository.py
@@ -0,0 +1,89 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+implementation of the standard PORTDIR + PORTDIR_OVERLAY repository stacking
+"""
+
+from pkgcore.repository import prototype
+from pkgcore.config import ConfigHint, errors
+from pkgcore.ebuild import repository
+
+from snakeoil.lists import unstable_unique
+from snakeoil.compatibility import all
+from itertools import chain
+
+class OverlayRepo(prototype.tree):
+
+ """
+ Collapse multiple trees into one.
+
+ Eclass dir is shared, the first package leftmost returned.
+ """
+
+ pkgcore_config_type = ConfigHint({'trees': 'refs:repo'}, typename='repo')
+
+ configured = False
+ configurables = ("domain", "settings",)
+ configure = repository.ConfiguredTree
+
+ # sucks a bit, need to work something better out here
+ format_magic = "ebuild_src"
+
+ def __init__(self, trees, **kwds):
+ """
+ @param trees: L{pkgcore.ebuild.repository.UnconfiguredTree} instances
+ to combine.
+ """
+
+ if not trees or len(trees) < 2:
+ raise errors.InstantiationError(
+ "Must specify at least two pathes to ebuild trees to overlay")
+
+ self.trees = tuple(trees)
+ self._rv_trees = tuple(reversed(trees))
+ self._version_owners = {}
+ prototype.tree.__init__(self)
+
+ def _get_categories(self, category=None):
+ if category is not None:
+ updates = (tree.categories.get(category) for tree in self.trees)
+ updates = [x for x in updates if x is not None]
+ if not updates:
+ raise KeyError(category)
+ else:
+ updates = [tree.categories for tree in self.trees]
+ return tuple(set(chain(*updates)))
+
+ def _get_packages(self, category):
+ updates = (tree.packages.get(category) for tree in self.trees)
+ updates = [x for x in updates if x is not None]
+ if not updates:
+ raise KeyError(category)
+ return tuple(set(chain(*updates)))
+
+ def _get_versions(self, catpkg):
+ ver_owners = {}
+ fails = 0
+ i = iter(self._rv_trees)
+ for tree in self._rv_trees:
+ new_vers = tree.versions.get(catpkg)
+ if new_vers is not None:
+ ver_owners.update((v, tree) for v in new_vers)
+ else:
+ fails += 1
+ if fails == len(self._rv_trees):
+ raise KeyError(catpkg)
+ self._version_owners[catpkg] = tuple(ver_owners.iteritems())
+ return tuple(ver_owners)
+
+ def _internal_gen_candidates(self, candidates, sorter):
+ for cp in candidates:
+ if cp not in self.versions:
+ self.versions.get(cp)
+ for pkg in sorter(repo[cp + (ver,)]
+ for ver, repo in self._version_owners.get(cp, ())):
+ yield pkg
+
+ def _visibility_limiters(self):
+ return [x for r in self.trees for x in r.default_visibility_limiters]
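+
+
+# A rough instantiation sketch; main_tree and overlay_tree stand in for
+# already-configured UnconfiguredTree instances:
+#
+#   combined = OverlayRepo((main_tree, overlay_tree))
+#
+# As the class docstring notes, when a version exists in several trees the
+# leftmost tree's package is the one returned.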
diff --git a/pkgcore/ebuild/portage_conf.py b/pkgcore/ebuild/portage_conf.py
new file mode 100644
index 0000000..37018ab
--- /dev/null
+++ b/pkgcore/ebuild/portage_conf.py
@@ -0,0 +1,491 @@
+# Copyright: 2006-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""make.conf translator.
+
+Converts portage configuration files into L{pkgcore.config} form.
+"""
+
+import os
+
+from pkgcore.config import basics, configurable
+from pkgcore import const
+from pkgcore.pkgsets.glsa import SecurityUpgrades
+
+from snakeoil.osutils import normpath, abspath, listdir_files, pjoin
+from snakeoil.demandload import demandload
+demandload(globals(),
+ 'errno',
+ 'pkgcore.config:errors',
+ 'pkgcore.log:logger',
+ 'ConfigParser:ConfigParser',
+ 'snakeoil.fileutils:read_bash_dict',
+ 'pkgcore.util:bzip2',
+ 'snakeoil.xml:etree',
+)
+
+
+def my_convert_hybrid(manager, val, arg_type):
+ """Modified convert_hybrid using a sequence of strings for section_refs."""
+ if arg_type.startswith('refs:'):
+ subtype = 'ref:' + arg_type.split(':', 1)[1]
+ return list(
+ basics.LazyNamedSectionRef(manager, subtype, name)
+ for name in val)
+ return basics.convert_hybrid(manager, val, arg_type)
+
+
+@configurable({'ebuild_repo': 'ref:repo', 'vdb': 'ref:repo',
+ 'profile': 'ref:profile'}, typename='pkgset')
+def SecurityUpgradesViaProfile(ebuild_repo, vdb, profile):
+ """
+ generate a GLSA vuln. pkgset limited by profile
+
+ @param ebuild_repo: L{pkgcore.ebuild.repository.UnconfiguredTree} instance
+ @param vdb: L{pkgcore.repository.prototype.tree} instance that is the livefs
+ @param profile: L{pkgcore.ebuild.profiles} instance
+ """
+ arch = profile.arch
+ if arch is None:
+ raise errors.InstantiationError("arch wasn't set in profiles")
+ return SecurityUpgrades(ebuild_repo, vdb, arch)
+
+
+def add_layman_syncers(new_config, rsync_opts, overlay_paths, config_root='/',
+ default_loc="etc/layman/layman.cfg",
+ default_conf='overlays.xml'):
+
+ try:
+ f = open(pjoin(config_root, default_loc))
+ except IOError, ie:
+ if ie.errno != errno.ENOENT:
+ raise
+ return {}
+
+ c = ConfigParser()
+ c.readfp(f)
+ storage_loc = c.get('MAIN', 'storage')
+ overlay_xml = pjoin(storage_loc, default_conf)
+ del c
+
+ try:
+ xmlconf = etree.parse(overlay_xml)
+ except IOError, ie:
+ if ie.errno != errno.ENOENT:
+ raise
+ return {}
+ overlays = xmlconf.getroot()
+ if overlays.tag != 'overlays':
+ return {}
+
+ new_syncers = {}
+ for overlay in overlays.findall('overlay'):
+ name = overlay.get('name')
+ src_type = overlay.get('type')
+ uri = overlay.get('src')
+ if None in (src_type, uri, name):
+ continue
+ path = pjoin(storage_loc, name)
+ if not os.path.exists(path):
+ continue
+ elif path not in overlay_paths:
+ continue
+ if src_type == 'tar':
+ continue
+ elif src_type == 'svn':
+ if uri.startswith('http://') or uri.startswith('https://'):
+ uri = 'svn+' + uri
+ elif src_type != 'rsync':
+ uri = '%s+%s' % (src_type, uri)
+
+ new_syncers[path] = make_syncer(new_config, path, uri, rsync_opts, False)
+ return new_syncers
+
+
+def isolate_rsync_opts(options):
+ """
+ pop the misc RSYNC related options littered in make.conf, returning
+ a base dict of rsync options (the SYNC value itself is left for the caller)
+ """
+ base = {}
+ extra_opts = []
+
+ extra_opts.extend(options.pop('PORTAGE_RSYNC_EXTRA_OPTS', '').split())
+
+ ratelimit = options.pop('RSYNC_RATELIMIT', None)
+ if ratelimit is not None:
+ extra_opts.append('--bwlimit=%s' % ratelimit.strip())
+
+ # keep in mind this pops both potential vals.
+ retries = options.pop('PORTAGE_RSYNC_RETRIES',
+ options.pop('RSYNC_RETRIES', None))
+ if retries is not None:
+ base['retries'] = retries.strip()
+ timeout = options.pop('RSYNC_TIMEOUT', None)
+ if timeout is not None:
+ base['timeout'] = timeout.strip()
+
+ excludes = options.pop('RSYNC_EXCLUDEFROM', None)
+ if excludes is not None:
+ extra_opts.extend('--exclude-from=%s' % x
+ for x in excludes.split())
+
+ if extra_opts:
+ base['extra_opts'] = tuple(extra_opts)
+
+ return base
+
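+# For instance (values are placeholders):
+#
+#   conf = {"PORTAGE_RSYNC_EXTRA_OPTS": "--progress", "RSYNC_TIMEOUT": "180",
+#           "SYNC": "rsync://rsync.gentoo.org/gentoo-portage"}
+#   isolate_rsync_opts(conf)
+#   # -> {"timeout": "180", "extra_opts": ("--progress",)}
+#   # the rsync keys are popped from conf; SYNC itself is untouched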
+
+def make_syncer(new_config, basedir, sync_uri, rsync_opts,
+ allow_timestamps=True):
+ d = {'basedir': basedir, 'uri': sync_uri}
+ if sync_uri.startswith('rsync'):
+ d.update(rsync_opts)
+ if allow_timestamps:
+ d['class'] = 'pkgcore.sync.rsync.rsync_timestamp_syncer'
+ else:
+ d['class'] = 'pkgcore.sync.rsync.rsync_syncer'
+ else:
+ d['class'] = 'pkgcore.sync.base.GenericSyncer'
+
+ name = '%s syncer' % basedir
+ new_config[name] = basics.AutoConfigSection(d)
+ return name
+
+
+def add_sets(config, root, portage_base_dir):
+ config["world"] = basics.AutoConfigSection({
+ "class": "pkgcore.pkgsets.filelist.WorldFile",
+ "location": pjoin(root, const.WORLD_FILE)})
+ config["system"] = basics.AutoConfigSection({
+ "class": "pkgcore.pkgsets.system.SystemSet",
+ "profile": "profile"})
+ config["installed"] = basics.AutoConfigSection({
+ "class": "pkgcore.pkgsets.installed.Installed",
+ "vdb": "vdb"})
+ config["versioned-installed"] = basics.AutoConfigSection({
+ "class": "pkgcore.pkgsets.installed.VersionedInstalled",
+ "vdb": "vdb"})
+
+ set_fp = pjoin(portage_base_dir, "sets")
+ try:
+ for setname in listdir_files(set_fp):
+ # Potential for name clashes here, those will just make
+ # the set not show up in config.
+ if setname in ("system", "world"):
+ logger.warn("user defined set %s is disallowed; ignoring" %
+ pjoin(set_fp, setname))
+ continue
+ config[setname] = basics.AutoConfigSection({
+ "class":"pkgcore.pkgsets.filelist.FileList",
+ "location":pjoin(set_fp, setname)})
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+
+
+def add_profile(config, base_path):
+ make_profile = pjoin(base_path, 'make.profile')
+ try:
+ profile = normpath(abspath(pjoin(
+ base_path, os.readlink(make_profile))))
+ except OSError, oe:
+ if oe.errno in (errno.ENOENT, errno.EINVAL):
+ raise errors.InstantiationError(
+ "%s must be a symlink pointing to a real target" % (
+ make_profile,))
+ raise errors.InstantiationError(
+ "%s: unexepect error- %s" % (make_profile, oe.strerror))
+
+ psplit = list(piece for piece in profile.split(os.path.sep) if piece)
+ # poor man's rindex.
+ try:
+ profile_start = psplit.index('profiles')
+ except ValueError:
+ raise errors.InstantiationError(
+ '%s expands to %s, but no profile detected' % (
+ pjoin(base_path, 'make.profile'), profile))
+
+ config["profile"] = basics.AutoConfigSection({
+ "class": "pkgcore.ebuild.profiles.OnDiskProfile",
+ "basepath": pjoin("/", *psplit[:profile_start + 1]),
+ "profile": pjoin(*psplit[profile_start + 1:])})
+
+
+def add_fetcher(config, conf_dict, distdir):
+ fetchcommand = conf_dict.pop("FETCHCOMMAND")
+ resumecommand = conf_dict.pop("RESUMECOMMAND", fetchcommand)
+
+ # copy it to prevent modification.
+ fetcher_dict = dict(conf_dict)
+ # map a config arg to an obj arg, pop a few values
+ if "FETCH_ATTEMPTS" in fetcher_dict:
+ fetcher_dict["attempts"] = fetcher_dict.pop("FETCH_ATTEMPTS")
+ fetcher_dict.pop("readonly", None)
+ fetcher_dict.update(
+ {"class": "pkgcore.fetch.custom.fetcher",
+ "distdir": distdir,
+ "command": fetchcommand,
+ "resume_command": resumecommand
+ })
+ config["fetcher"] = basics.AutoConfigSection(fetcher_dict)
+
+
+
+@configurable({'location': 'str'}, typename='configsection')
+def config_from_make_conf(location="/etc/"):
+ """
+ generate a config from a file location
+
+ @param location: location the portage configuration is based in,
+ defaults to /etc
+ """
+
+ # this actually differs from portage parsing- we allow
+ # make.globals to provide vars used in make.conf, portage keeps
+ # them separate (kind of annoying)
+
+ config_root = os.environ.get("CONFIG_ROOT", "/")
+ base_path = pjoin(config_root, location.strip("/"))
+ portage_base = pjoin(base_path, "portage")
+
+ # this isn't preserving incremental behaviour for features/use
+ # unfortunately
+ conf_dict = read_bash_dict(pjoin(base_path, "make.globals"))
+ conf_dict.update(read_bash_dict(
+ pjoin(base_path, "make.conf"), vars_dict=conf_dict,
+ sourcing_command="source"))
+ conf_dict.setdefault("PORTDIR", "/usr/portage")
+ root = os.environ.get("ROOT", conf_dict.get("ROOT", "/"))
+ gentoo_mirrors = list(
+ x+"/distfiles" for x in conf_dict.pop("GENTOO_MIRRORS", "").split())
+ if not gentoo_mirrors:
+ gentoo_mirrors = None
+
+ features = conf_dict.get("FEATURES", "").split()
+
+ new_config = {}
+
+ # sets...
+ add_sets(new_config, root, portage_base)
+ add_profile(new_config, base_path)
+
+ kwds = {"class": "pkgcore.vdb.repository",
+ "location": pjoin(root, 'var', 'db', 'pkg')}
+ kwds["cache_location"] = pjoin(config_root, 'var', 'cache', 'edb',
+ 'dep', 'var', 'db', 'pkg')
+ new_config["vdb"] = basics.AutoConfigSection(kwds)
+
+ portdir = normpath(conf_dict.pop("PORTDIR").strip())
+ portdir_overlays = [
+ normpath(x) for x in conf_dict.pop("PORTDIR_OVERLAY", "").split()]
+
+
+ # define the eclasses now.
+ all_ecs = []
+ for x in [portdir] + portdir_overlays:
+ ec_path = pjoin(x, "eclass")
+ new_config[ec_path] = basics.AutoConfigSection({
+ "class": "pkgcore.ebuild.eclass_cache.cache",
+ "path": ec_path,
+ "portdir": portdir})
+ all_ecs.append(ec_path)
+
+ new_config['ebuild-repo-common'] = basics.AutoConfigSection({
+ 'class': 'pkgcore.ebuild.repository.tree',
+ 'default_mirrors': gentoo_mirrors,
+ 'inherit-only': True,
+ 'eclass_cache': 'eclass stack'})
+ new_config['cache-common'] = basics.AutoConfigSection({
+ 'class': 'pkgcore.cache.flat_hash.database',
+ 'inherit-only': True,
+ 'location': pjoin(config_root, 'var', 'cache', 'edb', 'dep'),
+ })
+
+
+ # used by PORTDIR syncer, and any layman defined syncers
+ rsync_opts = isolate_rsync_opts(conf_dict)
+ portdir_syncer = conf_dict.pop("SYNC", None)
+
+ if portdir_overlays and '-layman-sync' not in features:
+ overlay_syncers = add_layman_syncers(new_config, rsync_opts,
+ portdir_overlays, config_root=config_root)
+ else:
+ overlay_syncers = {}
+
+ for tree_loc in portdir_overlays:
+ kwds = {
+ 'inherit': ('ebuild-repo-common',),
+ 'location': tree_loc,
+ 'cache': (basics.AutoConfigSection({
+ 'inherit': ('cache-common',),
+ 'label': tree_loc}),),
+ 'class': 'pkgcore.ebuild.repository.SlavedTree',
+ 'parent_repo': 'portdir'
+ }
+ if tree_loc in overlay_syncers:
+ kwds['sync'] = overlay_syncers[tree_loc]
+ new_config[tree_loc] = basics.AutoConfigSection(kwds)
+
+ rsync_portdir_cache = os.path.exists(pjoin(portdir, "metadata", "cache")) \
+ and "metadata-transfer" not in features
+
+ # if a metadata cache exists, use it
+ if rsync_portdir_cache:
+ new_config["portdir cache"] = basics.AutoConfigSection({
+ 'class': 'pkgcore.cache.metadata.database',
+ 'location': portdir,
+ 'label': 'portdir cache',
+ 'readonly': 'yes'})
+ else:
+ new_config["portdir cache"] = basics.AutoConfigSection({
+ 'inherit': ('cache-common',),
+ 'label': portdir})
+
+ base_portdir_config = {}
+ if portdir_syncer is not None:
+ base_portdir_config = {"sync": make_syncer(new_config, portdir,
+ portdir_syncer, rsync_opts)}
+
+ # setup portdir.
+ cache = ('portdir cache',)
+ if not portdir_overlays:
+ d = dict(base_portdir_config)
+ d['inherit'] = ('ebuild-repo-common',)
+ d['location'] = portdir
+ d['cache'] = ('portdir cache',)
+
+ new_config[portdir] = basics.FakeIncrementalDictConfigSection(
+ my_convert_hybrid, d)
+ new_config["eclass stack"] = basics.section_alias(
+ pjoin(portdir, 'eclass'), 'eclass_cache')
+ new_config['portdir'] = basics.section_alias(portdir, 'repo')
+ new_config['repo-stack'] = basics.section_alias(portdir, 'repo')
+ else:
+ # There's always at least one (portdir) so this means len(all_ecs) > 1
+ new_config['%s cache' % (portdir,)] = basics.AutoConfigSection({
+ 'inherit': ('cache-common',),
+ 'label': portdir})
+ cache = ('portdir cache',)
+ if rsync_portdir_cache:
+ cache = ('%s cache' % (portdir,),) + cache
+
+ d = dict(base_portdir_config)
+ d['inherit'] = ('ebuild-repo-common',)
+ d['location'] = portdir
+ d['cache'] = cache
+
+ new_config[portdir] = basics.FakeIncrementalDictConfigSection(
+ my_convert_hybrid, d)
+
+ if rsync_portdir_cache:
+ # created higher up; two caches, writes to the local,
+ # reads (when possible) from pregenned metadata
+ cache = ('portdir cache',)
+ else:
+ cache = ('%s cache' % (portdir,),)
+ new_config['portdir'] = basics.FakeIncrementalDictConfigSection(
+ my_convert_hybrid, {
+ 'inherit': ('ebuild-repo-common',),
+ 'location': portdir,
+ 'cache': cache,
+ 'eclass_cache': pjoin(portdir, 'eclass')})
+
+ # reverse the ordering so that overlays override portdir
+ # (portage default)
+ new_config["eclass stack"] = basics.FakeIncrementalDictConfigSection(
+ my_convert_hybrid, {
+ 'class': 'pkgcore.ebuild.eclass_cache.StackedCaches',
+ 'eclassdir': pjoin(portdir, "eclass"),
+ 'caches': tuple(reversed(all_ecs))})
+
+ new_config['repo-stack'] = basics.FakeIncrementalDictConfigSection(
+ my_convert_hybrid, {
+ 'class': 'pkgcore.ebuild.overlay_repository.OverlayRepo',
+ 'trees': tuple(reversed([portdir] + portdir_overlays))})
+
+ # disabled code for using portage config defined cache modules;
+ # need to re-examine and see if they're still in sync with our cache subsystem
+# if os.path.exists(base_path+"portage/modules"):
+# pcache = read_dict(
+# base_path+"portage/modules").get("portdbapi.auxdbmodule", None)
+
+# cache_config = {"type": "cache",
+# "location": "%s/var/cache/edb/dep" %
+# config_root.rstrip("/"),
+# "label": "make_conf_overlay_cache"}
+# if pcache is None:
+# if portdir_overlays or ("metadata-transfer" not in features):
+# cache_config["class"] = "pkgcore.cache.flat_hash.database"
+# else:
+# cache_config["class"] = "pkgcore.cache.metadata.database"
+# cache_config["location"] = portdir
+# cache_config["readonly"] = "true"
+# else:
+# cache_config["class"] = pcache
+#
+# new_config["cache"] = basics.ConfigSectionFromStringDict(
+# "cache", cache_config)
+
+
+ new_config['vuln'] = basics.AutoConfigSection({
+ 'class': SecurityUpgradesViaProfile,
+ 'ebuild_repo': 'repo-stack',
+ 'vdb': 'vdb',
+ 'profile': 'profile'})
+ new_config['glsa'] = basics.section_alias('vuln',
+ SecurityUpgradesViaProfile.pkgcore_config_type.typename)
+ #binpkg.
+ pkgdir = conf_dict.pop('PKGDIR', None)
+ default_repos = ('repo-stack',)
+ if pkgdir is not None:
+ try:
+ pkgdir = abspath(pkgdir)
+ except OSError, oe:
+ if oe.errno != errno.ENOENT:
+ raise
+ pkgdir = None
+ # If python's native bz2 module isn't available, the TarFile.bz2open
+ # call the binpkg repository relies on will fail.
+ if pkgdir and os.path.isdir(pkgdir):
+ if not bzip2.native:
+ logger.warn("python's bz2 module isn't available: "
+ "disabling binpkg support")
+ else:
+ new_config['binpkg'] = basics.ConfigSectionFromStringDict({
+ 'class': 'pkgcore.binpkg.repository.tree',
+ 'location': pkgdir})
+ default_repos += ('binpkg',)
+
+ # now add the fetcher- we delay it till here to clean out the environ
+ # it passes to the command.
+ # *everything* in the conf_dict must be str values also.
+ distdir = normpath(conf_dict.pop("DISTDIR", pjoin(portdir, "distdir")))
+ add_fetcher(new_config, conf_dict, distdir)
+
+ # finally... domain.
+ conf_dict.update({
+ 'class': 'pkgcore.ebuild.domain.domain',
+ 'repositories': default_repos,
+ 'fetcher': 'fetcher',
+ 'default': True,
+ 'vdb': ('vdb',),
+ 'profile': 'profile',
+ 'name': 'livefs domain',
+ 'root':root})
+ for f in (
+ "package.mask", "package.unmask", "package.keywords", "package.use",
+ "bashrc"):
+ fp = pjoin(portage_base, f)
+ try:
+ os.stat(fp)
+ except OSError, oe:
+ if oe.errno != errno.ENOENT:
+ raise
+ else:
+ conf_dict[f] = fp
+
+ new_config['livefs domain'] = basics.FakeIncrementalDictConfigSection(
+ my_convert_hybrid, conf_dict)
+
+ return new_config
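+
+
+# The result is a plain mapping of section name -> config section; a rough
+# sketch of what a caller sees (the exact key set depends on the make.conf
+# being translated):
+#
+#   sections = config_from_make_conf("/etc/")
+#   # expect keys such as 'vdb', 'portdir', 'repo-stack', 'fetcher',
+#   # 'profile' and 'livefs domain' among the returned sections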
diff --git a/pkgcore/ebuild/processor.py b/pkgcore/ebuild/processor.py
new file mode 100644
index 0000000..047d00e
--- /dev/null
+++ b/pkgcore/ebuild/processor.py
@@ -0,0 +1,703 @@
+# Copyright: 2004-2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+
+"""
+low level ebuild processor.
+
+This basically is a coprocessor that controls a bash daemon for actual
+ebuild execution. Via this, the bash side can reach into the python
+side (and vice versa), enabling remote trees (piping data from python
+side into bash side for example).
+
+A couple of processors are left lingering while pkgcore is running for
+the purpose of avoiding spawning overhead; this (and the general
+design) reduces regen time by over 40% compared to portage-2.1
+"""
+
+# this needs work. it's been pruned heavily from what ebd used
+# originally, but it still isn't what I would define as 'right'
+
+
+__all__ = (
+ "request_ebuild_processor", "release_ebuild_processor", "EbuildProcessor"
+ "UnhandledCommand", "expected_ebuild_env")
+
+
+inactive_ebp_list = []
+active_ebp_list = []
+
+import pkgcore.spawn, os, signal, errno, sys
+from pkgcore.const import (
+ depends_phase_path, EBUILD_DAEMON_PATH, EBUILD_ENV_PATH, EBD_ENV_PATH)
+from pkgcore.os_data import portage_uid, portage_gid
+
+from snakeoil.currying import post_curry, partial
+from snakeoil.demandload import demandload
+demandload(globals(),
+ 'pkgcore.log:logger',
+ 'snakeoil:osutils',
+)
+
+import traceback
+
+def shutdown_all_processors():
+ """kill off all known processors"""
+ try:
+ while active_ebp_list:
+ try:
+ active_ebp_list.pop().shutdown_processor(
+ ignore_keyboard_interrupt=True)
+ except (IOError, OSError):
+ pass
+
+ while inactive_ebp_list:
+ try:
+ inactive_ebp_list.pop().shutdown_processor(
+ ignore_keyboard_interrupt=True)
+ except (IOError, OSError):
+ pass
+ except Exception,e:
+ traceback.print_exc()
+ print e
+ raise
+
+pkgcore.spawn.atexit_register(shutdown_all_processors)
+
+def request_ebuild_processor(userpriv=False, sandbox=None, fakeroot=False,
+ save_file=None):
+ """
+ request an ebuild_processor instance, creating a new one if needed.
+
+ Note that fakeroot processes are B{never} reused due to the fact
+ the fakeroot env becomes localized to the pkg it's handling.
+
+ @return: L{EbuildProcessor}
+ @param userpriv: should the processor drop privileges to
+ L{pkgcore.os_data.portage_gid} and L{pkgcore.os_data.portage_uid}?
+ @param sandbox: should the processor be sandboxed?
+ @param fakeroot: should the processor be fakerooted? This option is
+ mutually exclusive to sandbox, and requires save_file to be set.
+ @param save_file: location to store fakeroot state dumps
+ """
+
+ if sandbox is None:
+ sandbox = pkgcore.spawn.is_sandbox_capable()
+
+ if not fakeroot:
+ for x in inactive_ebp_list:
+ if x.userprived() == userpriv and (x.sandboxed() or not sandbox):
+ if not x.is_alive:
+ inactive_ebp_list.remove(x)
+ continue
+ inactive_ebp_list.remove(x)
+ active_ebp_list.append(x)
+ return x
+
+ e = EbuildProcessor(userpriv, sandbox, fakeroot, save_file)
+ active_ebp_list.append(e)
+ return e
+
+
+def release_ebuild_processor(ebp):
+ """
+ the inverse of request_ebuild_processor.
+
+ Any processor requested via request_ebuild_processor B{must} be released
+ via this function once it's no longer in use.
+ This includes fakerooted processors.
+
+ @param ebp: L{EbuildProcessor} instance
+ @return: boolean indicating release results- if the processor isn't known
+ as active, False is returned.
+ If a processor isn't known as active, this means either calling
+ error or an internal error.
+ """
+
+ try:
+ active_ebp_list.remove(ebp)
+ except ValueError:
+ return False
+
+ assert ebp not in inactive_ebp_list
+ # if it's a fakeroot'd process, we throw it away.
+ # it's not useful outside of a chain of calls
+ if ebp.onetime() or ebp.locked:
+ # ok, so the thing is not reusable either way.
+ ebp.shutdown_processor()
+ else:
+ inactive_ebp_list.append(ebp)
+ return True
+
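+# Typical lifecycle, mirroring what the ebuild repository's metadata
+# regeneration does (sketch only, error handling trimmed):
+#
+#   ebp = request_ebuild_processor()
+#   try:
+#       keys = ebp.get_keys(pkg, eclass_cache)
+#   finally:
+#       release_ebuild_processor(ebp)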
+
+class ProcessingInterruption(Exception):
+ pass
+
+class FinishedProcessing(ProcessingInterruption):
+
+ def __init__(self, val, msg=None):
+ ProcessingInterruption.__init__(
+ self, "Finished processing with val, %s" % (val,))
+ self.val, self.msg = val, msg
+
+class UnhandledCommand(ProcessingInterruption):
+
+ def __init__(self, line=None):
+ ProcessingInterruption.__init__(
+ self, "unhandled command, %s" % (line,))
+ self.line = line
+
+def chuck_KeyboardInterrupt(*arg):
+ raise KeyboardInterrupt("ctrl+c encountered")
+
+def chuck_UnhandledCommand(processor, line):
+ raise UnhandledCommand(line)
+
+def chuck_StoppingCommand(val, processor, *args):
+ if callable(val):
+ raise FinishedProcessing(val(args[0]))
+ raise FinishedProcessing(val)
+
+
+class InitializationError(Exception):
+ pass
+
+
+
+
+class EbuildProcessor(object):
+
+ """abstraction of a running ebuild.sh instance.
+
+ Contains the env, functions, etc that ebuilds expect.
+ """
+
+ def __init__(self, userpriv, sandbox, fakeroot, save_file):
+ """
+ @param sandbox: enables a sandboxed processor
+ @param userpriv: enables a userpriv'd processor
+ @param fakeroot: enables a fakeroot'd processor-
+ this is a mutually exclusive option to sandbox, and
+ requires userpriv to be enabled. Violating this will
+ result in nastiness.
+ """
+
+ self.lock()
+ self.ebd = EBUILD_DAEMON_PATH
+ spawn_opts = {}
+
+ if fakeroot and (sandbox or not userpriv):
+ traceback.print_stack()
+ print "warning, was asking to enable fakeroot but-"
+ print "sandbox", sandbox, "userpriv", userpriv
+ print "this isn't valid. bailing"
+ raise InitializationError("cannot initialize with sandbox and fakeroot")
+
+ if userpriv:
+ self.__userpriv = True
+ spawn_opts.update({
+ "uid":portage_uid, "gid":portage_gid,
+ "groups":[portage_gid], "umask":002})
+ else:
+ if pkgcore.spawn.is_userpriv_capable():
+ spawn_opts.update({"gid":portage_gid,
+ "groups":[0, portage_gid]})
+ self.__userpriv = False
+
+ # open the pipes to be used for chatting with the new daemon
+ cread, cwrite = os.pipe()
+ dread, dwrite = os.pipe()
+ self.__sandbox = False
+ self.__fakeroot = False
+
+ # since it's questionable which spawn method we'll use (if
+ # sandbox or fakeroot fex), we ensure the bashrc is invalid.
+ env = dict((x, "/etc/portage/spork/not/valid/ha/ha")
+ for x in ("BASHRC", "BASH_ENV"))
+ args = []
+ if sandbox:
+ if not pkgcore.spawn.is_sandbox_capable():
+ raise ValueError("spawn lacks sandbox capabilities")
+ if fakeroot:
+ raise InitializationError('fakeroot was on, but sandbox was also on')
+ self.__sandbox = True
+ spawn_func = pkgcore.spawn.spawn_sandbox
+# env.update({"SANDBOX_DEBUG":"1", "SANDBOX_DEBUG_LOG":"/var/tmp/test"})
+
+ elif fakeroot:
+ if not pkgcore.spawn.is_fakeroot_capable():
+ raise ValueError("spawn lacks fakeroot capabilities")
+ self.__fakeroot = True
+ spawn_func = pkgcore.spawn.spawn_fakeroot
+ args.append(save_file)
+ else:
+ spawn_func = pkgcore.spawn.spawn
+
+ # force to a neutral dir so that sandbox/fakeroot won't explode if
+ # run from a nonexistent dir
+ spawn_opts["chdir"] = "/tmp"
+ # little trick. we force the pipes to be high up fd wise so
+ # nobody stupidly hits 'em.
+ max_fd = min(pkgcore.spawn.max_fd_limit, 1024)
+ env.update({
+ "EBD_READ_FD": str(max_fd -2), "EBD_WRITE_FD": str(max_fd -1)})
+ self.pid = spawn_func("/bin/bash %s daemonize" % self.ebd, \
+ fd_pipes={0:0, 1:1, 2:2, max_fd-2:cread, max_fd-1:dwrite}, \
+ returnpid=True, env=env, *args, **spawn_opts)[0]
+
+ os.close(cread)
+ os.close(dwrite)
+ self.ebd_write = os.fdopen(cwrite, "w")
+ self.ebd_read = os.fdopen(dread, "r")
+
+ # basically a quick "yo" to the daemon
+ self.write("dude?")
+ if not self.expect("dude!"):
+ print "error in server coms, bailing."
+ raise InitializationError(
+ "expected 'dude!' response from ebd, which wasn't received. "
+ "likely a bug")
+ self.write(EBD_ENV_PATH)
+ self.write(sys.executable)
+ self.write(osutils.normpath(osutils.abspath(osutils.join(
+ pkgcore.__file__, os.pardir, os.pardir))))
+ if self.__sandbox:
+ self.write("sandbox_log?")
+ self.__sandbox_log = self.read().split()[0]
+ self.dont_export_vars = self.read().split()
+ # locking isn't used much, but w/ threading this will matter
+ self.unlock()
+
+
+ def prep_phase(self, phase, env, sandbox=None, logging=None):
+ """
+ Utility function, to initialize the processor for a phase.
+
+ Used to combine multiple calls into one, leaving the processor
+ in a state where all that remains is a start_processing call,
+ then the generic_handler event loop.
+
+ @param phase: phase to prep for
+ @type phase: str
+ @param env: mapping of the environment to prep the processor with
+ @param sandbox: should the sandbox be enabled?
+ @param logging: None, or a filepath to log the output from the
+ processor to
+ @return: True for success, False for everything else
+ """
+
+ self.write("process_ebuild %s" % phase)
+ if not self.send_env(env):
+ return False
+ if sandbox:
+ self.set_sandbox_state(sandbox)
+ if logging:
+ if not self.set_logfile(logging):
+ return False
+ return True
+
+ def sandboxed(self):
+ """is this instance sandboxed?"""
+ return self.__sandbox
+
+ def userprived(self):
+ """is this instance userprived?"""
+ return self.__userpriv
+
+ def fakerooted(self):
+ """is this instance fakerooted?"""
+ return self.__fakeroot
+
+ def onetime(self):
+ """Is this instance going to be discarded after usage (fakerooted)?"""
+ return self.__fakeroot
+
+ def write(self, string, flush=True, disable_runtime_exceptions=False):
+ """send something to the bash side.
+
+ @param string: string to write to the bash processor.
+ All strings written are automatically \\n terminated.
+ @param flush: boolean controlling whether the data is flushed
+ immediately. Disabling flush is useful when dumping large
+ amounts of data.
+ """
+ string = str(string)
+ try:
+ if string == "\n":
+ self.ebd_write.write(string)
+ else:
+ self.ebd_write.write(string +"\n")
+ if flush:
+ self.ebd_write.flush()
+ except IOError, ie:
+ if ie.errno == errno.EPIPE and not disable_runtime_exceptions:
+ raise RuntimeError(ie)
+ raise
+
+ def expect(self, want):
+ """read from the daemon, check if the returned string is expected.
+
+ @param want: string we're expecting
+ @return: boolean, was what was read == want?
+ """
+ got = self.read()
+ return want == got.rstrip("\n")
+
+ def read(self, lines=1, ignore_killed=False):
+ """
+ read data from the daemon. Shouldn't be called except internally
+ """
+ mydata = []
+ while lines > 0:
+ mydata.append(self.ebd_read.readline())
+ if mydata[-1].startswith("killed"):
+# self.shutdown_processor()
+ chuck_KeyboardInterrupt()
+ lines -= 1
+ return "\n".join(mydata)
+
+ def sandbox_summary(self, move_log=False):
+ """
+ if the instance is sandboxed, print the sandbox access summary
+
+ @param move_log: location to move the sandbox log to if a failure
+ occurred
+ """
+ if not os.path.exists(self.__sandbox_log):
+ self.write("end_sandbox_summary")
+ return 0
+ violations = [x.strip()
+ for x in open(self.__sandbox_log, "r") if x.strip()]
+ if not violations:
+ self.write("end_sandbox_summary")
+ return 0
+ if not move_log:
+ move_log = self.__sandbox_log
+ elif move_log != self.__sandbox_log:
+ myf = open(move_log, "w")
+ for x in violations:
+ myf.write(x+"\n")
+ myf.close()
+ # XXX this is fugly, use a colorizer or something
+ # (but it is better than "from output import red" (portage's output))
+ def red(text):
+ return '\x1b[31;1m%s\x1b[39;49;00m' % (text,)
+ self.ebd_write.write(red(
+ "--------------------------- ACCESS VIOLATION SUMMARY "
+ "---------------------------")+"\n")
+ self.ebd_write.write(red("LOG FILE = \"%s\"" % move_log)+"\n\n")
+ for x in violations:
+ self.ebd_write.write(x+"\n")
+ self.write(red(
+ "-----------------------------------------------------"
+ "---------------------------")+"\n")
+ self.write("end_sandbox_summary")
+ try:
+ os.remove(self.__sandbox_log)
+ except (IOError, OSError), e:
+ print "exception caught when cleansing sandbox_log=%s" % str(e)
+ return 1
+
+ def preload_eclasses(self, ec_file):
+ """
+ Preload an eclass into a bash function.
+
+ Avoids the cost of going to disk on inherit. Preloading eutils
+ (which is heavily inherited) speeds up regen times for
+ example.
+
+ @param ec_file: filepath of eclass to preload
+ @return: boolean, True for success
+ """
+ if not os.path.exists(ec_file):
+ return 1
+ self.write("preload_eclass %s" % ec_file)
+ if self.expect("preload_eclass succeeded"):
+ self.preloaded_eclasses = True
+ return True
+ return False
+
+ def lock(self):
+ """
+ lock the processor. Currently doesn't block any access, but will in the future.
+ """
+ self.processing_lock = True
+
+ def unlock(self):
+ """
+ unlock the processor
+ """
+ self.processing_lock = False
+
+ @property
+ def locked(self):
+ """
+ is the processor locked?
+ """
+ return self.processing_lock
+
+ @property
+ def is_alive(self):
+ """
+ whether the processor is known to be alive.
+
+ Verified by checking that a pid is recorded and that the process
+ still responds to signal 0.
+ """
+ try:
+ if self.pid is None:
+ return False
+ try:
+ os.kill(self.pid, 0)
+ return True
+ except OSError:
+ # pid is dead.
+ pass
+ self.pid = None
+ return False
+
+ except AttributeError:
+ # thrown only if a failure occurred during instantiation.
+ return False
+
+ def shutdown_processor(self, ignore_keyboard_interrupt=False):
+ """
+ tell the daemon to shut itself down, and mark this instance as dead
+ """
+ try:
+ if self.is_alive:
+ self.write("shutdown_daemon", disable_runtime_exceptions=True)
+ self.ebd_write.close()
+ self.ebd_read.close()
+ else:
+ return
+ except (IOError, OSError, ValueError):
+ os.kill(self.pid, signal.SIGTERM)
+
+ # now we wait.
+ try:
+ os.waitpid(self.pid, 0)
+ except KeyboardInterrupt:
+ if not ignore_keyboard_interrupt:
+ raise
+
+ # currently, this assumes all went well.
+ # which isn't always true.
+ self.pid = None
+
+ def set_sandbox_state(self, state):
+ """
+ tell the daemon whether to enable the sandbox, or disable it
+ @param state: boolean, if True enable sandbox
+ """
+ if state:
+ self.write("set_sandbox_state 1")
+ else:
+ self.write("set_sandbox_state 0")
+
+ def send_env(self, env_dict):
+ """
+ transfer the ebuild's desired env (env_dict) to the running daemon
+
+ @type env_dict: mapping with string keys and values.
+ @param env_dict: the bash env.
+ """
+
+ self.write("start_receiving_env\n")
+ exported_keys = []
+ data = []
+ for x in env_dict:
+ if x not in self.dont_export_vars:
+ if not x[0].isalpha():
+ raise KeyError(x)
+ s = env_dict[x].replace("\\", "\\\\\\\\")
+ s = s.replace("'", "\\\\'")
+ s = s.replace("\n", "\\\n")
+ data.append("%s=$'%s'\n" % (x, s))
+ exported_keys.append(x)
+ if exported_keys:
+ data.append("export %s\n" % ' '.join(exported_keys))
+ data.append("end_receiving_env")
+ self.write(''.join(data), flush=True)
+ return self.expect("env_received")
+
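+    # Illustrative protocol sketch (not part of the original snapshot):
+    # for an env_dict of {"S": "/var/tmp/portage/foo"} the daemon would
+    # receive roughly
+    #
+    #     start_receiving_env
+    #     S=$'/var/tmp/portage/foo'
+    #     export S
+    #     end_receiving_env
+    #
+    # and is expected to answer "env_received", which the expect() call
+    # above verifies.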
+ def set_logfile(self, logfile=''):
+ """
+ Set the logfile (location to log to).
+
+ Relevant only when the daemon is sandbox'd.
+
+ @param logfile: filepath to log to
+ """
+ self.write("logging %s" % logfile)
+ return self.expect("logging_ack")
+
+ def __del__(self):
+ """simply attempts to notify the daemon to die"""
+ # for this to be reached means we ain't in a list no more.
+ if self.is_alive:
+ # I'd love to know why the exception wrapping is required...
+ try:
+ self.shutdown_processor()
+ except TypeError:
+ pass
+
+ def get_keys(self, package_inst, eclass_cache):
+ """
+ request the metadata be regenerated from an ebuild
+
+ @param package_inst: L{pkgcore.ebuild.ebuild_src.package} instance
+ to regenerate
+ @param eclass_cache: L{pkgcore.ebuild.eclass_cache} instance to use
+ for eclass access
+ @return: dict when successful, None when failed
+ """
+
+ self.write("process_ebuild depend")
+ e = expected_ebuild_env(package_inst)
+ e["PATH"] = depends_phase_path
+ self.send_env(e)
+ self.set_sandbox_state(True)
+ self.write("start_processing")
+
+ metadata_keys = {}
+ val = self.generic_handler(additional_commands={
+ "request_inherit": post_curry(
+ self.__class__._inherit, eclass_cache),
+ "key": post_curry(self.__class__._receive_key, metadata_keys)})
+
+ if not val:
+ logger.error("returned val from get_keys was '%s'" % str(val))
+ raise Exception(val)
+
+ return metadata_keys
+
+ def _receive_key(self, line, keys_dict):
+ """
+ internal function used for receiving keys from the bash processor
+ """
+ line = line.split("=", 1)
+ if len(line) != 2:
+ raise FinishedProcessing(True)
+ else:
+ keys_dict[line[0]] = line[1]
+
+ def _inherit(self, line, ecache):
+ """
+ Callback for implementing inherit digging into eclass_cache.
+
+ Not for normal consumption.
+ """
+ if line is None:
+ self.write("failed")
+ raise UnhandledCommand(
+ "inherit requires an eclass specified, none specified")
+
+ line = line.strip()
+ eclass = ecache.get_eclass(line)
+ if eclass is None:
+ self.write("failed")
+ raise UnhandledCommand(
+ "inherit requires an unknown eclass, %s cannot be found" % line)
+
+ if eclass.get_path is not None:
+ value = eclass.get_path()
+ self.write("path")
+ self.write(value)
+ else:
+ # XXX $10 this doesn't work.
+ value = eclass.get_fileobj().read()
+ self.write("transfer")
+ self.write(value)
+
+ # this basically handles all hijacks from the daemon, whether
+ # confcache or portageq.
+ def generic_handler(self, additional_commands=None):
+ """
+ internal event handler responding to the running processor's requests.
+
+ @type additional_commands: mapping from string to callable.
+ @param additional_commands: Extra command handlers.
+ Command names cannot have spaces.
+ The callable is called with the processor as first arg, and
+ remaining string (None if no remaining fragment) as second arg.
+ If you need to split the args to command, whitespace splitting
+ falls to your func.
+
+ @raise UnhandledCommand: thrown when an unknown command is encountered.
+ """
+
+ # note that self is passed in, so we register the unbound
+ # methods by digging through __class__. If we didn't,
+ # sandbox_summary (fex) couldn't be overridden in a subclass-
+ # this func would always use this class's version. So dig
+ # through self.__class__ for it. :P
+
+ handlers = {"request_sandbox_summary":self.__class__.sandbox_summary}
+ f = chuck_UnhandledCommand
+ for x in ("prob", "env_receiving_failed", "failed"):
+ handlers[x] = f
+ del f
+
+ handlers["phases"] = partial(
+ chuck_StoppingCommand, lambda f: f.lower().strip() == "succeeded")
+
+ handlers["killed"] = chuck_KeyboardInterrupt
+
+ if additional_commands is not None:
+ for x in additional_commands:
+ if not callable(additional_commands[x]):
+ raise TypeError(additional_commands[x])
+
+ handlers.update(additional_commands)
+
+ self.lock()
+
+ try:
+ while True:
+ line = self.read().strip()
+ # split on first whitespace.
+ s = line.split(None, 1)
+ if s[0] in handlers:
+ if len(s) == 1:
+ s.append(None)
+ handlers[s[0]](self, s[1])
+ else:
+ logger.error("unhandled command '%s', line '%s'" %
+ (s[0], line))
+ raise UnhandledCommand(line)
+
+ except FinishedProcessing, fp:
+ v = fp.val
+ self.unlock()
+ return v
+
+
+def expected_ebuild_env(pkg, d=None, env_source_override=None):
+ """
+ setup expected ebuild vars
+
+ @param d: if None, generates a dict, else modifies a passed in mapping
+ @return: mapping
+ """
+ if d is None:
+ d = {}
+ d["CATEGORY"] = pkg.category
+ d["PF"] = "-".join((pkg.package, pkg.fullver))
+ d["P"] = "-".join((pkg.package, pkg.version))
+ d["PN"] = pkg.package
+ d["PV"] = pkg.version
+ if pkg.revision is None:
+ d["PR"] = "r0"
+ else:
+ d["PR"] = "r%i" % pkg.revision
+ d["PVR"] = pkg.fullver
+ if env_source_override:
+ path = env_source_override.get_path
+ if path is not None:
+ d["EBUILD"] = path()
+ else:
+ d["EBUILD"] = pkg.ebuild.get_path()
+ d["PATH"] = ":".join(EBUILD_ENV_PATH + d.get("PATH", "").split(":"))
+ return d
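+# Illustrative sketch (not part of the original snapshot): for a package
+# whose category/package/fullver are dev-util/foo/1.2-r1, the mapping
+# above comes out roughly as CATEGORY=dev-util, PN=foo, PV=1.2, PR=r1,
+# P=foo-1.2, PF=foo-1.2-r1, PVR=1.2-r1, plus EBUILD and a PATH with
+# EBUILD_ENV_PATH prepended.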
+
diff --git a/pkgcore/ebuild/profiles.py b/pkgcore/ebuild/profiles.py
new file mode 100644
index 0000000..7654989
--- /dev/null
+++ b/pkgcore/ebuild/profiles.py
@@ -0,0 +1,562 @@
+# Copyright: 2006-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+import errno, os
+from itertools import chain
+
+from pkgcore.config import ConfigHint
+from pkgcore.ebuild import const
+from pkgcore.ebuild import ebuild_src
+from pkgcore.repository import virtual
+
+from snakeoil.osutils import abspath, join as pjoin, readlines
+from snakeoil.containers import InvertedContains
+from snakeoil.fileutils import iter_read_bash, read_bash_dict
+from snakeoil.caching import WeakInstMeta
+from snakeoil.currying import partial
+from snakeoil.demandload import demandload
+
+demandload(globals(),
+ 'pkgcore.interfaces.data_source:local_source',
+ 'pkgcore.ebuild:cpv',
+ 'pkgcore.ebuild:atom',
+ 'pkgcore.repository:util',
+ 'pkgcore.restrictions:packages',
+)
+
+class ProfileError(Exception):
+
+ def __init__(self, path, filename, error):
+ self.path, self.filename, self.error = path, filename, error
+
+ def __str__(self):
+ return "ProfileError: profile %r, file %r, error %s" % (
+ self.path, self.filename, self.error)
+
+def load_decorator(filename, handler=iter_read_bash, fallback=()):
+ def f(func):
+ def f2(self, *args):
+ path = pjoin(self.path, filename)
+ try:
+ data = readlines(path, False, True, True)
+ if data is None:
+ return func(self, fallback, *args)
+ return func(self, handler(data), *args)
+ except (KeyboardInterrupt, RuntimeError, SystemExit):
+ raise
+ except Exception, e:
+ raise ProfileError(self.path, filename, e)
+ return f2
+ return f
+
+def split_negations(data, func):
+ neg, pos = [], []
+ for line in data:
+ if line[0] == '-':
+ if len(line) == 1:
+ raise ValueError("'-' negation without a token")
+ neg.append(func(line[1:]))
+ else:
+ pos.append(func(line))
+ return (tuple(neg), tuple(pos))
+
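+# Illustrative example (not part of the original snapshot): given the
+# lines ["foo", "-bar", "baz"] and func=str, split_negations returns
+# (("bar",), ("foo", "baz")), i.e. negated tokens first, positives second.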
+
+class ProfileNode(object):
+
+ __metaclass__ = WeakInstMeta
+ __inst_caching__ = True
+
+ def __init__(self, path):
+ if not os.path.isdir(path):
+ raise ProfileError(path, "", "profile doesn't exist")
+ self.path = path
+
+ def __str__(self):
+ return "Profile at %r" % self.path
+
+ def __repr__(self):
+ return '<%s path=%r, @%#8x>' % (self.__class__.__name__, self.path,
+ id(self))
+
+ @load_decorator("packages")
+ def _load_packages(self, data):
+ # sys packages and visibility
+ sys, neg_sys, vis, neg_vis = [], [], [], []
+ for line in data:
+ if line[0] == '-':
+ if line[1] == '*':
+ neg_sys.append(atom.atom(line[2:]))
+ else:
+ neg_vis.append(atom.atom(line[1:], negate_vers=True))
+ else:
+ if line[0] == '*':
+ sys.append(atom.atom(line[1:]))
+ else:
+ vis.append(atom.atom(line, negate_vers=True))
+
+ self.system = (tuple(neg_sys), tuple(sys))
+ self.visibility = (tuple(neg_vis), tuple(vis))
+
+ @load_decorator("parent")
+ def _load_parents(self, data):
+ self.parents = tuple(ProfileNode(abspath(pjoin(self.path, x)))
+ for x in data)
+ return self.parents
+
+ @load_decorator("package.provided")
+ def _load_pkg_provided(self, data):
+ self.pkg_provided = split_negations(data, cpv.CPV)
+ return self.pkg_provided
+
+ @load_decorator("virtuals")
+ def _load_virtuals(self, data):
+ d = {}
+ for line in data:
+ l = line.split()
+ if len(l) != 2:
+ raise ValueError("%r is malformated" % line)
+ d[cpv.CPV(l[0]).package] = atom.atom(l[1])
+ self.virtuals = d
+ return d
+
+ @load_decorator("package.mask")
+ def _load_masks(self, data):
+ self.masks = split_negations(data, atom.atom)
+ return self.masks
+
+ @load_decorator("deprecated", lambda i:i, None)
+ def _load_deprecated(self, data):
+ if data is not None:
+ data = iter(data)
+ try:
+ replacement = data.next().strip()
+ msg = "\n".join(x.lstrip("#").strip()
+ for x in data)
+ data = (replacement, msg)
+ except StopIteration:
+ # only an empty replacement could trigger this; thus
+ # the file is badly formatted.
+ raise ValueError("didn't specify a replacement profile")
+ self.deprecated = data
+ return data
+
+ @load_decorator("use.mask")
+ def _load_masked_use(self, data):
+ d = self._load_pkg_use_mask()
+ neg, pos = split_negations(data, str)
+ if neg or pos:
+ d[packages.AlwaysTrue] = (neg, pos)
+ self.masked_use = d
+ return d
+
+ @load_decorator("package.use.mask")
+ def _load_pkg_use_mask(self, data):
+ d = {}
+ for line in data:
+ i = iter(line.split())
+ a = atom.atom(i.next())
+ neg, pos = d.setdefault(a, ([], []))
+ for x in i:
+ if x[0] == '-':
+ neg.append(x[1:])
+ else:
+ pos.append(x)
+ for k, v in d.iteritems():
+ d[k] = tuple(tuple(x) for x in v)
+ self.pkg_use_mask = d
+ return d
+
+ @load_decorator("package.use")
+ def _load_pkg_use(self, data):
+ d = {}
+ for line in data:
+ i = iter(line.split())
+ a = atom.atom(i.next())
+ neg, pos = d.setdefault(a, ([], []))
+ for x in i:
+ if x[0] == '-':
+ neg.append(x[1:])
+ else:
+ pos.append(x)
+ for k, v in d.iteritems():
+ d[k] = tuple(tuple(x) for x in v)
+ self.pkg_use = d
+ return d
+
+ @load_decorator("use.force")
+ def _load_forced_use(self, data):
+ d = self._load_pkg_use_force()
+ neg, pos = split_negations(data, str)
+ if neg or pos:
+ d[packages.AlwaysTrue] = (neg, pos)
+ self.forced_use = d
+ return d
+
+ @load_decorator("package.use.force")
+ def _load_pkg_use_force(self, data):
+ d = {}
+ for line in data:
+ i = iter(line.split())
+ a = atom.atom(i.next())
+ neg, pos = d.setdefault(a, ([], []))
+ for x in i:
+ if x[0] == '-':
+ neg.append(x[1:])
+ else:
+ pos.append(x)
+ for k, v in d.iteritems():
+ d[k] = tuple(tuple(x) for x in v)
+ self.pkg_use_force = d
+ return d
+
+ def _load_default_env(self):
+ path = pjoin(self.path, "make.defaults")
+ try:
+ f = open(path, "r")
+ except IOError, ie:
+ if ie.errno != errno.ENOENT:
+ raise ProfileError(self.path, "make.defaults", ie)
+ self.default_env = {}
+ return self.default_env
+ try:
+ try:
+ d = read_bash_dict(f)
+ finally:
+ f.close()
+ except (KeyboardInterrupt, RuntimeError, SystemExit):
+ raise
+ except Exception, e:
+ raise ProfileError(self.path, "make.defaults", e)
+ self.default_env = d
+ return d
+
+ def _load_bashrc(self):
+ path = pjoin(self.path, "profile.bashrc")
+ if os.path.exists(path):
+ self.bashrc = local_source(path)
+ else:
+ self.bashrc = None
+ return self.bashrc
+
+ def __getattr__(self, attr):
+ if attr in ("system", "visibility"):
+ self._load_packages()
+ return getattr(self, attr)
+ # use object's getattr to bypass our own; prevents infinite recursion
+ # if they request something nonexistent
+ try:
+ func = object.__getattribute__(self, "_load_%s" % attr)
+ except AttributeError:
+ raise AttributeError(self, attr)
+ if func is None:
+ raise AttributeError(attr)
+ return func()
+
+
+class EmptyRootNode(ProfileNode):
+
+ __inst_caching__ = True
+
+ parents = ()
+ deprecated = None
+ forced_use = masked_use = {}
+ pkg_provided = visibility = system = ((), ())
+ virtuals = {}
+
+
+def incremental_expansion(orig, iterable, msg_prefix=''):
+ for i in iterable:
+ if i[0] == '-':
+ i = i[1:]
+ if not i:
+ raise ValueError("%sencountered an incomplete negation, '-'"
+ % msg_prefix)
+ orig.discard(i)
+ else:
+ orig.add(i)
+
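+# Illustrative example (not part of the original snapshot): starting from
+# orig = set(["x86", "doc"]), incremental_expansion(orig, ["-doc", "nls"])
+# leaves orig == set(["x86", "nls"]); a bare "-" token raises ValueError
+# prefixed with msg_prefix.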
+
+class OnDiskProfile(object):
+
+ pkgcore_config_type = ConfigHint({'basepath':'str', 'profile':'str',
+ 'incrementals':'list'}, required=('basepath', 'profile'),
+ typename='profile')
+
+ def __init__(self, basepath, profile, incrementals=const.incrementals,
+ load_profile_base=True):
+ self.basepath = basepath
+ self.profile = profile
+ self.node = ProfileNode(pjoin(basepath, profile))
+ self.incrementals = incrementals
+ self.load_profile_base = load_profile_base
+
+ @property
+ def arch(self):
+ return self.default_env.get("ARCH")
+
+ @property
+ def deprecated(self):
+ return self.node.deprecated
+
+ def _load_stack(self):
+ def f(node):
+ for x in node.parents:
+ for y in f(x):
+ yield y
+ yield node
+
+ l = list(f(self.node))
+ if self.load_profile_base:
+ l = [EmptyRootNode(self.basepath)] + l
+ return tuple(l)
+
+ def _collapse_use_dict(self, attr):
+
+ stack = [getattr(x, attr) for x in self.stack]
+
+ global_on = set()
+ puse_on = {}
+ puse_off = {}
+ atrue = packages.AlwaysTrue
+ for mapping in stack:
+ # process globals (use.(mask|force) first)
+ val = mapping.get(atrue)
+ if val is not None:
+ # global wipes everything affecting the flag thus far.
+ global_on.difference_update(val[0])
+ for u in val[0]:
+ puse_on.pop(u, None)
+ puse_off.pop(u, None)
+
+ # this *is* right; if it's global, it stomps everything prior.
+ global_on.update(val[1])
+ for u in val[1]:
+ puse_on.pop(u, None)
+ puse_off.pop(u, None)
+
+ # process less specific...
+ for key, val in mapping.iteritems():
+ if key == atrue:
+ continue
+ for u in val[0]:
+ # is it even on? if not, don't level it.
+ if u in global_on:
+ if u not in puse_off:
+ puse_off[u] = set([key])
+ else:
+ puse_off[u].add(key)
+ else:
+ s = puse_on.get(u)
+ if s is not None:
+ # chuck the previous override.
+ s.discard(key)
+
+ for u in val[1]:
+ # if it's on already, no need to set it.
+ if u not in global_on:
+ if u not in puse_on:
+ puse_on[u] = set([key])
+ else:
+ puse_on[u].add(key)
+ else:
+ s = puse_off.get(u)
+ if s is not None:
+ s.discard(key)
+
+ # now we recompose it into a global on set, plus per-package overrides.
+ d = {}
+ if global_on:
+ d[atrue] = set(global_on)
+ # reverse the mapping.
+ for data in (puse_on.iteritems(),
+ (("-"+k, v) for k,v in puse_off.iteritems())):
+ for use, key_set in data:
+ for key in key_set:
+ s = d.get(key)
+ if s is None:
+ d[key] = set([use])
+ else:
+ s.add(use)
+ for k, v in d.iteritems():
+ d[k] = tuple(v)
+ return d
+
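+    # Illustrative sketch (not part of the original snapshot): if a parent
+    # profile globally masks "doc" via use.mask and a child profile lists
+    # "dev-util/foo -doc" in package.use.mask, the collapsed dict maps
+    # packages.AlwaysTrue to ("doc",) and the dev-util/foo atom to
+    # ("-doc",), i.e. a per-package negation of the global entry.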
+ def _collapse_generic(self, attr):
+ s = set()
+ for node in self.stack:
+ val = getattr(node, attr)
+ s.difference_update(val[0])
+ s.update(val[1])
+ return s
+
+ def _collapse_env(self):
+ d = {}
+ inc = self.incrementals
+ for profile in self.stack:
+ for key, val in profile.default_env.iteritems():
+ if key in inc:
+ val = val.split()
+ s = d.get(key)
+ if s is None:
+ s = d[key] = set()
+ incremental_expansion(s, val,
+ "expanding %s make.defaults: " % profile)
+ if not s:
+ del d[key]
+ else:
+ d[key] = val
+ return d
+
+ @property
+ def use_expand(self):
+ if "USE_EXPAND" in self.incrementals:
+ return tuple(self.default_env["USE_EXPAND"])
+ return tuple(self.default_env["USE_EXPAND"].split())
+
+ @property
+ def use_expand_hidden(self):
+ if "USE_EXPAND_HIDDEN" in self.incrementals:
+ return tuple(self.default_env["USE_EXPAND_HIDDEN"])
+ return tuple(self.default_env["USE_EXPAND_HIDDEN"].split())
+
+ def _collapse_virtuals(self):
+ d = {}
+ for profile in self.stack:
+ d.update(profile.virtuals)
+ self.virtuals = d
+ self.make_virtuals_repo = partial(AliasedVirtuals, d)
+
+ def _collapse_pkg_provided(self):
+ d = {}
+ for pkg in self._collapse_generic("pkg_provided"):
+ d.setdefault(pkg.category, {}).setdefault(pkg.package,
+ []).append(pkg.fullver)
+ return util.SimpleTree(d, pkg_klass=PkgProvided)
+
+ def _collapse_masks(self):
+ return frozenset(chain(self._collapse_generic("masks"),
+ self._collapse_generic("visibility")))
+
+ def __getattr__(self, attr):
+ if attr == "stack":
+ self.stack = obj = self._load_stack()
+ elif attr in ('forced_use', 'masked_use', 'pkg_use'):
+ obj = self._collapse_use_dict(attr)
+ setattr(self, attr, obj)
+ elif attr == 'bashrc':
+ obj = self.bashrc = tuple(x.bashrc
+ for x in self.stack if x.bashrc is not None)
+ elif attr == 'system':
+ obj = self.system = self._collapse_generic(attr)
+ elif attr == 'masks':
+ obj = self.masks = self._collapse_masks()
+ elif attr == 'default_env':
+ obj = self.default_env = self._collapse_env()
+ elif attr == 'virtuals':
+ self._collapse_virtuals()
+ obj = self.virtuals
+ elif attr == 'make_virtuals_repo':
+ self._collapse_virtuals()
+ obj = self.make_virtuals_repo
+ elif attr == 'provides_repo':
+ obj = self.provides_repo = self._collapse_pkg_provided()
+ elif attr == 'path':
+ obj = self.node.path
+ else:
+ raise AttributeError(attr)
+ return obj
+
+
+class PkgProvided(ebuild_src.base):
+
+ package_is_real = False
+ __inst_caching__ = True
+
+ keywords = InvertedContains(())
+
+ def __init__(self, *a, **kwds):
+ # 'None' repo.
+ ebuild_src.base.__init__(self, None, *a, **kwds)
+ object.__setattr__(self, "use", [])
+ object.__setattr__(self, "data", {})
+
+
+class ForgetfulDict(dict):
+
+ def __setitem__(self, key, attr):
+ return
+
+ def update(self, other):
+ return
+
+
+class AliasedVirtuals(virtual.tree):
+
+ """
+ repository generated from a profiles default virtuals
+ """
+
+ def __init__(self, virtuals, repo, *overrides):
+ """
+ @param virtuals: dict of virtual -> providers
+ @param repo: L{pkgcore.ebuild.repository.UnconfiguredTree} parent repo
+ @keyword overrides: mapping of virtual pkgname -> matches to override defaults
+ """
+ virtual.tree.__init__(self, livefs=False)
+ self._original_virtuals = virtuals
+ self._overrides = tuple(overrides)
+ if not overrides:
+ # no point in delaying.
+ self.packages._cache['virtuals'] = tuple(virtuals.iterkeys())
+ self._virtuals = virtuals
+ self.aliased_repo = repo
+ self._versions_map = {}
+
+ def _load_data(self):
+ self._virtuals = self._delay_apply_overrides(self._original_virtuals,
+ self._overrides)
+ self.packages._cache['virtual'] = tuple(self._virtuals.iterkeys())
+
+ @staticmethod
+ def _delay_apply_overrides(virtuals, overrides):
+ d = {}
+ for vtree in overrides:
+ for virt, provider in vtree.default_providers.iteritems():
+ if virt in d:
+ d[virt] &= d[virt] & provider
+ else:
+ d[virt] = provider
+
+ if not d:
+ return virtuals
+ for k, v in d.iteritems():
+ if len(v) == 1:
+ d[k] = tuple(v)[0]
+ else:
+ d[k] = packages.OrRestriction(finalize=True, *v)
+ virtuals = virtuals.copy()
+ virtuals.update(d)
+ return virtuals
+
+ def _get_versions(self, catpkg):
+ if catpkg[0] != "virtual":
+ raise KeyError("no %s package in this repository" % catpkg)
+ vers = set()
+ for pkg in self.aliased_repo.itermatch(self._virtuals[catpkg[1]]):
+ self._versions_map.setdefault(catpkg[1], {}).setdefault(pkg.fullver, []).append(
+ pkg.versioned_atom)
+ vers.add(pkg.fullver)
+ return tuple(vers)
+
+ def _expand_vers(self, cp, ver):
+ return self._versions_map.get(cp[1], {}).get(ver, ())
+
+ def _fetch_metadata(self, pkg):
+ data = self._virtuals[pkg.package]
+ if isinstance(data, atom.atom):
+ data = [data]
+ data = [atom.atom("=%s-%s" % (x.key, pkg.fullver)) for x in data]
+ if len(data) == 1:
+ return data[0]
+ return packages.OrRestriction(finalize=True, *data)
diff --git a/pkgcore/ebuild/repo_objs.py b/pkgcore/ebuild/repo_objs.py
new file mode 100644
index 0000000..00923b9
--- /dev/null
+++ b/pkgcore/ebuild/repo_objs.py
@@ -0,0 +1,165 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+package class for buildable ebuilds
+"""
+
+from snakeoil.currying import post_curry
+from snakeoil.demandload import demandload
+demandload(globals(),
+ 'snakeoil.xml:etree',
+ 'pkgcore.ebuild:digest',
+ 'snakeoil:mappings',
+ 'errno',
+)
+
+
+class Maintainer(object):
+
+ """Data on a single maintainer.
+
+ At least one of email and name is not C{None}.
+
+ @type email: C{unicode} object or C{None}
+ @ivar email: email address.
+ @type name: C{unicode} object or C{None}
+ @ivar name: full name
+ @type description: C{unicode} object or C{None}
+ @ivar description: description of maintainership.
+ """
+
+ __slots__ = ('email', 'description', 'name')
+
+ def __init__(self, email=None, name=None, description=None):
+ if email is None and name is None:
+ raise ValueError('need at least one of name and email')
+ self.email = email
+ self.name = name
+ self.description = description
+
+ def __str__(self):
+ if self.name is not None:
+ if self.email is not None:
+ res = '%s <%s>' % (self.name, self.email)
+ else:
+ res = self.name
+ else:
+ res = self.email
+ if self.description is not None:
+ return '%s (%s)' % (res, self.description)
+ return res
+
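+# Illustrative examples (not part of the original snapshot) of the string
+# forms produced by Maintainer.__str__ above:
+#     Maintainer(name="A. Dev", email="a@gentoo.org")   -> "A. Dev <a@gentoo.org>"
+#     Maintainer(email="a@gentoo.org", description="QA") -> "a@gentoo.org (QA)"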
+
+class MetadataXml(object):
+ """metadata.xml parsed results
+
+ attributes are set to -1 if unloaded, None if no entry, or the value
+ if loaded
+ """
+
+ __slots__ = ("__weakref__", "_maintainers", "_herds", "_longdescription",
+ "_source")
+
+ def __init__(self, source):
+ self._source = source
+
+ def _generic_attr(self, attr):
+ if self._source is not None:
+ self._parse_xml()
+ return getattr(self, attr)
+
+ for attr in ("herds", "maintainers", "longdescription"):
+ locals()[attr] = property(post_curry(_generic_attr, "_"+attr))
+ del attr
+
+ def _parse_xml(self, source=None):
+ if source is None:
+ source = self._source.get_fileobj()
+ tree = etree.parse(source)
+ maintainers = []
+ for x in tree.findall("maintainer"):
+ name = email = description = None
+ for e in x:
+ if e.tag == "name":
+ name = e.text
+ elif e.tag == "email":
+ email = e.text
+ elif e.tag == 'description':
+ description = e.text
+ maintainers.append(Maintainer(
+ name=name, email=email, description=description))
+
+ self._maintainers = tuple(maintainers)
+ self._herds = tuple(x.text for x in tree.findall("herd"))
+
+ # Could be unicode!
+ longdesc = tree.findtext("longdescription")
+ if longdesc:
+ longdesc = ' '.join(longdesc.strip().split())
+ self._longdescription = longdesc
+ self._source = None
+
+
+class LocalMetadataXml(MetadataXml):
+
+ __slots__ = ()
+
+ def _parse_xml(self):
+ try:
+ MetadataXml._parse_xml(self, open(self._source, "rb", 32768))
+ except IOError, oe:
+ if oe.errno != errno.ENOENT:
+ raise
+ self._maintainers = ()
+ self._herds = ()
+ self._longdescription = None
+ self._source = None
+
+
+
+class Manifest(object):
+
+ def __init__(self, source, enforce_gpg=False):
+ self._source = (source, not enforce_gpg)
+
+ def _pull_manifest(self):
+ if self._source is None:
+ return
+ source, gpg = self._source
+ data = digest.parse_manifest(source, ignore_gpg=gpg,
+ kls_override=mappings.ImmutableDict)
+ self._dist, self._aux, self._ebuild, self._misc = data[0]
+ self._version = data[1]
+ self._source = None
+
+ @property
+ def version(self):
+ self._pull_manifest()
+ return self._version
+
+ @property
+ def required_files(self):
+ self._pull_manifest()
+ return mappings.StackedDict(self._ebuild, self._misc)
+
+ @property
+ def aux_files(self):
+ self._pull_manifest()
+ return self._aux
+
+ @property
+ def distfiles(self):
+ self._pull_manifest()
+ if self.version != 2:
+ raise TypeError("only manifest2 instances carry digest data")
+ return self._dist
+
+
+class SharedPkgData(object):
+
+ __slots__ = ("__weakref__", "metadata_xml", "manifest")
+
+ def __init__(self, metadata_xml, manifest):
+ self.metadata_xml = metadata_xml
+ self.manifest = manifest
diff --git a/pkgcore/ebuild/repository.py b/pkgcore/ebuild/repository.py
new file mode 100644
index 0000000..48e928a
--- /dev/null
+++ b/pkgcore/ebuild/repository.py
@@ -0,0 +1,316 @@
+# Copyright: 2005-2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+ebuild repository, specific to gentoo ebuild trees (whether cvs or rsync)
+"""
+
+import os, stat
+from itertools import imap, ifilterfalse
+
+from pkgcore.repository import prototype, errors, configured, syncable
+from pkgcore.ebuild import eclass_cache as eclass_cache_module
+from pkgcore.config import ConfigHint
+from pkgcore.plugin import get_plugin
+
+from snakeoil.fileutils import read_dict, iter_read_bash
+from snakeoil import currying
+from snakeoil.osutils import (listdir_files, readfile, listdir_dirs, pjoin,
+ readlines)
+from snakeoil.containers import InvertedContains
+from snakeoil.obj import make_kls
+from snakeoil.weakrefs import WeakValCache
+
+from snakeoil.demandload import demandload
+demandload(globals(),
+ 'pkgcore.ebuild.ebd:buildable',
+ 'pkgcore.interfaces.data_source:local_source',
+ 'pkgcore.ebuild:digest',
+ 'pkgcore.ebuild:repo_objs',
+ 'pkgcore.ebuild:atom',
+ 'random:shuffle',
+ 'errno',
+)
+
+
+metadata_offset = "profiles"
+
+class UnconfiguredTree(syncable.tree_mixin, prototype.tree):
+
+ """
+ raw implementation supporting standard ebuild tree.
+
+ Returned packages don't have USE configuration bound to them.
+ """
+
+ false_packages = frozenset(["CVS", ".svn"])
+ false_categories = frozenset([
+ "eclass", "profiles", "packages", "distfiles", "metadata",
+ "licenses", "scripts", "CVS", "local"])
+ configured = False
+ configurables = ("domain", "settings")
+ configure = None
+ format_magic = "ebuild_src"
+ enable_gpg = False
+
+ pkgcore_config_type = ConfigHint(
+ {'location': 'str', 'cache': 'refs:cache',
+ 'eclass_cache': 'ref:eclass_cache',
+ 'default_mirrors': 'list', 'sync': 'lazy_ref:syncer',
+ 'override_repo_id':'str'},
+ typename='repo')
+
+ def __init__(self, location, cache=(), eclass_cache=None,
+ default_mirrors=None, sync=None, override_repo_id=None):
+
+ """
+ @param location: on disk location of the tree
+ @param cache: sequence of L{pkgcore.cache.template.database} instances
+ to use for storing metadata
+ @param eclass_cache: If not None, L{pkgcore.ebuild.eclass_cache}
+ instance representing the eclasses available,
+ if None, generates the eclass_cache itself
+ @param default_mirrors: Either None, or sequence of mirrors to try
+ fetching from first, then falling back to other uri
+ """
+
+ prototype.tree.__init__(self)
+ syncable.tree_mixin.__init__(self, sync)
+ self._repo_id = override_repo_id
+ self.base = self.location = location
+ try:
+ if not stat.S_ISDIR(os.stat(self.base).st_mode):
+ raise errors.InitializationError(
+ "base not a dir: %s" % self.base)
+
+ except OSError:
+ raise errors.InitializationError(
+ "lstat failed on base %s" % self.base)
+ if eclass_cache is None:
+ self.eclass_cache = eclass_cache_module.cache(
+ pjoin(self.base, "eclass"), self.base)
+ else:
+ self.eclass_cache = eclass_cache
+
+ fp = pjoin(self.base, metadata_offset, "thirdpartymirrors")
+ mirrors = {}
+ if os.path.exists(fp):
+ f = open(fp, "r")
+ try:
+ for k, v in read_dict(f, splitter=None,
+ source_isiter=True).iteritems():
+ v = v.split()
+ shuffle(v)
+ mirrors[k] = v
+ finally:
+ f.close()
+ if isinstance(cache, (tuple, list)):
+ cache = tuple(cache)
+ else:
+ cache = (cache,)
+
+ self.mirrors = mirrors
+ self.default_mirrors = default_mirrors
+ self.cache = cache
+ self.package_class = get_plugin("format." + self.format_magic)(
+ self, cache, self.eclass_cache, self.mirrors, self.default_mirrors)
+ self._shared_pkg_cache = WeakValCache()
+
+ @property
+ def repo_id(self):
+ if self._repo_id is None:
+ # thank you spb for a stupid location, and stupid file name.
+ r = readfile(pjoin(self.location, "profiles",
+ "repo_name"), True)
+ if r is None:
+ self._repo_id = self.location
+ else:
+ self._repo_id = r.strip()
+ return self._repo_id
+
+ def rebind(self, **kwds):
+
+ """
+ generate a new tree instance with the same location using new keywords.
+
+ @param kwds: see __init__ for valid values
+ """
+
+ o = self.__class__(self.location, **kwds)
+ o.categories = self.categories
+ o.packages = self.packages
+ o.versions = self.versions
+ return o
+
+ def _get_categories(self, *optional_category):
+ # why the auto return? current porttrees don't allow/support
+ # categories deeper than one dir.
+ if optional_category:
+ #raise KeyError
+ return ()
+
+ try:
+ # try reading $LOC/profiles/categories if it's available.
+ cats = readlines(pjoin(self.base, 'profiles', 'categories'), True, True,
+ True)
+ if cats is not None:
+ return tuple(imap(intern, cats))
+
+ return tuple(imap(intern,
+ ifilterfalse(self.false_categories.__contains__,
+ (x for x in listdir_dirs(self.base) if x[0:1] != ".")
+ )))
+ except (OSError, IOError), e:
+ raise KeyError("failed fetching categories: %s" % str(e))
+
+ def _get_packages(self, category):
+ cpath = pjoin(self.base, category.lstrip(os.path.sep))
+ try:
+ return tuple(ifilterfalse(self.false_packages.__contains__,
+ listdir_dirs(cpath)))
+ except (OSError, IOError), e:
+ raise KeyError("failed fetching packages for category %s: %s" % \
+ (pjoin(self.base, category.lstrip(os.path.sep)), \
+ str(e)))
+
+ def _get_versions(self, catpkg):
+ cppath = pjoin(self.base, catpkg[0], catpkg[1])
+ # 7 == len(".ebuild")
+ pkg = catpkg[-1] + "-"
+ lp = len(pkg)
+ try:
+ return tuple(x[lp:-7] for x in listdir_files(cppath)
+ if x[-7:] == '.ebuild' and x[:lp] == pkg)
+ except (OSError, IOError), e:
+ raise KeyError("failed fetching versions for package %s: %s" % \
+ (cppath, str(e)))
+
+ def _get_ebuild_path(self, pkg):
+ return pjoin(self.base, pkg.category, pkg.package, \
+ "%s-%s.ebuild" % (pkg.package, pkg.fullver))
+
+ def _get_ebuild_src(self, pkg):
+ return local_source(self._get_ebuild_path(pkg))
+
+ def _get_shared_pkg_data(self, category, package):
+ key = (category, package)
+ o = self._shared_pkg_cache.get(key)
+ if o is None:
+ mxml = self._get_metadata_xml(category, package)
+ manifest = self._get_manifest(category, package)
+ o = repo_objs.SharedPkgData(mxml, manifest)
+ self._shared_pkg_cache[key] = o
+ return o
+
+ def _get_metadata_xml(self, category, package):
+ return repo_objs.LocalMetadataXml(pjoin(self.base, category,
+ package, "metadata.xml"))
+
+ def _get_manifest(self, category, package):
+ return repo_objs.Manifest(pjoin(self.base, category, package,
+ "Manifest"), enforce_gpg=self.enable_gpg)
+
+ def _get_digests(self, pkg, force_manifest1=False):
+ manifest = pkg._shared_pkg_data.manifest
+ if manifest.version == 2 and not force_manifest1:
+ return manifest.distfiles
+ return digest.parse_digest(pjoin(
+ os.path.dirname(self._get_ebuild_path(pkg)), "files",
+ "digest-%s-%s" % (pkg.package, pkg.fullver)))
+
+ def __str__(self):
+ return "%s.%s: location %s" % (
+ self.__class__.__module__, self.__class__.__name__, self.base)
+
+ def __repr__(self):
+ return "<ebuild %s location=%r @%#8x>" % (self.__class__.__name__,
+ self.base, id(self))
+
+ def _visibility_limiters(self):
+ try:
+ return [atom.atom(x.strip())
+ for x in iter_read_bash(
+ pjoin(self.base, "profiles", "package.mask"))]
+ except IOError, i:
+ if i.errno != errno.ENOENT:
+ raise
+ del i
+ return []
+
+
+class SlavedTree(UnconfiguredTree):
+
+ """
+ repository that pulls repo metadata from a parent repo; mirrors
+ being the main metadata pulled at this point
+ """
+
+ orig_hint = UnconfiguredTree.pkgcore_config_type
+ d = dict(orig_hint.types.iteritems())
+ d["parent_repo"] = 'ref:repo'
+ pkgcore_config_type = orig_hint.clone(types=d,
+ required=list(orig_hint.required) + ["parent_repo"],
+ positional=list(orig_hint.positional) + ["parent_repo"])
+ del d, orig_hint
+
+ def __init__(self, parent_repo, *args, **kwds):
+ UnconfiguredTree.__init__(self, *args, **kwds)
+ for k, v in parent_repo.mirrors.iteritems():
+ if k not in self.mirrors:
+ self.mirrors[k] = v
+ self.package_class = get_plugin("format." + self.format_magic)(
+ self, self.cache, self.eclass_cache, self.mirrors,
+ self.default_mirrors)
+
+
+class ConfiguredTree(configured.tree):
+
+ """
+ wrapper around a L{UnconfiguredTree} binding build/configuration data (USE)
+ """
+
+ configurable = "use"
+ config_wrappables = dict(
+ (x, currying.alias_class_method("evaluate_depset"))
+ for x in ["depends", "rdepends", "post_rdepends", "fetchables",
+ "license", "src_uri", "license", "provides"])
+
+ def __init__(self, raw_repo, domain, domain_settings, fetcher=None):
+ """
+ @param raw_repo: L{UnconfiguredTree} instance
+ @param domain_settings: environment settings to bind
+ @param fetcher: L{pkgcore.fetch.base.fetcher} instance to use
+ for getting access to fetchable files
+ """
+ if "USE" not in domain_settings:
+ raise errors.InitializationError(
+ "%s requires the following settings: 'USE', not supplied" % (
+ self.__class__,))
+
+ configured.tree.__init__(self, raw_repo, self.config_wrappables)
+ self._get_pkg_use = domain.get_package_use
+ self.domain_settings = domain_settings
+ if fetcher is None:
+ self.fetcher = self.domain_settings["fetcher"]
+ else:
+ self.fetcher = fetcher
+ self._delayed_iuse = currying.partial(make_kls(InvertedContains),
+ InvertedContains)
+
+ def _get_delayed_immutable(self, pkg, immutable):
+ return InvertedContains(pkg.iuse.difference(immutable))
+
+ def _get_pkg_kwds(self, pkg):
+ immutable, enabled = self._get_pkg_use(pkg)
+ return {
+ "initial_settings": enabled,
+ "unchangable_settings": self._delayed_iuse(
+ self._get_delayed_immutable, pkg, immutable),
+ "build_callback":self.generate_buildop}
+
+ def generate_buildop(self, pkg, **kwds):
+ return buildable(pkg, self.domain_settings, pkg.repo.eclass_cache,
+ self.fetcher, **kwds)
+
+UnconfiguredTree.configure = ConfiguredTree
+tree = UnconfiguredTree
diff --git a/pkgcore/ebuild/resolver.py b/pkgcore/ebuild/resolver.py
new file mode 100644
index 0000000..a6cd78d
--- /dev/null
+++ b/pkgcore/ebuild/resolver.py
@@ -0,0 +1,136 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+resolver configuration to match portage behaviour (misbehaviour in a few spots)
+"""
+
+__all__ = ["upgrade_resolver", "min_install_resolver"]
+
+from pkgcore.repository import virtual
+from pkgcore.repository.misc import nodeps_repo
+from pkgcore.resolver import plan
+
+from snakeoil.demandload import demandload
+demandload(globals(),
+ 'pkgcore.restrictions:packages,values',
+ 'pkgcore.pkgsets.glsa:KeyedAndRestriction',
+)
+
+def upgrade_resolver(vdb, dbs, verify_vdb=True, nodeps=False,
+ force_replacement=False,
+ resolver_cls=plan.merge_plan, **kwds):
+
+ """
+ generate and configure a resolver for upgrading all processed nodes.
+
+ @param vdb: list of L{pkgcore.repository.prototype.tree} instances
+ that represents the livefs
+ @param dbs: list of L{pkgcore.repository.prototype.tree} instances
+ representing sources of pkgs
+ @param verify_vdb: should we stop resolving once we hit the vdb,
+ or do full resolution?
+ @param force_vdb_virtuals: old style portage virtuals (non metapkgs)
+ cannot be technically sorted since their versions are from multiple
+ packages bleeding through- results make no sense essentially.
+ You want this option enabled if you're dealing in old style virtuals.
+ @return: L{pkgcore.resolver.plan.merge_plan} instance
+ """
+
+ f = plan.merge_plan.prefer_highest_version_strategy
+ # hack.
+ vdb = list(vdb.trees)
+ if not isinstance(dbs, (list, tuple)):
+ dbs = [dbs]
+ if nodeps:
+ vdb = map(nodeps_repo, vdb)
+ dbs = map(nodeps_repo, dbs)
+ elif not verify_vdb:
+ vdb = map(nodeps_repo, vdb)
+
+ if force_replacement:
+ resolver_cls = generate_replace_resolver_kls(resolver_cls)
+ return resolver_cls(dbs + vdb, plan.pkg_sort_highest, f, **kwds)
+
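+# Illustrative usage sketch (not part of the original snapshot): both this
+# helper and min_install_resolver below are handed the livefs vdb and the
+# source repositories and hand back a configured plan.merge_plan, which is
+# then fed atoms; 'domain' and 'some_atom' are stand-ins for objects the
+# caller already has, not names defined in this module:
+#
+#     resolver = upgrade_resolver(domain.vdb, domain.repos)
+#     resolver.add_atom(some_atom)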
+
+def min_install_resolver(vdb, dbs, verify_vdb=True, force_vdb_virtuals=True,
+ force_replacement=False, resolver_cls=plan.merge_plan,
+ nodeps=False, **kwds):
+ """
+ Resolver that tries to minimize the number of changes while installing.
+
+ generate and configure a resolver that is focused on just
+ installing requests- installs highest version it can build a
+ solution for, but tries to avoid building anything not needed
+
+ @param vdb: list of L{pkgcore.repository.prototype.tree} instances
+ that represents the livefs
+ @param dbs: list of L{pkgcore.repository.prototype.tree} instances
+ representing sources of pkgs
+ @param verify_vdb: should we stop resolving once we hit the vdb,
+ or do full resolution?
+ @param force_vdb_virtuals: old style portage virtuals (non metapkgs)
+ cannot be technically sorted since their versions are from multiple
+ packages bleeding through- results make no sense essentially.
+ You want this option enabled if you're dealing in old style virtuals.
+ @return: L{pkgcore.resolver.plan.merge_plan} instance
+ """
+
+ # nothing fancy required for force_vdb_virtuals, we just silently ignore it.
+ vdb = list(vdb.trees)
+ if not isinstance(dbs, (list, tuple)):
+ dbs = [dbs]
+ if nodeps:
+ vdb = map(nodeps_repo, vdb)
+ dbs = map(nodeps_repo, dbs)
+ elif not verify_vdb:
+ vdb = map(nodeps_repo, vdb)
+
+ if force_replacement:
+ resolver_cls = generate_replace_resolver_kls(resolver_cls)
+ return resolver_cls(vdb + dbs, plan.pkg_sort_highest,
+ plan.merge_plan.prefer_reuse_strategy, **kwds)
+
+_vdb_restrict = packages.OrRestriction(
+ packages.PackageRestriction("repo.livefs", values.EqualityMatch(False)),
+ packages.AndRestriction(
+ packages.PackageRestriction(
+ "category", values.StrExactMatch("virtual")),
+ packages.PackageRestriction(
+ "package_is_real", values.EqualityMatch(False))
+ )
+ )
+
+class empty_tree_merge_plan(plan.merge_plan):
+
+ _vdb_restriction = _vdb_restrict
+
+ def __init__(self, *args, **kwds):
+ """
+ @param args: see L{pkgcore.resolver.plan.merge_plan.__init__}
+ for valid args
+ @param kwds: see L{pkgcore.resolver.plan.merge_plan.__init__}
+ for valid args
+ """
+ plan.merge_plan.__init__(self, *args, **kwds)
+ # XXX *cough*, hack.
+ self._empty_dbs = self.dbs
+
+ def add_atom(self, atom):
+ return plan.merge_plan.add_atom(
+ self, atom, dbs=self._empty_dbs)
+
+
+def generate_replace_resolver_kls(resolver_kls):
+
+
+ class replace_resolver(resolver_kls):
+ overriding_resolver_kls = resolver_kls
+ _vdb_restriction = _vdb_restrict
+
+ def add_atom(self, atom, **kwds):
+ return self.overriding_resolver_kls.add_atom(
+ self, KeyedAndRestriction(
+ self._vdb_restriction, atom, key=atom.key), **kwds)
+
+ return replace_resolver
diff --git a/pkgcore/ebuild/triggers.py b/pkgcore/ebuild/triggers.py
new file mode 100644
index 0000000..0964f28
--- /dev/null
+++ b/pkgcore/ebuild/triggers.py
@@ -0,0 +1,458 @@
+# Copyright: 2006-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+gentoo/ebuild specific triggers
+"""
+
+import os, errno
+from pkgcore.merge import triggers, const, errors
+from pkgcore.fs import livefs
+from pkgcore.restrictions import values
+
+from snakeoil.osutils import normpath
+from snakeoil.fileutils import read_bash_dict, AtomicWriteFile
+from snakeoil.osutils import listdir_files
+from snakeoil.lists import stable_unique, iflatten_instance
+from snakeoil.osutils import join as pjoin
+
+from snakeoil.demandload import demandload
+demandload(globals(), "fnmatch", 'pkgcore:os_data')
+
+colon_parsed = frozenset(
+ ["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH", "INFODIR", "INFOPATH",
+ "LDPATH", "MANPATH", "PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
+ "PYTHONPATH", "PKG_CONFIG_PATH", "ROOTPATH"])
+
+incrementals = frozenset(
+ ['ADA_INCLUDE_PATH', 'ADA_OBJECTS_PATH', 'CLASSPATH', 'CONFIG_PROTECT',
+ 'CONFIG_PROTECT_MASK', 'INFODIR', 'INFOPATH', 'KDEDIRS', 'LDPATH',
+ 'MANPATH', 'PATH', 'PRELINK_PATH', 'PRELINK_PATH_MASK', 'PYTHONPATH',
+ 'ROOTPATH', 'PKG_CONFIG_PATH'])
+
+default_ldpath = ('/lib', '/lib64', '/lib32',
+ '/usr/lib', '/usr/lib64', '/usr/lib32')
+
+def collapse_envd(base):
+ collapsed_d = {}
+ try:
+ env_d_files = sorted(listdir_files(base))
+ except OSError, oe:
+ if oe.errno != errno.ENOENT:
+ raise
+ else:
+ for x in env_d_files:
+ if x.endswith(".bak") or x.endswith("~") or x.startswith("._cfg") \
+ or len(x) <= 2 or not x[0:2].isdigit():
+ continue
+ d = read_bash_dict(pjoin(base, x))
+ # inefficient, but works.
+ for k, v in d.iteritems():
+ collapsed_d.setdefault(k, []).append(v)
+ del d
+
+ loc_incrementals = set(incrementals)
+ loc_colon_parsed = set(colon_parsed)
+
+ # split out env.d defined incrementals..
+ # update incrementals *and* colon parsed for COLON_SEPARATED;
+ # incrementals on its own is space separated.
+
+ for x in collapsed_d.pop("COLON_SEPARATED", []):
+ v = x.split()
+ if v:
+ loc_colon_parsed.update(v)
+
+ loc_incrementals.update(loc_colon_parsed)
+
+ # now space.
+ for x in collapsed_d.pop("SPACE_SEPARATED", []):
+ v = x.split()
+ if v:
+ loc_incrementals.update(v)
+
+ # now reinterpret.
+ for k, v in collapsed_d.iteritems():
+ if k not in loc_incrementals:
+ collapsed_d[k] = v[-1]
+ continue
+ if k in loc_colon_parsed:
+ collapsed_d[k] = filter(None, iflatten_instance(
+ x.split(':') for x in v))
+ else:
+ collapsed_d[k] = filter(None, iflatten_instance(
+ x.split() for x in v))
+
+ return collapsed_d, loc_incrementals, loc_colon_parsed
+
+
+def string_collapse_envd(envd_dict, incrementals, colon_incrementals):
+ """transform a passed in dict to strictly strings"""
+ for k, v in envd_dict.iteritems():
+ if k not in incrementals:
+ continue
+ if k in colon_incrementals:
+ envd_dict[k] = ':'.join(v)
+ else:
+ envd_dict[k] = ' '.join(v)
+
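+# Illustrative sketch (not part of the original snapshot): given an
+# /etc/env.d containing 20compiler with LDPATH="/usr/lib/gcc" and
+# 50baselayout with PATH="/usr/local/bin", collapse_envd() yields roughly
+# {"LDPATH": ["/usr/lib/gcc"], "PATH": ["/usr/local/bin"], ...} along with
+# the effective incremental and colon-parsed sets; string_collapse_envd()
+# then joins those incremental values back into ':' or ' ' separated
+# strings for writing out.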
+
+def update_ldso(ld_search_path, offset='/'):
+ # we do an atomic rename instead of opening and rewriting in place
+ # (avoids the race, iow)
+ fp = pjoin(offset, 'etc', 'ld.so.conf')
+ new_f = AtomicWriteFile(fp, uid=os_data.root_uid, gid=os_data.root_gid, perms=0644)
+ new_f.write("# automatically generated, edit env.d files instead\n")
+ new_f.writelines(x.strip()+"\n" for x in ld_search_path)
+ new_f.close()
+
+
+class env_update(triggers.base):
+
+ required_csets = ()
+ _hooks = ('post_unmerge', 'post_merge')
+ _priority = 5
+
+ def trigger(self, engine):
+ offset = engine.offset
+ d, inc, colon = collapse_envd(pjoin(offset, "etc/env.d"))
+
+ l = d.pop("LDPATH", None)
+ if l is not None:
+ update_ldso(l, engine.offset)
+
+ string_collapse_envd(d, inc, colon)
+
+ new_f = AtomicWriteFile(pjoin(offset, "etc", "profile.env"), uid=os_data.root_uid, gid=os_data.root_gid, perms=0644)
+ new_f.write("# autogenerated. update env.d instead\n")
+ new_f.writelines('export %s="%s"\n' % (k, d[k]) for k in sorted(d))
+ new_f.close()
+ new_f = AtomicWriteFile(pjoin(offset, "etc", "profile.csh"), uid=os_data.root_uid, gid=os_data.root_gid, perms=0644)
+ new_f.write("# autogenerated, update env.d instead\n")
+ new_f.writelines('setenv %s="%s"\n' % (k, d[k]) for k in sorted(d))
+ new_f.close()
+
+
+def simple_chksum_compare(x, y):
+ found = False
+ for k, v in x.chksums.iteritems():
+ if k == "size":
+ continue
+ o = y.chksums.get(k)
+ if o is not None:
+ if o != v:
+ return False
+ found = True
+ if "size" in x.chksums and "size" in y.chksums:
+ return x.chksums["size"] == y.chksums["size"]
+ return found
+
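+# Illustrative example (not part of the original snapshot): for two
+# entries exposing a chksums mapping, simple_chksum_compare returns True
+# only if every common non-size checksum matches and, when both carry a
+# "size", the sizes agree as well; with no common checksum type at all it
+# returns False.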
+
+def gen_config_protect_filter(offset, extra_protects=(), extra_disables=()):
+ collapsed_d, inc, colon = collapse_envd(pjoin(offset, "etc/env.d"))
+ collapsed_d.setdefault("CONFIG_PROTECT", []).extend(extra_protects)
+ collapsed_d.setdefault("CONFIG_PROTECT_MASK", []).extend(extra_disables)
+
+ r = [values.StrGlobMatch(normpath(x).rstrip("/") + "/")
+ for x in set(stable_unique(collapsed_d["CONFIG_PROTECT"] + ["/etc"]))]
+ if len(r) > 1:
+ r = values.OrRestriction(*r)
+ else:
+ r = r[0]
+ neg = stable_unique(collapsed_d["CONFIG_PROTECT_MASK"])
+ if neg:
+ if len(neg) == 1:
+ r2 = values.StrGlobMatch(normpath(neg[0]).rstrip("/") + "/",
+ negate=True)
+ else:
+ r2 = values.OrRestriction(
+ negate=True,
+ *[values.StrGlobMatch(normpath(x).rstrip("/") + "/")
+ for x in set(neg)])
+ r = values.AndRestriction(r, r2)
+ return r
+
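+# Illustrative sketch (not part of the original snapshot): with
+# CONFIG_PROTECT="/etc /usr/share/config" and CONFIG_PROTECT_MASK="/etc/env.d"
+# in the collapsed env.d data, the returned restriction matches
+# "/etc/conf.d/net" and "/usr/share/config/foo" but not "/etc/env.d/20foo";
+# the install/uninstall triggers below use exactly this filter to decide
+# whether a file is config-protected.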
+
+class ConfigProtectInstall(triggers.base):
+
+ required_csets = ('install_existing', 'install')
+ _hooks = ('pre_merge',)
+ _priority = 90
+
+ def __init__(self, extra_protects=(), extra_disables=()):
+ triggers.base.__init__(self)
+ self.renames = {}
+ self.extra_protects = extra_protects
+ self.extra_disables = extra_disables
+
+ def register(self, engine):
+ triggers.base.register(self, engine)
+ t2 = ConfigProtectInstall_restore(self.renames)
+ t2.register(engine)
+
+ def trigger(self, engine, existing_cset, install_cset):
+ # hackish, but it works.
+ protected_filter = gen_config_protect_filter(engine.offset,
+ self.extra_protects, self.extra_disables).match
+ protected = {}
+
+ for x in existing_cset.iterfiles():
+ if x.location.endswith("/.keep"):
+ continue
+ elif protected_filter(x.location):
+ replacement = install_cset[x]
+ if not simple_chksum_compare(replacement, x):
+ protected.setdefault(
+ pjoin(engine.offset,
+ os.path.dirname(x.location).lstrip(os.path.sep)),
+ []).append((os.path.basename(replacement.location),
+ replacement))
+
+ for dir_loc, entries in protected.iteritems():
+ updates = dict((x[0], []) for x in entries)
+ try:
+ existing = sorted(x for x in os.listdir(dir_loc)
+ if x.startswith("._cfg"))
+ except OSError, oe:
+ if oe.errno != errno.ENOENT:
+ raise
+ # this shouldn't occur.
+ continue
+
+ for x in existing:
+ try:
+ # ._cfg0000_filename
+ count = int(x[5:9])
+ if x[9] != "_":
+ raise ValueError
+ fn = x[10:]
+ except (ValueError, IndexError):
+ continue
+ if fn in updates:
+ updates[fn].append((count, fn))
+
+
+ # now we rename.
+ for fname, entry in entries:
+ # check for any updates with the same chksums.
+ count = 0
+ for cfg_count, cfg_fname in updates[fname]:
+ if simple_chksum_compare(livefs.gen_obj(
+ pjoin(dir_loc, cfg_fname)), entry):
+ count = cfg_count
+ break
+ count = max(count, cfg_count + 1)
+ try:
+ install_cset.remove(entry)
+ except KeyError:
+ # this shouldn't occur...
+ continue
+ new_fn = pjoin(dir_loc, "._cfg%04i_%s" % (count, fname))
+ new_entry = entry.change_attributes(location=new_fn)
+ install_cset.add(new_entry)
+ self.renames[new_entry] = entry
+ del updates
+
+
+class ConfigProtectInstall_restore(triggers.base):
+
+ required_csets = ('install',)
+ _hooks = ('post_merge',)
+ _priority = 10
+
+ def __init__(self, renames_dict):
+ triggers.base.__init__(self)
+ self.renames = renames_dict
+
+ def trigger(self, engine, install_cset):
+ for new_entry, old_entry in self.renames.iteritems():
+ try:
+ install_cset.remove(new_entry)
+ except KeyError:
+ continue
+ install_cset.add(old_entry)
+ self.renames.clear()
+
+
+class ConfigProtectUninstall(triggers.base):
+
+ required_csets = ('uninstall_existing', 'uninstall')
+ _hooks = ('pre_unmerge',)
+
+ def trigger(self, engine, existing_cset, uninstall_cset):
+ protected_restrict = gen_config_protect_filter(engine.offset)
+
+ remove = []
+ for x in existing_cset.iterfiles():
+ if x.location.endswith("/.keep"):
+ continue
+ if protected_restrict.match(x.location):
+ recorded_ent = uninstall_cset[x]
+ if not simple_chksum_compare(recorded_ent, x):
+ # chksum differs. file stays.
+ remove.append(recorded_ent)
+
+ for x in remove:
+ del uninstall_cset[x]
+
+
+class preinst_contents_reset(triggers.base):
+
+ required_csets = ('install',)
+ _hooks = ('pre_merge',)
+ _priority = 1
+
+ def __init__(self, format_op):
+ triggers.base.__init__(self)
+ self.format_op = format_op
+
+ def trigger(self, engine, cset):
+ # wipe, and get data again.
+ cset.clear()
+ cs = engine.new._parent.scan_contents(self.format_op.env["D"])
+ if engine.offset != '/':
+ cs = cs.insert_offset(engine.offset)
+ cset.update(cs)
+
+
+class collision_protect(triggers.base):
+
+ required_csets = {
+ const.INSTALL_MODE:('install', 'install_existing'),
+ const.REPLACE_MODE:('install', 'install_existing', 'old_cset')
+ }
+
+ _hooks = ('sanity_check',)
+ _engine_types = triggers.INSTALLING_MODES
+
+ def __init__(self, extra_protects=(), extra_disables=()):
+ triggers.base.__init__(self)
+ self.extra_protects = extra_protects
+ self.extra_disables = extra_disables
+
+ def trigger(self, engine, install, existing, old_cset=()):
+ if not existing:
+ return
+
+ # for the moment, we just care about files
+ colliding = existing.difference(install.iterdirs())
+
+ # filter out daft .keep files.
+
+ # hackish, but it works.
+ protected_filter = gen_config_protect_filter(engine.offset,
+ self.extra_protects, self.extra_disables).match
+
+ l = []
+ for x in colliding:
+ if x.location.endswith(".keep"):
+ l.append(x)
+ elif protected_filter(x.location):
+ l.append(x)
+
+ colliding.difference_update(l)
+ del l, protected_filter
+ if not colliding:
+ return
+
+ colliding.difference_update(old_cset)
+ if colliding:
+ raise errors.BlockModification(self,
+ "collision-protect: file(s) already exist: ( %s )" %
+ ', '.join(repr(x) for x in sorted(colliding)))
+
+
+class InfoRegen(triggers.InfoRegen):
+
+ _label = "ebuild info regen"
+
+ def register(self, engine):
+ # wipe pre-existing info triggers.
+ for x in self._hooks:
+ if x not in engine.hooks:
+ continue
+ # yucky, but works.
+ wipes = [y for y in engine.hooks[x]
+ if y.label == triggers.InfoRegen._label]
+ for y in wipes:
+ engine.hooks[x].remove(y)
+ triggers.InfoRegen.register(self, engine)
+
+ def trigger(self, engine, *args):
+ self.engine = engine
+ self.path = pjoin(engine.offset, "etc/env.d")
+ triggers.InfoRegen.trigger(self, engine, *args)
+
+ @property
+ def locations(self):
+ collapsed_d = collapse_envd(self.path)[0]
+ l = collapsed_d.get("INFOPATH", ())
+ if not l:
+ return triggers.InfoRegen.locations
+ elif isinstance(l, basestring):
+ l = l.split()
+ return l
+
+
+class FixImageSymlinks(triggers.base):
+ required_csets = ('new_cset',)
+ _hooks = ('pre_merge',)
+
+ def __init__(self, format_op):
+ triggers.base.__init__(self)
+ self.format_op = format_op
+
+ def trigger(self, engine, cset):
+ d = self.format_op.env["D"].rstrip("/") + "/"
+ l = [x for x in cset.iterlinks() if x.target.startswith(d)]
+ if engine.observer:
+ o = engine.observer
+ for x in l:
+ o.warn("correcting %s sym pointing into $D: %s" %
+ (x.location, x.target))
+ d_len = len(d)
+
+ # drop the leading ${D}, and force an abspath via '/'
+ cset.update(x.change_attributes(target=pjoin('/', x.target[d_len:]))
+ for x in l)
+
+
+def customize_engine(domain_settings, engine):
+ env_update().register(engine)
+
+ protect = domain_settings.get('CONFIG_PROTECT', ())
+ if isinstance(protect, basestring):
+ protect = protect.split()
+ mask = domain_settings.get('CONFIG_PROTECT_MASK', ())
+ if isinstance(mask, basestring):
+ mask = mask.split()
+
+ ConfigProtectInstall(protect, mask).register(engine)
+ ConfigProtectUninstall().register(engine)
+
+ features = domain_settings.get("FEATURES", ())
+ if "collision-protect" in features:
+ collision_protect(protect, mask).register(engine)
+
+ install_mask = domain_settings.get("INSTALL_MASK", '').split()
+
+ for x in ("man", "info", "doc"):
+ if "no%s" % x in features:
+ install_mask.append("/usr/share/%s" % x)
+ l = []
+ for x in install_mask:
+ x = x.rstrip("/")
+ l.append(values.StrRegex(fnmatch.translate(x)))
+ l.append(values.StrRegex(fnmatch.translate("%s/*" % x)))
+ install_mask = l
+
+ if install_mask:
+ if len(install_mask) == 1:
+ install_mask = install_mask[0]
+ else:
+ install_mask = values.OrRestriction(*install_mask)
+ triggers.PruneFiles(install_mask.match).register(engine)
+ # note that if this wipes all /usr/share/ entries, it should
+ # also wipe the now-empty dir.
+
+ InfoRegen().register(engine)
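
The INSTALL_MASK handling above turns each mask entry into two glob-derived regexes: one for the entry itself and one for everything beneath it. A minimal stdlib-only sketch of that translation, with an invented mask value:

    import fnmatch, re

    mask = "/usr/share/doc"                                  # example INSTALL_MASK entry
    exact = re.compile(fnmatch.translate(mask))              # the entry itself
    children = re.compile(fnmatch.translate(mask + "/*"))    # anything beneath it
    bool(children.match("/usr/share/doc/foo-1.0/README"))    # True  -> pruned at merge time
    bool(exact.match("/usr/share/man/man1/ls.1"))            # False -> left alone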
diff --git a/pkgcore/fetch/__init__.py b/pkgcore/fetch/__init__.py
new file mode 100644
index 0000000..c9065b7
--- /dev/null
+++ b/pkgcore/fetch/__init__.py
@@ -0,0 +1,126 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+functionality related to downloading files
+"""
+
+from snakeoil.klass import generic_equality
+
+class fetchable(object):
+
+ """class representing uri sources for a file and chksum information."""
+
+ __slots__ = ("filename", "uri", "chksums")
+ __attr_comparison__ = __slots__
+ __metaclass__ = generic_equality
+
+ def __init__(self, filename, uri=(), chksums=None):
+ """
+ @param filename: filename...
+ @param uri: either None (no uri),
+ or a sequence of uri where the file is available
+ @param chksums: either None (no chksum data),
+ or a dict of chksum_type -> value for this file
+ """
+ self.uri = uri
+ if chksums is None:
+ self.chksums = {}
+ else:
+ self.chksums = chksums
+ self.filename = filename
+
+ def __str__(self):
+ return "('%s', '%s', (%s))" % (
+ self.filename, self.uri, ', '.join(self.chksums))
+
+ def __repr__(self):
+ return "<%s filename=%r uri=%r chksums=%r @%#8x>" % (
+ self.__class__.__name__, self.filename, self.uri, self.chksums,
+ id(self))
+
+
+class mirror(object):
+ """
+ uri source representing a mirror tier
+ """
+ __metaclass__ = generic_equality
+ __attr_comparison__ = ('mirror_name', 'mirrors')
+
+ __slots__ = ("mirrors", "mirror_name")
+
+ def __init__(self, mirrors, mirror_name):
+ """
+ @param mirrors: list of hosts that comprise this mirror tier
+ @param mirror_name: name of the mirror tier
+ """
+
+ if not isinstance(mirrors, tuple):
+ mirrors = tuple(mirrors)
+ self.mirrors = mirrors
+ self.mirror_name = mirror_name
+
+ def __iter__(self):
+ return iter(self.mirrors)
+
+ def __str__(self):
+ return "mirror://%s" % self.mirror_name
+
+ def __len__(self):
+ return len(self.mirrors)
+
+ def __nonzero__(self):
+ return bool(self.mirrors)
+
+ def __getitem__(self, idx):
+ return self.mirrors[idx]
+
+ def __repr__(self):
+ return "<%s mirror tier=%r>" % (self.__class__, self.mirror_name)
+
+
+class default_mirror(mirror):
+ pass
+
+
+class uri_list(object):
+
+ __slots__ = ("_uri_source", "filename", "__weakref__")
+
+ def __init__(self, filename):
+ self._uri_source = []
+ self.filename = filename
+
+ def add_mirror(self, mirror_inst, suburi=None):
+ if not isinstance(mirror_inst, mirror):
+ raise TypeError("mirror must be a pkgcore.fetch.mirror instance")
+ if suburi is not None and '/' in suburi:
+ self._uri_source.append((mirror_inst, suburi.lstrip('/')))
+ else:
+ self._uri_source.append(mirror_inst)
+
+ def add_uri(self, uri):
+ self._uri_source.append(uri)
+
+ def finalize(self):
+ self._uri_source = tuple(self._uri_source)
+
+ def __iter__(self):
+ fname = self.filename
+ for entry in self._uri_source:
+ if isinstance(entry, basestring):
+ yield entry
+ elif isinstance(entry, tuple):
+ # mirror with suburi
+ for base_uri in entry[0]:
+ yield '%s/%s' % (base_uri.rstrip('/'), entry[1])
+ else:
+ for base_uri in entry:
+ yield "%s/%s" % (base_uri.rstrip('/'), fname)
+
+ def __str__(self):
+ return "file: %s, uri: %s" % (self.filename,
+ ', '.join(str(x) for x in self._uri_source))
+
+ def __nonzero__(self):
+ return bool(self._uri_source)
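
A minimal sketch of how mirror and uri_list compose; the host names and filename below are invented:

    from pkgcore.fetch import mirror, uri_list

    m = mirror(["http://mirror1.example.com/gentoo",
                "http://mirror2.example.com/gentoo"], "gentoo")
    uris = uri_list("foo-1.0.tar.bz2")
    uris.add_uri("http://upstream.example.com/foo-1.0.tar.bz2")
    uris.add_mirror(m)
    uris.finalize()
    list(uris)
    # ['http://upstream.example.com/foo-1.0.tar.bz2',
    #  'http://mirror1.example.com/gentoo/foo-1.0.tar.bz2',
    #  'http://mirror2.example.com/gentoo/foo-1.0.tar.bz2']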
diff --git a/pkgcore/fetch/base.py b/pkgcore/fetch/base.py
new file mode 100644
index 0000000..8b86c9f
--- /dev/null
+++ b/pkgcore/fetch/base.py
@@ -0,0 +1,80 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+prototype fetcher class, all fetchers should derive from this
+"""
+
+import os
+from pkgcore.chksum import get_handlers, get_chksums
+from pkgcore.fetch import errors
+
+
+class fetcher(object):
+
+ def _verify(self, file_location, target, all_chksums=True, handlers=None):
+ """
+ internal function for derivatives.
+
+ digs through chksums, and returns:
+ - -2: file doesn't exist.
+ - -1: if a size chksum is available, and the
+ file is smaller than the stated size
+ - 0: if all chksums match
+ - 1: if file is too large (if size chksums are available)
+ or else size is right but a chksum didn't match.
+
+ if all_chksums is True, all chksums must be verified; if False, only
+ those a handler can be found for are used.
+ """
+
+ nondefault_handlers = handlers
+ if handlers is None:
+ handlers = get_handlers(target.chksums)
+ if all_chksums:
+ for x in target.chksums:
+ if x not in handlers:
+ raise errors.RequiredChksumDataMissing(target, x)
+
+ if "size" in handlers:
+ val = handlers["size"](file_location)
+ if val is None:
+ return -2
+ c = cmp(val, target.chksums["size"])
+ if c:
+ if c < 0:
+ return -1
+ return 1
+ elif not os.path.exists(file_location):
+ return -2
+
+ chfs = set(target.chksums).intersection(handlers)
+ chfs.discard("size")
+ chfs = list(chfs)
+ if nondefault_handlers:
+ for x in chfs:
+ if not handlers[x](file_location) == target.chksums[x]:
+ return 1
+ elif [target.chksums[x] for x in chfs] != \
+ get_chksums(file_location, *chfs):
+ return 1
+
+ return 0
+
+ def __call__(self, *a, **kw):
+ return self.fetch(*a, **kw)
+
+ def get_path(self, fetchable):
+ """
+ return the on disk path to a fetchable if it's available, and fully
+ fetched.
+
+ If it isn't, return None
+ """
+ raise NotImplementedError(self.get_path)
+
+ def get_storage_path(self):
+ """return the directory files are stored in
+ returns None if not applicable
+ """
+ return None
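
The numeric contract of _verify is what derivatives branch on. A hedged sketch of that branch as it might appear inside a derivative's fetch() method; file_path and target are placeholders, os is assumed imported as in this module, and the command/resume_command attributes mirror the custom fetcher that follows:

    status = self._verify(file_path, target)
    if status == 0:
        return file_path                    # every requested chksum matched
    elif status == -2:
        command = self.command              # file missing entirely: full fetch
    elif status == -1:
        command = self.resume_command       # undersized: resume
    else:                                   # status == 1
        os.unlink(file_path)                # oversized or bad chksum: refetch
        command = self.command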
diff --git a/pkgcore/fetch/custom.py b/pkgcore/fetch/custom.py
new file mode 100644
index 0000000..478a0dd
--- /dev/null
+++ b/pkgcore/fetch/custom.py
@@ -0,0 +1,158 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+fetcher class that pulls files via executing another program to do the fetching
+"""
+
+import os
+from pkgcore.spawn import spawn, is_userpriv_capable
+from pkgcore.os_data import portage_uid, portage_gid
+from pkgcore.fetch import errors, base, fetchable
+from pkgcore.config import ConfigHint
+from snakeoil.osutils import ensure_dirs, join as pjoin
+
+class MalformedCommand(errors.base):
+
+ def __init__(self, command):
+ errors.base.__init__(self,
+ "fetchcommand is malformed: %s" % (command,))
+ self.command = command
+
+
+class fetcher(base.fetcher):
+
+ pkgcore_config_type = ConfigHint(
+ {'userpriv': 'bool', 'required_chksums': 'list',
+ 'distdir': 'str', 'command': 'str', 'resume_command': 'str'},
+ allow_unknowns=True)
+
+ def __init__(self, distdir, command, resume_command=None,
+ required_chksums=None, userpriv=True, attempts=10,
+ readonly=False, **extra_env):
+ """
+ @param distdir: directory to download files to
+ @type distdir: string
+ @param command: shell command to execute to fetch a file
+ @type command: string
+ @param resume_command: if not None, command to use for resuming-
+ if None, command is reused
+ @param required_chksums: if None, all chksums must be verified,
+ else only chksums listed
+ @type required_chksums: None or sequence
+ @param userpriv: depriv for fetching?
+ @param attempts: max number of attempts before failing the fetch
+ @param readonly: controls whether fetching is allowed
+ """
+ base.fetcher.__init__(self)
+ self.distdir = distdir
+ if required_chksums is not None:
+ required_chksums = [x.lower() for x in required_chksums]
+ else:
+ required_chksums = []
+ if len(required_chksums) == 1 and required_chksums[0] == "all":
+ self.required_chksums = None
+ else:
+ self.required_chksums = required_chksums
+ def rewrite_command(string):
+ new_command = string.replace("${DISTDIR}", self.distdir)
+ new_command = new_command.replace("$DISTDIR", self.distdir)
+ new_command = new_command.replace("${URI}", "%(URI)s")
+ new_command = new_command.replace("$URI", "%(URI)s")
+ new_command = new_command.replace("${FILE}", "%(FILE)s")
+ new_command = new_command.replace("$FILE", "%(FILE)s")
+ if new_command == string:
+ raise MalformedCommand(string)
+ try:
+ new_command % {"URI":"blah", "FILE":"blah"}
+ except KeyError, k:
+ raise MalformedCommand("%s: unexpected key %s" % (command, k.args[0]))
+ return new_command
+
+ self.command = rewrite_command(command)
+ if resume_command is None:
+ self.resume_command = self.command
+ else:
+ self.resume_command = rewrite_command(resume_command)
+
+ self.attempts = attempts
+ self.userpriv = userpriv
+ kw = {"mode":0775}
+ if readonly:
+ kw["mode"] = 0555
+ if userpriv:
+ kw["gid"] = portage_gid
+ kw["minimal"] = True
+ if not ensure_dirs(self.distdir, **kw):
+ raise errors.distdirPerms(
+ self.distdir, "if userpriv, uid must be %i, gid must be %i. "
+ "if not readonly, directory must be 0775, else 0555" % (
+ portage_uid, portage_gid))
+
+ self.extra_env = extra_env
+
+ def fetch(self, target):
+ """
+ fetch a file
+
+ @type target: L{pkgcore.fetch.fetchable} instance
+ @return: None if fetching failed,
+ else on disk location of the copied file
+ """
+
+
+ if not isinstance(target, fetchable):
+ raise TypeError(
+ "target must be fetchable instance/derivative: %s" % target)
+
+ fp = pjoin(self.distdir, target.filename)
+ filename = os.path.basename(fp)
+
+ uri = iter(target.uri)
+ if self.userpriv and is_userpriv_capable():
+ extra = {"uid":portage_uid, "gid":portage_gid}
+ else:
+ extra = {}
+ extra["umask"] = 0002
+ extra["env"] = self.extra_env
+ attempts = self.attempts
+ try:
+ while attempts >= 0:
+ c = self._verify(fp, target)
+ if c == 0:
+ return fp
+ elif c > 0:
+ try:
+ os.unlink(fp)
+ command = self.command
+ except OSError, oe:
+ raise errors.UnmodifiableFile(fp, oe)
+ elif c == -2:
+ command = self.command
+ else:
+ command = self.resume_command
+
+ # yeah, it's funky, but it works.
+ if attempts > 0:
+ u = uri.next()
+ # note we're not even checking the results. the
+ # verify portion of the loop handles this. iow,
+ # don't trust their exit code. trust our chksums
+ # instead.
+ spawn(command % {"URI":u, "FILE":filename}, **extra)
+ attempts -= 1
+
+ except StopIteration:
+ # ran out of uris
+ return None
+
+ return None
+
+ def get_path(self, fetchable):
+ fp = pjoin(self.distdir, fetchable.filename)
+ if 0 == self._verify(fp, fetchable):
+ return fp
+ return None
+
+ def get_storage_path(self):
+ return self.distdir
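
A hedged configuration sketch; the wget command line, paths and chksum data are illustrative values, not defaults, and the distdir must be creatable with the requested mode:

    from pkgcore.fetch import fetchable
    from pkgcore.fetch.custom import fetcher

    f = fetcher(
        distdir="/var/tmp/distfiles",
        command='wget -O "${DISTDIR}/${FILE}" "${URI}"',
        resume_command='wget -c -O "${DISTDIR}/${FILE}" "${URI}"',
        required_chksums=["size", "sha1"],
        userpriv=False, attempts=3)
    target = fetchable("foo-1.0.tar.bz2",
        uri=["http://upstream.example.com/foo-1.0.tar.bz2"],
        chksums={"size": 123456})
    path = f(target)    # __call__ -> fetch(); on-disk path on success, None otherwise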
diff --git a/pkgcore/fetch/errors.py b/pkgcore/fetch/errors.py
new file mode 100644
index 0000000..1365089
--- /dev/null
+++ b/pkgcore/fetch/errors.py
@@ -0,0 +1,35 @@
+# Copyright: 2005 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+errors fetch subsystem may throw
+"""
+
+class base(Exception):
+ pass
+
+class distdirPerms(base):
+ def __init__(self, distdir, required):
+ base.__init__(
+ self, "distdir '%s' required fs attributes weren't enforcable: %s"
+ % (distdir, required))
+ self.distdir, self.required = distdir, required
+
+class UnmodifiableFile(base):
+ def __init__(self, filename, extra=''):
+ base.__init__(self, "Unable to update file %s, unmodifiable %s"
+ % (filename, extra))
+ self.file = filename
+
+class UnknownMirror(base):
+ def __init__(self, host, uri):
+ base.__init__(self, "uri mirror://%s/%s is has no known mirror tier"
+ % (host, uri))
+ self.host, self.uri = host, uri
+
+class RequiredChksumDataMissing(base):
+ def __init__(self, fetchable, chksum):
+ base.__init__(self, "chksum %s was configured as required, "
+ "but the data is missing from fetchable '%s'"
+ % (chksum, fetchable))
+ self.fetchable, self.missing_chksum = fetchable, chksum
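
Since every fetch error derives from errors.base, callers can catch the whole family in one clause; a minimal sketch with invented host/uri values:

    from pkgcore.fetch import errors

    try:
        raise errors.UnknownMirror("gentoo", "distfiles/foo-1.0.tar.bz2")
    except errors.base, e:
        pass    # any fetch-subsystem failure lands here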
diff --git a/pkgcore/fs/__init__.py b/pkgcore/fs/__init__.py
new file mode 100644
index 0000000..c2527ce
--- /dev/null
+++ b/pkgcore/fs/__init__.py
@@ -0,0 +1,7 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+# $Id:$
+
+"""
+filesystem abstractions, and select operations
+"""
diff --git a/pkgcore/fs/contents.py b/pkgcore/fs/contents.py
new file mode 100644
index 0000000..ec1281f
--- /dev/null
+++ b/pkgcore/fs/contents.py
@@ -0,0 +1,279 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+contents set- container of fs objects
+"""
+
+from pkgcore.fs import fs
+from snakeoil.compatibility import all
+from snakeoil.klass import generic_equality
+from snakeoil.demandload import demandload
+from snakeoil.osutils import normpath
+demandload(globals(),
+ 'pkgcore.fs.ops:offset_rewriter,change_offset_rewriter',
+)
+from itertools import ifilter
+from operator import attrgetter
+
+def check_instance(obj):
+ if not isinstance(obj, fs.fsBase):
+ raise TypeError("'%s' is not a fs.fsBase deriviative" % obj)
+ return obj.location, obj
+
+
+class contentsSet(object):
+ """set of L{fs<pkgcore.fs.fs>} objects"""
+
+ __metaclass__ = generic_equality
+ __attr_comparison__ = ('_dict',)
+
+ def __init__(self, initial=None, mutable=True):
+
+ """
+ @param initial: initial fs objs for this set
+ @type initial: sequence
+ @param mutable: controls if it's modifiable after initialization
+ """
+ self._dict = {}
+ if initial is not None:
+ self._dict.update(check_instance(x) for x in initial)
+ self.mutable = mutable
+
+ def __str__(self):
+ return "%s([%s])" % (self.__class__.__name__,
+ ', '.join(str(x) for x in self))
+
+ def __repr__(self):
+ return "%s([%s])" % (self.__class__.__name__,
+ ', '.join(repr(x) for x in self))
+
+ def add(self, obj):
+
+ """
+ add a new fs obj to the set
+
+ @param obj: must be a derivative of L{pkgcore.fs.fs.fsBase}
+ """
+
+ if not self.mutable:
+ # weird, but keeping with set.
+ raise AttributeError(
+ "%s is frozen; no add functionality" % self.__class__)
+ if not fs.isfs_obj(obj):
+ raise TypeError("'%s' is not a fs.fsBase class" % str(obj))
+ self._dict[obj.location] = obj
+
+ def __delitem__(self, obj):
+
+ """
+ remove a fs obj from the set
+
+ @type obj: a derivative of L{pkgcore.fs.fs.fsBase}
+ or a string location of an obj in the set.
+ @raise KeyError: if the obj isn't found
+ """
+
+ if not self.mutable:
+ # weird, but keeping with set.
+ raise AttributeError(
+ "%s is frozen; no remove functionality" % self.__class__)
+ if fs.isfs_obj(obj):
+ del self._dict[obj.location]
+ else:
+ del self._dict[normpath(obj)]
+
+ def remove(self, obj):
+ del self[obj]
+
+ def discard(self, obj):
+ if fs.isfs_obj(obj):
+ self._dict.pop(obj.location, None)
+ else:
+ self._dict.pop(obj, None)
+
+ def __getitem__(self, obj):
+ if fs.isfs_obj(obj):
+ return self._dict[obj.location]
+ return self._dict[normpath(obj)]
+
+ def __contains__(self, key):
+ if fs.isfs_obj(key):
+ return key.location in self._dict
+ return normpath(key) in self._dict
+
+ def clear(self):
+ """
+ clear the set
+ @raise AttributeError: if the instance is frozen
+ """
+ if not self.mutable:
+ # weird, but keeping with set.
+ raise AttributeError(
+ "%s is frozen; no clear functionality" % self.__class__)
+ self._dict.clear()
+
+ @staticmethod
+ def _convert_loc(iterable):
+ f = fs.isfs_obj
+ for x in iterable:
+ if f(x):
+ yield x.location
+ else:
+ yield x
+
+ @staticmethod
+ def _ensure_fsbase(iterable):
+ f = fs.isfs_obj
+ for x in iterable:
+ if not f(x):
+ raise ValueError("must be an fsBase derivative: got %r" % x)
+ yield x
+
+ def difference(self, other):
+ if not hasattr(other, '__contains__'):
+ other = set(self._convert_loc(other))
+ return contentsSet((x for x in self if x.location not in other),
+ mutable=self.mutable)
+
+ def difference_update(self, other):
+ if not self.mutable:
+ raise TypeError("%r isn't mutable" % self)
+
+ rem = self.remove
+ for x in other:
+ if x in self:
+ rem(x)
+
+ def intersection(self, other):
+ return contentsSet((x for x in other if x in self),
+ mutable=self.mutable)
+
+ def intersection_update(self, other):
+ if not self.mutable:
+ raise TypeError("%r isn't mutable" % self)
+ if not hasattr(other, '__contains__'):
+ other = set(self._convert_loc(other))
+
+ l = [x for x in self if x.location not in other]
+ for x in l:
+ self.remove(x)
+
+ def issubset(self, other):
+ if not hasattr(other, '__contains__'):
+ other = set(self._convert_loc(other))
+ return all(x.location in other for x in self._dict)
+
+ def issuperset(self, other):
+ return all(x in self for x in other)
+
+ def union(self, other):
+ c = contentsSet(other)
+ c.update(self)
+ return c
+
+ def __iter__(self):
+ return self._dict.itervalues()
+
+ def __len__(self):
+ return len(self._dict)
+
+ def symmetric_difference(self, other):
+ c = contentsSet(mutable=True)
+ c.update(self)
+ c.symmetric_difference_update(other)
+ object.__setattr__(c, 'mutable', self.mutable)
+ return c
+
+ def symmetric_difference_update(self, other):
+ if not self.mutable:
+ raise TypeError("%r isn't mutable" % self)
+ if not hasattr(other, '__contains__'):
+ other = contentsSet(self._ensure_fsbase(other))
+ l = []
+ for x in self:
+ if x in other:
+ l.append(x)
+ add = self.add
+ for x in other:
+ if x not in self:
+ add(x)
+ rem = self.remove
+ for x in l:
+ rem(x)
+ del l, rem
+
+ def update(self, iterable):
+ self._dict.update((x.location, x) for x in iterable)
+
+ def iterfiles(self, invert=False):
+ if invert:
+ return (x for x in self if not x.is_reg)
+ return ifilter(attrgetter('is_reg'), self)
+
+ def files(self, invert=False):
+ return list(self.iterfiles(invert=invert))
+
+ def iterdirs(self, invert=False):
+ if invert:
+ return (x for x in self if not x.is_dir)
+ return ifilter(attrgetter('is_dir'), self)
+
+ def dirs(self, invert=False):
+ return list(self.iterdirs(invert=invert))
+
+ def iterlinks(self, invert=False):
+ if invert:
+ return (x for x in self if not x.is_sym)
+ return ifilter(attrgetter('is_sym'), self)
+
+ def links(self, invert=False):
+ return list(self.iterlinks(invert=invert))
+
+ def iterdevs(self, invert=False):
+ if invert:
+ return (x for x in self if not x.is_dev)
+ return ifilter(attrgetter('is_dev'), self)
+
+ def devs(self, invert=False):
+ return list(self.iterdevs(invert=invert))
+
+ def iterfifos(self, invert=False):
+ if invert:
+ return (x for x in self if not x.is_fifo)
+ return ifilter(attrgetter('is_fifo'), self)
+
+ def fifos(self, invert=False):
+ return list(self.iterfifos(invert=invert))
+
+ for k in ("files", "dirs", "links", "devs", "fifos"):
+ s = k.capitalize()
+ locals()[k].__doc__ = \
+ """
+ returns a list of just L{pkgcore.fs.fs.fs%s} instances
+ @param invert: if True, yield everything that isn't a
+ fs%s instance, else yields just fs%s
+ """ % (s.rstrip("s"), s, s)
+ locals()["iter"+k].__doc__ = \
+ """
+ a generator yielding just L{pkgcore.fs.fs.fs%s} instances
+ @param invert: if True, yield everything that isn't a
+ fs%s instance, else yields just fs%s
+ """ % (s.rstrip("s"), s, s)
+ del s
+ del k
+
+ def clone(self, empty=False):
+ if empty:
+ return self.__class__([], mutable=True)
+ return self.__class__(self._dict.itervalues(), mutable=True)
+
+ def insert_offset(self, offset):
+ cset = self.clone(empty=True)
+ cset.update(offset_rewriter(offset, self))
+ return cset
+
+ def change_offset(self, old_offset, new_offset):
+ cset = self.clone(empty=True)
+ cset.update(change_offset_rewriter(old_offset, new_offset, self))
+ return cset
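
A minimal sketch of contentsSet usage; membership is keyed on normalized location, and the paths below are invented:

    from pkgcore.fs.fs import fsDir, fsFile
    from pkgcore.fs.contents import contentsSet

    cs = contentsSet([fsDir("/usr/bin", strict=False),
                      fsFile("/usr/bin/foo", strict=False)])
    "/usr/bin/foo" in cs                     # True: lookup by location string
    cs.files()                               # [file:/usr/bin/foo]
    cs.dirs()                                # [dir:/usr/bin]
    image = cs.insert_offset("/mnt/image")   # every location reprefixed under /mnt/image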
diff --git a/pkgcore/fs/fs.py b/pkgcore/fs/fs.py
new file mode 100644
index 0000000..07244cc
--- /dev/null
+++ b/pkgcore/fs/fs.py
@@ -0,0 +1,285 @@
+# Copyright 2004-2006 Brian Harring <ferringb@gmail.com>
+# License: GPL
+
+"""
+filesystem entry abstractions
+"""
+
+import stat
+from pkgcore.chksum import get_handlers, get_chksums
+from os.path import sep as path_seperator, realpath, abspath
+from pkgcore.interfaces.data_source import local_source
+from snakeoil.mappings import LazyFullValLoadDict
+from snakeoil.osutils import normpath
+
+# goofy set of classes representing the fs objects pkgcore knows of.
+
+__all__ = [
+ "fsFile", "fsDir", "fsSymlink", "fsDev", "fsFifo"]
+__all__.extend("is%s" % x for x in ("dir", "reg", "sym", "fifo", "dev",
+ "fs_obj"))
+
+# following are used to generate appropriate __init__, wiped from the
+# namespace at the end of the module
+
+_fs_doc = {
+ "mode":"""@keyword mode: int, the mode of this entry. """
+ """required if strict is set""",
+ "mtime":"""@keyword mtime: long, the mtime of this entry. """
+ """required if strict is set""",
+ "uid":"""@keyword uid: int, the uid of this entry. """
+ """required if strict is set""",
+ "gid":"""@keyword gid: int, the gid of this entry. """
+ """required if strict is set""",
+}
+
+def gen_doc_additions(init, slots):
+ if init.__doc__ is None:
+ d = raw_init_doc.split("\n")
+ else:
+ d = init.__doc__.split("\n")
+ init.__doc__ = "\n".join(k.lstrip() for k in d) + \
+ "\n".join(_fs_doc[k] for k in _fs_doc if k in slots)
+
+
+raw_init_doc = \
+"""
+@param location: location (real or intended) for this entry
+@param strict: is this fully representative of the entry, or only partially
+@raise KeyError: if strict is enabled, and not all args are passed in
+"""
+
+class fsBase(object):
+
+ """base class, all extensions must derive from this class"""
+ __slots__ = ("location", "mtime", "mode", "uid", "gid")
+ __attrs__ = __slots__
+ __default_attrs__ = {}
+
+ locals().update((x.replace("is", "is_"), False) for x in
+ __all__ if x.startswith("is") and x.islower() and not
+ x.endswith("fs_obj"))
+
+ def __init__(self, location, strict=True, **d):
+
+ d["location"] = normpath(location)
+
+ s = object.__setattr__
+ if strict:
+ for k in self.__attrs__:
+ s(self, k, d[k])
+ else:
+ for k, v in d.iteritems():
+ s(self, k, v)
+ gen_doc_additions(__init__, __attrs__)
+
+ def change_attributes(self, **kwds):
+ d = dict((x, getattr(self, x))
+ for x in self.__attrs__ if hasattr(self, x))
+ d.update(kwds)
+ # split location out
+ location = d.pop("location")
+ if not location.startswith(path_seperator):
+ location = abspath(location)
+ d["strict"] = False
+ return self.__class__(location, **d)
+
+ def __setattr__(self, key, value):
+ raise AttributeError(key)
+
+ def __getattr__(self, attr):
+ # we would only get called if it doesn't exist.
+ if attr in self.__attrs__:
+ return self.__default_attrs__.get(attr)
+ raise AttributeError(attr)
+
+ def __hash__(self):
+ return hash(self.location)
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return False
+ return self.location == other.location
+
+ def __ne__(self, other):
+ return not self == other
+
+ def realpath(self, cache=None):
+ """calculate the abspath/canonicalized path for this entry, returning
+ a new instance if the path differs.
+
+ @keyword cache: Either None (no cache), or a data object of path->
+ resolved. Currently unused, but left in for forwards compatibility
+ """
+ new_path = realpath(self.location)
+ if new_path == self.location:
+ return self
+ return self.change_attributes(location=new_path)
+
+
+known_handlers = tuple(get_handlers())
+
+class fsFile(fsBase):
+
+ """file class"""
+
+ __slots__ = ("chksums", "data_source")
+ __attrs__ = fsBase.__attrs__ + __slots__
+ __default_attrs__ = {"mtime":0l}
+
+ is_reg = True
+
+ def __init__(self, location, chksums=None, data_source=None, **kwds):
+ """
+ @param chksums: dict of checksums, key chksum_type: val hash val.
+ See L{pkgcore.chksum}.
+ """
+ if "mtime" in kwds:
+ kwds["mtime"] = long(kwds["mtime"])
+ if data_source is None:
+ data_source = local_source(location)
+ kwds["data_source"] = data_source
+
+ if chksums is None:
+ # this can be problematic offhand if the file is modified
+ # but chksum not triggered
+ chksums = LazyFullValLoadDict(known_handlers, self._chksum_callback)
+ kwds["chksums"] = chksums
+ fsBase.__init__(self, location, **kwds)
+ gen_doc_additions(__init__, __slots__)
+
+ def __repr__(self):
+ return "file:%s" % self.location
+
+ def _chksum_callback(self, chfs):
+ return zip(chfs, get_chksums(self.data, *chfs))
+
+ @property
+ def data(self):
+ return self.data_source
+
+
+class fsDir(fsBase):
+
+ """dir class"""
+
+ __slots__ = ()
+ is_dir = True
+
+ def __repr__(self):
+ return "dir:%s" % self.location
+
+ def __cmp__(self, other):
+ return cmp(
+ self.location.split(path_seperator),
+ other.location.split(path_seperator))
+
+
+class fsLink(fsBase):
+
+ """symlink class"""
+
+ __slots__ = ("target",)
+ __attrs__ = fsBase.__attrs__ + __slots__
+ is_sym = True
+
+ def __init__(self, location, target, **kwargs):
+ """
+ @param target: string, filepath of the symlinks target
+ """
+ kwargs["target"] = target
+ fsBase.__init__(self, location, **kwargs)
+ gen_doc_additions(__init__, __slots__)
+
+ def change_attributes(self, **kwds):
+ d = dict((x, getattr(self, x))
+ for x in self.__attrs__ if hasattr(self, x))
+ d.update(kwds)
+ # split location out
+ location = d.pop("location")
+ if not location.startswith(path_seperator):
+ location = abspath(location)
+ target = d.pop("target")
+ d["strict"] = False
+ return self.__class__(location, target, **d)
+
+ def __repr__(self):
+ return "symlink:%s->%s" % (self.location, self.target)
+
+
+fsSymlink = fsLink
+
+
+class fsDev(fsBase):
+
+ """dev class (char/block objects)"""
+
+ __slots__ = ("major", "minor")
+ __attrs__ = fsBase.__attrs__ + __slots__
+ __default_attrs__ = {"major":-1, "minor":-1}
+ is_dev = True
+
+ def __init__(self, path, major=-1, minor=-1, **kwds):
+ if kwds.get("strict", True):
+ if major == -1 or minor == -1:
+ raise TypeError(
+ "major/minor must be specified and positive ints")
+ if not stat.S_IFMT(kwds["mode"]):
+ raise TypeError(
+ "mode %o: must specify the device type (got %o)" % (
+ kwds["mode"], stat.S_IFMT(kwds["mode"])))
+ kwds["major"] = major
+ kwds["minor"] = minor
+ else:
+ if major != -1:
+ major = int(major)
+ if major < 0:
+ raise TypeError(
+ "major/minor must be specified and positive ints")
+ kwds["major"] = major
+
+ if minor != -1:
+ minor = int(minor)
+ if minor < 0:
+ raise TypeError(
+ "major/minor must be specified and positive ints")
+ kwds["minor"] = minor
+
+ fsBase.__init__(self, path, **kwds)
+
+ def __repr__(self):
+ return "device:%s" % self.location
+
+
+def get_major_minor(stat_inst):
+ """get major/minor from a stat instance
+ @return: major,minor tuple of ints
+ """
+ return (stat_inst.st_rdev >> 8) & 0xff, stat_inst.st_rdev & 0xff
+
+
+class fsFifo(fsBase):
+
+ """fifo class (socket objects)"""
+
+ __slots__ = ()
+ is_fifo = True
+
+ def __repr__(self):
+ return "fifo:%s" % self.location
+
+def mk_check(target, name):
+ def f(obj):
+ return isinstance(obj, target)
+ f.__name__ = name
+ f.__doc__ = "return True if obj is an instance of L{%s}, else False" % target.__name__
+ return f
+
+isdir = mk_check(fsDir, 'isdir')
+isreg = mk_check(fsFile, 'isreg')
+issym = mk_check(fsSymlink, 'issym')
+isfifo = mk_check(fsFifo, 'isfifo')
+isdev = mk_check(fsDev, 'isdev')
+isfs_obj = mk_check(fsBase, 'isfs_obj')
+
+del raw_init_doc, gen_doc_additions, _fs_doc, mk_check
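
A minimal sketch of the fs object API above; attribute values and paths are invented, and nothing here touches the filesystem since chksums are only computed lazily:

    from pkgcore.fs.fs import fsFile, fsSymlink, isreg, issym

    f = fsFile("/etc/hostname", strict=False, mode=0644, uid=0, gid=0, mtime=0)
    isreg(f)                               # True
    f2 = f.change_attributes(mode=0600)    # new immutable instance, same location
    sym = fsSymlink("/usr/bin/vi", "vim", strict=False)
    issym(sym)                             # True
    sym.target                             # 'vim'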
diff --git a/pkgcore/fs/livefs.py b/pkgcore/fs/livefs.py
new file mode 100644
index 0000000..7bc445a
--- /dev/null
+++ b/pkgcore/fs/livefs.py
@@ -0,0 +1,154 @@
+# Copyright: 2005-2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+interaction with the livefs: generating fs objects to represent the livefs.
+"""
+
+import os, collections, errno
+from stat import S_IMODE, S_ISDIR, S_ISREG, S_ISLNK, S_ISFIFO
+
+from pkgcore.fs.fs import (
+ fsFile, fsDir, fsSymlink, fsDev, fsFifo, get_major_minor)
+from pkgcore.fs.contents import contentsSet
+from pkgcore.chksum import get_handlers
+from pkgcore.interfaces.data_source import local_source
+
+from snakeoil.osutils import normpath, join as pjoin
+from snakeoil.mappings import LazyValDict
+from snakeoil.osutils import listdir
+
+__all__ = ["gen_obj", "scan", "iter_scan"]
+
+
+def gen_chksums(handlers, location):
+ def f(key):
+ return handlers[key](location)
+ return LazyValDict(handlers, f)
+
+
+def gen_obj(path, stat=None, chksum_handlers=None, real_location=None):
+
+ """
+ given a fs path, and an optional stat, create an appropriate fs obj.
+
+ @param stat: stat object to reuse if available
+ @param real_location: real path to the object if path is the desired
+ location rather than the existing location.
+ @raise KeyError: if no obj type matches the stat checks
+ @return: L{pkgcore.fs.fs.fsBase} derivative
+ """
+
+ if real_location is None:
+ real_location = path
+ if stat is None:
+ stat = os.lstat(real_location)
+ if chksum_handlers is None:
+ chksum_handlers = get_handlers()
+
+ mode = stat.st_mode
+ d = {"mtime":stat.st_mtime, "mode":S_IMODE(mode),
+ "uid":stat.st_uid, "gid":stat.st_gid}
+ if S_ISDIR(mode):
+ return fsDir(path, **d)
+ elif S_ISREG(mode):
+ d["size"] = stat.st_size
+ d["data_source"] = local_source(real_location)
+ return fsFile(path, **d)
+ elif S_ISLNK(mode):
+ d["target"] = os.readlink(real_location)
+ return fsSymlink(path, **d)
+ elif S_ISFIFO(mode):
+ return fsFifo(path, **d)
+ else:
+ major, minor = get_major_minor(stat)
+ d["minor"] = minor
+ d["major"] = major
+ d["mode"] = mode
+ return fsDev(path, **d)
+
+
+# hmm. this code is roughly 25x slower than find.
+# make it less slow somehow. the obj instantiation is a bit of a
+# killer I'm afraid; without obj, looking at 2.3ms roughly best of 3
+# 100 iterations, obj instantiation, 58ms.
+# also, os.path.join is rather slow.
+# in this case, we know it's always pegging one more dir on, so it's
+# fine doing it this way (especially since we're relying on
+# os.path.sep, not '/' :P)
+
+def _internal_iter_scan(path, chksum_handlers):
+ dirs = collections.deque([normpath(path)])
+ yield gen_obj(dirs[0], chksum_handlers=chksum_handlers)
+ while dirs:
+ base = dirs.popleft()
+ for x in listdir(base):
+ path = pjoin(base, x)
+ o = gen_obj(path, chksum_handlers=chksum_handlers,
+ real_location=path)
+ yield o
+ if isinstance(o, fsDir):
+ dirs.append(path)
+
+
+def _internal_offset_iter_scan(path, chksum_handlers, offset):
+ offset = normpath(offset)
+ path = normpath(path)
+ dirs = collections.deque([path[len(offset):]])
+ if dirs[0]:
+ yield gen_obj(dirs[0], chksum_handlers=chksum_handlers)
+
+ sep = os.path.sep
+ while dirs:
+ base = dirs.popleft()
+ real_base = pjoin(offset, base.lstrip(sep))
+ base = base.rstrip(sep) + sep
+ for x in listdir(real_base):
+ path = pjoin(base, x)
+ o = gen_obj(path, chksum_handlers=chksum_handlers,
+ real_location=pjoin(real_base, x))
+ yield o
+ if isinstance(o, fsDir):
+ dirs.append(path)
+
+
+def iter_scan(path, offset=None):
+ """
+ Recursively scan a path.
+
+ Does not follow symlinks pointing at dirs, just merely yields an
+ obj representing said symlink
+
+ @return: an iterator of L{pkgcore.fs.fs.fsBase} objects.
+
+ @param path: str path of what directory to scan in the livefs
+ @param offset: if not None, prefix to strip from each object's location.
+ if offset is /tmp, /tmp/blah becomes /blah
+ """
+ chksum_handlers = get_handlers()
+
+ if offset is None:
+ return _internal_iter_scan(path, chksum_handlers)
+ return _internal_offset_iter_scan(path, chksum_handlers, offset)
+
+
+def scan(*a, **kw):
+ """
+ collects iter_scan(*a, **kw) into a contentsSet
+ Look at iter_scan for valid args; the mutable keyword controls the set's mutability
+ """
+ mutable = kw.pop("mutable", True)
+ return contentsSet(iter_scan(*a, **kw), mutable=mutable)
+
+
+def intersect(cset):
+ """generate the intersect of a cset and the livefs"""
+ f = gen_obj
+ for x in cset:
+ try:
+ yield f(x.location)
+ except OSError, oe:
+ if oe.errno != errno.ENOENT:
+ raise
+ del oe
+
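
A hedged usage sketch; /tmp/image is an invented staging directory that has to exist for the scan to run:

    from pkgcore.fs import livefs

    # locations come back with the /tmp/image prefix stripped
    cset = livefs.scan("/tmp/image", offset="/tmp/image", mutable=True)
    for obj in cset.iterfiles():
        pass    # each obj is an fsFile; its chksums are computed lazily on access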
diff --git a/pkgcore/fs/ops.py b/pkgcore/fs/ops.py
new file mode 100644
index 0000000..d72f934
--- /dev/null
+++ b/pkgcore/fs/ops.py
@@ -0,0 +1,323 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+default fs ops.
+
+Shouldn't be accessed directly for the most part, use
+L{pkgcore.plugins} to get at these ops.
+"""
+
+import os, errno
+
+from pkgcore.fs import contents, fs
+from pkgcore.fs.livefs import gen_obj
+from pkgcore.spawn import spawn
+from pkgcore.const import COPY_BINARY
+from pkgcore.plugin import get_plugin
+
+from snakeoil.currying import partial
+from snakeoil.osutils import ensure_dirs, pjoin, normpath
+
+__all__ = [
+ "merge_contents", "unmerge_contents", "default_ensure_perms",
+ "default_copyfile", "default_mkdir"]
+
+def default_ensure_perms(d1, d2=None):
+
+ """Enforce a fs objects attributes on the livefs.
+
+ Attributes enforced are permissions, mtime, uid, gid.
+
+ @param d2: if not None, an fs object for what's on the livefs now
+ @raise OSError: if fs object attributes can't be enforced
+ @return: True on success, else an exception is thrown
+ """
+
+ m, o, g, t = d1.mode, d1.uid, d1.gid, d1.mtime
+ if o is None:
+ o = -1
+ if g is None:
+ g = -1
+ if d2 is None:
+ do_mode, do_chown, do_mtime = True, True, True
+ else:
+
+ do_mode = False
+ try:
+ if fs.isdir(d1) and fs.isdir(d2):
+ # if it's preexisting, keep its perms.
+ do_mode = False
+ else:
+ do_mode = (m is not None and m != d2.mode)
+ except AttributeError:
+ # yes. this _is_ stupid. vdb's don't always store all attributes
+ do_mode = False
+
+ do_chown = False
+ try:
+ do_chown = (o != d2.uid or g != d2.gid)
+ except AttributeError:
+ do_chown = True
+
+ try:
+ do_mtime = (t != d2.mtime)
+ except AttributeError:
+ do_mtime = True
+
+ if do_chown and (o != -1 or g != -1):
+ os.lchown(d1.location, o, g)
+ if do_mode and m is not None:
+ os.chmod(d1.location, m)
+ if do_mtime and t is not None:
+ os.utime(d1.location, (t, t))
+ return True
+
+
+def default_mkdir(d):
+ """
+ mkdir for a fsDir object
+
+ @param d: L{pkgcore.fs.fs.fsDir} instance
+ @raise OSError: if can't complete
+ @return: true if success, else an exception is thrown
+ """
+ if not d.mode:
+ mode = 0777
+ else:
+ mode = d.mode
+ os.mkdir(d.location, mode)
+ get_plugin("fs_ops.ensure_perms")(d)
+ return True
+
+# minor hack.
+
+class FailedCopy(TypeError):
+
+ def __init__(self, obj, msg):
+ self.obj = obj
+ self.msg = msg
+
+ def __str__(self):
+ return "failed copying %s:" % (self.obj, self.msg)
+
+
+class CannotOverwrite(FailedCopy):
+ def __init__(self, obj, existing):
+ self.obj, self.existing = obj, existing
+
+ def __str__(self):
+ return "cannot write %s due to %s existing" % (
+ self.obj, self.existing)
+
+
+def default_copyfile(obj, mkdirs=False):
+ """
+ copy a L{fs obj<pkgcore.fs.fs.fsBase>} to its stated location.
+
+ @param obj: L{pkgcore.fs.fs.fsBase} instance, exempting fsDir
+ @raise OSError: or, for non-file objs, Exception (this needs to be fixed)
+ @return: true if success, else an exception is thrown
+ """
+
+ existant = False
+ ensure_perms = get_plugin("fs_ops.ensure_perms")
+ if not fs.isfs_obj(obj):
+ raise TypeError("obj must be fsBase derivative: %r" % obj)
+ elif fs.isdir(obj):
+ raise TypeError("obj must not be a fsDir instance: %r" % obj)
+
+ try:
+ existing = gen_obj(obj.location)
+ if fs.isdir(existing):
+ raise CannotOverwrite(obj, existing)
+ existant = True
+ except OSError, oe:
+ # verify the parent dir is there at least
+ basefp = os.path.dirname(obj.location)
+ if basefp.strip(os.path.sep) and not os.path.exists(basefp):
+ if mkdirs:
+ if not ensure_dirs(basefp, mode=0750, minimal=True):
+ raise FailedCopy(obj, str(oe))
+ else:
+ raise
+ existant = False
+
+ if not existant:
+ fp = obj.location
+ else:
+ fp = existant_fp = obj.location + "#new"
+
+ if fs.isreg(obj):
+ src_f = obj.data.get_fileobj()
+ new_f = open(fp, "wb", 32768)
+ d = src_f.read(32768)
+ while d:
+ new_f.write(d)
+ d = src_f.read(32768)
+ new_f.close()
+ del src_f
+ elif fs.issym(obj):
+ os.symlink(obj.target, fp)
+ elif fs.isfifo(obj):
+ os.mkfifo(fp)
+ elif fs.isdev(obj):
+ dev = os.makedev(obj.major, obj.minor)
+ os.mknod(fp, obj.mode, dev)
+ else:
+ ret = spawn([COPY_BINARY, "-Rp", obj.location, fp])
+ if ret != 0:
+ raise FailedCopy(obj, "got %i from %s -Rp" % ret)
+ if not fs.issym(obj):
+ ensure_perms(obj.change_attributes(location=fp))
+
+ if existant:
+ os.rename(existant_fp, obj.location)
+ return True
+
+
+def offset_rewriter(offset, iterable):
+ sep = os.path.sep
+ for x in iterable:
+ yield x.change_attributes(
+ location=pjoin(offset, x.location.lstrip(sep)))
+
+
+def change_offset_rewriter(orig_offset, new_offset, iterable):
+ offset_len = len(orig_offset.rstrip(os.path.sep))
+ # localize it.
+ npf = normpath
+ for x in iterable:
+ # slip in the '/' default to force it to still generate a
+ # full path still
+ yield x.change_attributes(
+ location=npf(pjoin(new_offset, x.location[offset_len:])))
+
+
+def merge_contents(cset, offset=None, callback=lambda obj:None):
+
+ """
+ merge a L{pkgcore.fs.contents.contentsSet} instance to the livefs
+
+ @param cset: L{pkgcore.fs.contents.contentsSet} instance
+ @param offset: if not None, offset to prefix all locations with.
+ Think of it as target dir.
+ @param callback: callable to report each entry being merged
+ @raise OSError: see L{default_copyfile} and L{default_mkdir}
+ @return: True, or an exception is thrown on failure
+ (OSError, although see default_copyfile for specifics).
+ """
+
+ ensure_perms = get_plugin("fs_ops.ensure_perms")
+ copyfile = get_plugin("fs_ops.copyfile")
+ mkdir = get_plugin("fs_ops.mkdir")
+
+ if not isinstance(cset, contents.contentsSet):
+ raise TypeError("cset must be a contentsSet, got %r" % (cset,))
+
+ if offset is not None:
+ if os.path.exists(offset):
+ if not os.path.isdir(offset):
+ raise TypeError("offset must be a dir, or not exist: %s" % offset)
+ else:
+ mkdir(fs.fsDir(offset, strict=False))
+ iterate = partial(offset_rewriter, offset.rstrip(os.path.sep))
+ else:
+ iterate = iter
+
+ d = list(iterate(cset.iterdirs()))
+ d.sort()
+ for x in d:
+ callback(x)
+
+ try:
+ # we pass in the stat ourselves, using stat instead of
+ # lstat gen_obj uses internally; this is the equivalent of
+ # "deference that link"
+ obj = gen_obj(x.location, stat=os.stat(x.location))
+ if not fs.isdir(obj):
+ raise Exception(
+ "%s exists and needs to be a dir, but is a %s" %
+ (x.location, obj))
+ ensure_perms(x, obj)
+ except OSError, oe:
+ if oe.errno != errno.ENOENT:
+ raise
+ try:
+ # we do this form to catch dangling symlinks
+ mkdir(x)
+ except OSError, oe:
+ if oe.errno != errno.EEXIST:
+ raise
+ os.unlink(x.location)
+ mkdir(x)
+ ensure_perms(x)
+ del d
+
+ # might look odd, but what this does is minimize the try/except cost
+ # to one time, assuming everything behaves, rather than per item.
+ i = iterate(cset.iterdirs(invert=True))
+ while True:
+ try:
+ for x in i:
+ callback(x)
+ copyfile(x, mkdirs=True)
+ break
+ except CannotOverwrite, cf:
+ if not fs.issym(x):
+ raise
+
+ # by this time, all directories should've been merged.
+ # thus we can check the target
+ try:
+ if not fs.isdir(gen_obj(pjoin(x.location, x.target))):
+ raise
+ except OSError:
+ raise cf
+ return True
+
+
+def unmerge_contents(cset, offset=None, callback=lambda obj:None):
+
+ """
+ unmerge a L{pkgcore.fs.contents.contentsSet} instance to the livefs
+
+ @param cset: L{pkgcore.fs.contents.contentsSet} instance
+ @param offset: if not None, offset to prefix all locations with.
+ Think of it as target dir.
+ @param callback: callable to report each entry being unmerged
+ @raise OSError: see L{default_copyfile} and L{default_mkdir}
+ @return: True, or an exception is thrown on failure
+ (OSError, although see default_copyfile for specifics).
+ """
+
+ iterate = iter
+ if offset is not None:
+ iterate = partial(offset_rewriter, offset.rstrip(os.path.sep))
+
+ for x in iterate(cset.iterdirs(invert=True)):
+ callback(x)
+ try:
+ os.unlink(x.location)
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ # this is a fair sight faster than using sorted/reversed
+ l = list(iterate(cset.iterdirs()))
+ l.sort(reverse=True)
+ for x in l:
+ try:
+ os.rmdir(x.location)
+ except OSError, e:
+ if not e.errno in (errno.ENOTEMPTY, errno.ENOENT, errno.ENOTDIR,
+ errno.EBUSY):
+ raise
+ else:
+ callback(x)
+ return True
+
+# Plugin system priorities
+for func in [default_copyfile, default_ensure_perms, default_mkdir,
+ merge_contents, unmerge_contents]:
+ func.priority = 1
+del func
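
A hedged sketch of merging a scanned image onto a target root; the paths are invented, and real callers are expected to resolve these ops through get_plugin("fs_ops.*") as the module docstring notes, rather than calling them directly:

    from pkgcore.fs import livefs, ops

    image = livefs.scan("/tmp/image", offset="/tmp/image")
    ops.merge_contents(image, offset="/mnt/target")     # copies files, mkdirs, enforces perms
    ops.unmerge_contents(image, offset="/mnt/target")   # removes them again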
diff --git a/pkgcore/fs/tar.py b/pkgcore/fs/tar.py
new file mode 100644
index 0000000..950c9bd
--- /dev/null
+++ b/pkgcore/fs/tar.py
@@ -0,0 +1,127 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+binpkg tar utilities
+"""
+import os, stat
+from pkgcore.fs.fs import fsFile, fsDir, fsSymlink, fsFifo, fsDev
+from pkgcore.fs import contents
+from pkgcore.interfaces.data_source import data_source
+
+from snakeoil.tar import tarfile
+from snakeoil.mappings import OrderedDict, StackedDict
+from snakeoil.currying import partial
+
+class tar_data_source(data_source):
+ def get_fileobj(self):
+ return self.data()
+
+class TarContentsSet(contents.contentsSet):
+
+ def __init__(self, initial=None, mutable=False):
+ contents.contentsSet.__init__(self, mutable=True)
+ self._dict = OrderedDict()
+ if initial:
+ self.update(initial)
+ self.mutable = mutable
+
+
+known_compressors = {"bz2": tarfile.TarFile.bz2open,
+ "gz": tarfile.TarFile.gzopen,
+ None: tarfile.TarFile.open}
+
+def write_set(contents_set, filepath, compressor='bz2'):
+ if compressor not in known_compressors:
+ raise ValueError("compression must be one of %r, got %r" %
+ (known_compressors.keys(), compressor))
+ tar_fd = known_compressors[compressor](filepath, mode="w")
+
+ # first add directories, then everything else
+ # this is just a pkgcore optimization, it prefers to see the dirs first.
+ dirs = contents_set.dirs()
+ dirs.sort()
+ for x in dirs:
+ tar_fd.addfile(fsobj_to_tarinfo(x))
+ del dirs
+ for x in contents_set.iterdirs(invert=True):
+ t = fsobj_to_tarinfo(x)
+ if t.isreg():
+ tar_fd.addfile(t, fileobj=x.data.get_fileobj())
+ else:
+ tar_fd.addfile(t)
+ tar_fd.close()
+
+def tarinfo_to_fsobj(src_tar):
+ psep = os.path.sep
+ for member in src_tar:
+ d = {
+ "uid":member.uid, "gid":member.gid,
+ "mtime":member.mtime, "mode":member.mode}
+ location = psep + member.name.strip(psep)
+ if member.isdir():
+ if member.name.strip(psep) == ".":
+ continue
+ yield fsDir(location, **d)
+ elif member.isreg():
+ d["data_source"] = tar_data_source(partial(
+ src_tar.extractfile, member.name))
+ # bit of an optimization; basically, we know size, so
+ # we stackdict it so that the original value is used, rather than
+ # triggering a full chksum run for size
+ f = fsFile(location, **d)
+ object.__setattr__(f, "chksums", StackedDict(
+ {"size":long(member.size)}, f.chksums))
+ yield f
+ elif member.issym() or member.islnk():
+ yield fsSymlink(location, member.linkname, **d)
+ elif member.isfifo():
+ yield fsFifo(location, **d)
+ elif member.isdev():
+ d["major"] = long(member.major)
+ d["minor"] = long(member.minor)
+ yield fsDev(location, **d)
+ else:
+ raise AssertionError(
+ "unknown type %r, %r was encounted walking tarmembers" %
+ (member, member.type))
+
+def fsobj_to_tarinfo(fsobj):
+ t = tarfile.TarInfo()
+ if isinstance(fsobj, fsFile):
+ t.type = tarfile.REGTYPE
+ t.size = fsobj.chksums["size"]
+ elif isinstance(fsobj, fsDir):
+ t.type = tarfile.DIRTYPE
+ elif isinstance(fsobj, fsSymlink):
+ t.type = tarfile.SYMTYPE
+ t.linkname = fsobj.target
+ elif isinstance(fsobj, fsFifo):
+ t.type = tarfile.FIFOTYPE
+ elif isinstance(fsobj, fsDev):
+ if stat.S_ISCHR(fsobj.mode):
+ t.type = tarfile.CHRTYPE
+ else:
+ t.type = tarfile.BLKTYPE
+ t.devmajor = fsobj.major
+ t.devminor = fsobj.minor
+ t.name = fsobj.location
+ t.mode = fsobj.mode
+ t.uid = fsobj.uid
+ t.gid = fsobj.gid
+ t.mtime = fsobj.mtime
+ return t
+
+def generate_contents(path, compressor="bz2"):
+ """
+ generate a contentset from a tarball
+
+ @param path: string path to location on disk
+ @param compressor: defaults to bz2; decompressor to use, see
+ L{known_compressors} for list of valid compressors
+ """
+ if compressor not in known_compressors:
+ raise ValueError("compressor needs to be one of %r, got %r" %
+ (known_compressors.keys(), compressor))
+ t = known_compressors[compressor](path, mode="r")
+ return TarContentsSet(tarinfo_to_fsobj(t), mutable=False)
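
A hedged round-trip sketch with invented paths: write a scanned tree out as a bzip2 tarball, then rebuild a contents set from it:

    from pkgcore.fs import livefs, tar

    cset = livefs.scan("/tmp/image", offset="/tmp/image")
    tar.write_set(cset, "/tmp/foo-1.0.tbz2", compressor="bz2")
    restored = tar.generate_contents("/tmp/foo-1.0.tbz2", compressor="bz2")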
diff --git a/pkgcore/interfaces/__init__.py b/pkgcore/interfaces/__init__.py
new file mode 100644
index 0000000..57eb691
--- /dev/null
+++ b/pkgcore/interfaces/__init__.py
@@ -0,0 +1,6 @@
+# Copyright: 2005 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+interface templates for package/repository/data source objects
+"""
diff --git a/pkgcore/interfaces/data_source.py b/pkgcore/interfaces/data_source.py
new file mode 100644
index 0000000..abd4e4b
--- /dev/null
+++ b/pkgcore/interfaces/data_source.py
@@ -0,0 +1,86 @@
+# Copyright: 2005-2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+data source.
+
+Think of it as a far more minimal form of file protocol
+"""
+
+import StringIO
+from snakeoil.currying import pre_curry
+
+def generic_immutable_method(attr, self, *a, **kwds):
+ raise AttributeError("%s doesn't have %s" % (self.__class__, attr))
+
+class native_ro_StringIO(StringIO.StringIO):
+ locals().update([(k, pre_curry(generic_immutable_method, k)) for k in
+ ["write", "writelines", "truncate"]])
+
+del generic_immutable_method
+
+class write_StringIO(StringIO.StringIO):
+
+ def __init__(self, callback, *args, **kwds):
+ if not callable(callback):
+ raise TypeError("callback must be callable")
+ StringIO.StringIO.__init__(self, *args, **kwds)
+ self._callback = callback
+
+ def close(self):
+ self.flush()
+ if self._callback is not None:
+ self.seek(0)
+ self._callback(self.read())
+ self._callback = None
+ StringIO.StringIO.close(self)
+
+try:
+ import cStringIO
+ read_StringIO = cStringIO.StringIO
+except ImportError:
+ read_StringIO = native_ro_StringIO
+
+class base(object):
+ """base class, all implementations should match this protocol"""
+ get_fileobj = get_path = None
+
+
+class local_source(base):
+
+ """locally accessible data source"""
+
+ __slots__ = ("path", "mutable")
+
+ def __init__(self, path, mutable=False):
+ """@param path: file path of the data source"""
+ base.__init__(self)
+ self.path = path
+ self.mutable = mutable
+
+ def get_path(self):
+ return self.path
+
+ def get_fileobj(self):
+ if self.mutable:
+ return open(self.path, "rb+", 32768)
+ return open(self.path, "rb", 32768)
+
+
+class data_source(base):
+
+ def __init__(self, data, mutable=False):
+ """@param data: data to wrap"""
+ base.__init__(self)
+ self.data = data
+ self.mutable = mutable
+
+ get_path = None
+
+ def get_fileobj(self):
+ if self.mutable:
+ return write_StringIO(self._reset_data, self.data)
+ return read_StringIO(self.data)
+
+ def _reset_data(self, data):
+ self.data = data
diff --git a/pkgcore/interfaces/format.py b/pkgcore/interfaces/format.py
new file mode 100644
index 0000000..d4b4a84
--- /dev/null
+++ b/pkgcore/interfaces/format.py
@@ -0,0 +1,195 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+build operation
+"""
+
+from snakeoil.dependant_methods import ForcedDepends
+
+__all__ = ('build_base', 'base', 'install', 'uninstall', 'replace', 'fetch',
+ 'empty_build_op', 'FailedDirectory', 'GenericBuildError', 'errors')
+
+
+def _raw_fetch(self):
+ if not "files" in self.__dict__:
+ self.files = {}
+
+ # this is being anal, but protect against pkgs that don't collapse
+ # common uri down to a single file.
+ gotten_fetchables = set(x.filename for x in self.files.values())
+ for x in self.fetchables:
+ if x.filename in gotten_fetchables:
+ continue
+ fp = self.fetcher(x)
+ if fp is None:
+ return False
+ self.files[fp] = x
+ gotten_fetchables.add(x.filename)
+ return True
+
+
+class maintenance(object):
+ stage_depends = {}
+
+ __metaclass__ = ForcedDepends
+
+ def __init__(self, pkg, observer=None):
+ self.observer = observer
+ self.pkg = pkg
+
+ def config(self):
+ return True
+
+
+class build_base(object):
+ stage_depends = {}
+
+ __metaclass__ = ForcedDepends
+
+ def __init__(self, observer=None):
+ self.observer = observer
+
+
+class build(build_base):
+ stage_depends = {
+ "setup":"start",
+ "unpack":("fetch", "setup"),
+ "configure":"unpack",
+ "compile":"configure",
+ "test":"compile",
+ "install":"test",
+ "finalize":"install"}
+
+ def setup(self):
+ return True
+
+ fetch = _raw_fetch
+
+ def unpack(self):
+ return True
+
+ def configure(self):
+ return True
+
+ def compile(self):
+ return True
+
+ def test(self):
+ return True
+
+ def install(self):
+ return True
+
+ def finalize(self):
+ """finalize any build steps required"""
+ return True
+
+ def cleanup(self):
+ """cleanup any working files/dirs created during building"""
+ return True
+
+ for k in (
+ "setup", "fetch", "unpack", "configure", "compile", "test", "install"):
+ locals()[k].__doc__ = (
+ "execute any %s steps required; "
+ "implementations of this interface should overide this as needed"
+ % k)
+ for k in (
+ "setup", "fetch", "unpack", "configure", "compile", "test", "install",
+ "finalize"):
+ o = locals()[k]
+ o.__doc__ = "\n".join(x.lstrip() for x in o.__doc__.split("\n") + [
+ "@return: True on success, False on failure"])
+ del o, k
+
+
+class install(build_base):
+ stage_depends = {"preinst":"start", "postinst":"preinst", "finalize":"postinst"}
+
+ def preinst(self):
+ """any pre merge steps needed"""
+ return True
+
+ def postinst(self):
+ """any post merge steps needed"""
+ return True
+
+ def finalize(self):
+ """finalize any merge steps required"""
+ return True
+
+
+class uninstall(build_base):
+ stage_depends = {"prerm":"start", "postrm":"prerm", "finalize":"postrm"}
+
+ def prerm(self):
+ """any pre unmerge steps needed"""
+ return True
+
+ def postrm(self):
+ """any post unmerge steps needed"""
+ return True
+
+ def finalize(self):
+ """finalize any unmerge steps required"""
+ return True
+
+class replace(install, uninstall):
+
+ stage_depends = {"finalize":"postinst", "postinst":"postrm",
+ "postrm":"prerm", "prerm":"preinst", "preinst":"start"}
+
+
+class fetch(object):
+ __metaclass__ = ForcedDepends
+
+ stage_depends = {"finalize":"fetch"}
+
+ fetch = _raw_fetch
+
+ def __init__(self, pkg):
+ self.pkg = pkg
+ self.fetchables = pkg.fetchables
+
+ def finalize(self):
+ """finalize any build steps required"""
+ return self.pkg
+
+ def cleanup(self):
+ return True
+
+
+class empty_build_op(build_base):
+
+ stage_depends = {}
+
+# __metaclass__ = ForcedDepends
+
+ def __init__(self, pkg, observer=None, clean=False):
+ build_base.__init__(self, observer)
+ self.pkg = pkg
+
+ def cleanup(self):
+ return True
+
+ def finalize(self):
+ return self.pkg
+
+
+class BuildError(Exception):
+ pass
+
+class FailedDirectory(BuildError):
+ def __init__(self, path, text):
+ BuildError.__init__(
+ self, "failed creating/ensuring dir %s: %s" % (path, text))
+
+
+class GenericBuildError(BuildError):
+ def __init__(self, err):
+ BuildError.__init__(self, "Failed build operation: %s" % (err,))
+ self.err = str(err)
+
+
+errors = (FailedDirectory, GenericBuildError)
diff --git a/pkgcore/interfaces/observer.py b/pkgcore/interfaces/observer.py
new file mode 100644
index 0000000..a98377e
--- /dev/null
+++ b/pkgcore/interfaces/observer.py
@@ -0,0 +1,106 @@
+# Copyright: 2006-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+from snakeoil.currying import pre_curry
+
+class base(object):
+
+ def warn(self, msg):
+ pass
+
+
+class phase_observer(object):
+
+ def phase_start(self, phase):
+ pass
+
+ def phase_end(self, phase, status):
+ pass
+
+
+class file_phase_observer(phase_observer):
+
+ def __init__(self, out, semiquiet=True):
+ self._out = out
+ self._semiquiet = semiquiet
+
+ def phase_start(self, phase):
+ if not self._semiquiet:
+ self._out.write("starting %s\n" % phase)
+
+ def info(self, msg):
+ if not self._semiquiet:
+ self._out.write("info: %s\n" % msg)
+
+ def warn(self, msg):
+ self._out.write("warning: %s\n" % msg)
+
+ def phase_end(self, phase, status):
+ if not self._semiquiet:
+ self._out.write("finished %s: %s\n" % (phase, status))
+
+
+class build_observer(base, phase_observer):
+ pass
+
+
+class repo_base(base):
+ pass
+
+
+class repo_observer(repo_base, phase_observer):
+
+ def trigger_start(self, hook, trigger):
+ pass
+
+ trigger_end = trigger_start
+
+ def installing_fs_obj(self, obj):
+ pass
+
+ removing_fs_obj = installing_fs_obj
+
+
+class file_build_observer(build_observer, file_phase_observer):
+ pass
+
+
+class file_repo_observer(file_phase_observer, repo_base):
+
+ def __init__(self, out, semiquiet=True):
+ self._out = out
+ self._semiquiet = semiquiet
+
+ def trigger_start(self, hook, trigger):
+ if not self._semiquiet:
+ self._out.write("hook %s: trigger: starting %r\n" % (hook, trigger))
+
+ def trigger_end(self, hook, trigger):
+ if not self._semiquiet:
+ self._out.write("hook %s: trigger: finished %r\n" % (hook, trigger))
+
+ def installing_fs_obj(self, obj):
+ self._out.write(">>> %s\n" % obj)
+
+ def removing_fs_obj(self, obj):
+ self._out.write("<<< %s\n" % obj)
+
+
+def wrap_build_method(phase, method, self, *args, **kwds):
+ disable_observer = kwds.pop("disable_observer", False)
+ if self.observer is None or disable_observer:
+ return method(self, *args, **kwds)
+ self.observer.phase_start(phase)
+ ret = False
+ try:
+ ret = method(self, *args, **kwds)
+ finally:
+ self.observer.phase_end(phase, ret)
+ return ret
+
+def decorate_build_method(phase):
+ def f(func):
+ return pre_curry(wrap_build_method, phase, func)
+ return f
+
+
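+
+# --- Editor's example (illustrative sketch, not part of this commit) ---
+# Shows the intended use of decorate_build_method: a build-style object
+# exposes an ``observer`` attribute, and decorating a phase method routes
+# phase_start/phase_end notifications through wrap_build_method above.
+# The class below is hypothetical; only the "observer" attribute is assumed.
+class _example_build(object):
+
+    def __init__(self, observer=None):
+        self.observer = observer
+
+    @decorate_build_method("compile")
+    def compile(self):
+        # the wrapper reports the "compile" phase start/end to self.observer
+        # (unless disable_observer=True is passed) and hands the result back
+        return True
+
+if __name__ == '__main__':
+    import sys
+    _example_build(file_build_observer(sys.stdout, semiquiet=False)).compile()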
diff --git a/pkgcore/interfaces/repo.py b/pkgcore/interfaces/repo.py
new file mode 100644
index 0000000..ac7c3cc
--- /dev/null
+++ b/pkgcore/interfaces/repo.py
@@ -0,0 +1,331 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+repository modifications (installing, removing, replacing)
+"""
+
+from pkgcore.merge import errors as merge_errors
+from pkgcore.merge.engine import MergeEngine
+from snakeoil.dependant_methods import ForcedDepends
+from snakeoil.demandload import demandload
+demandload(globals(), "pkgcore.log:logger")
+
+
+class fake_lock(object):
+ def __init__(self):
+ pass
+
+ acquire_write_lock = acquire_read_lock = __init__
+ release_read_lock = release_write_lock = __init__
+
+
+class base(object):
+ __metaclass__ = ForcedDepends
+
+ stage_depends = {}
+
+class Failure(Exception):
+ pass
+
+
+class nonlivefs_base(base):
+
+ stage_depends = {'finish': '_notify_repo', '_notify_repo': 'modify_repo',
+ 'modify_repo':'start'}
+
+ def __init__(self, repo, observer=None):
+ self.repo = repo
+ self.underway = False
+ self.observer = observer
+ self.lock = getattr(repo, "lock")
+ if self.lock is None:
+ self.lock = fake_lock()
+
+ def start(self):
+ self.underway = True
+ self.lock.acquire_write_lock()
+ return True
+
+ def modify_repo(self):
+ raise NotImplementedError(self, 'modify_repo')
+
+ def _notify_repo(self):
+ raise NotImplementedError(self, '_notify_repo')
+
+ def finish(self):
+ self._notify_repo()
+ self.lock.release_write_lock()
+ self.underway = False
+ return True
+
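+
+# --- Editor's example (illustrative sketch, not part of this commit) ---
+# A minimal demonstration of the stage_depends contract used throughout this
+# module, assuming ForcedDepends (snakeoil.dependant_methods) runs the listed
+# dependencies first and treats a true return value as stage success.
+class _ordering_demo(base):
+
+    stage_depends = {"finish": "work", "work": "start"}
+
+    def __init__(self):
+        self.trail = []
+
+    def start(self):
+        self.trail.append("start")
+        return True
+
+    def work(self):
+        self.trail.append("work")
+        return True
+
+    def finish(self):
+        self.trail.append("finish")
+        return True
+
+# _ordering_demo().finish() is expected to leave trail == ["start", "work",
+# "finish"]; a stage returning False stops the chain, so finish() reports
+# failure without running the later stages.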
+
+class nonlivefs_install(nonlivefs_base):
+
+ def __init__(self, repo, pkg, **kwds):
+ nonlivefs_base.__init__(self, repo, **kwds)
+ self.new_pkg = pkg
+
+ def _notify_repo(self):
+ self.repo.notify_add_package(self.new_pkg)
+
+
+class nonlivefs_uninstall(nonlivefs_base):
+
+ def __init__(self, repo, pkg, **kwds):
+ nonlivefs_base.__init__(self, repo, **kwds)
+ self.old_pkg = pkg
+
+ def _notify_repo(self):
+ self.repo.notify_remove_package(self.old_pkg)
+
+
+class nonlivefs_replace(nonlivefs_install, nonlivefs_uninstall):
+
+ def __init__(self, repo, oldpkg, newpkg, **kwds):
+ # yes there is duplicate initialization here.
+ nonlivefs_uninstall.__init__(self, repo, oldpkg, **kwds)
+ nonlivefs_install.__init__(self, repo, newpkg, **kwds)
+
+ def _notify_repo(self):
+ nonlivefs_uninstall._notify_repo(self)
+ nonlivefs_install._notify_repo(self)
+
+
+class livefs_base(base):
+ stage_hooks = []
+
+ def __init__(self, repo, observer=None, offset=None):
+ self.repo = repo
+ self.underway = False
+ self.offset = offset
+ self.observer = observer
+ self.get_op()
+ self.lock = getattr(repo, "lock")
+ if self.lock is None:
+ self.lock = fake_lock()
+
+ def customize_engine(self, engine):
+ pass
+
+ def _get_format_op_args_kwds(self):
+ return (), {}
+
+ def start(self, engine):
+ self.me = engine
+ self.underway = True
+ self.lock.acquire_write_lock()
+ self.me.sanity_check()
+ return True
+
+ def finish(self):
+ """finish the transaction"""
+ self.me.final()
+ self._notify_repo()
+ self.lock.release_write_lock()
+ self.underway = False
+ return True
+
+ def _modify_repo_cache(self):
+ raise NotImplementedError
+
+ def __del__(self):
+ if self.underway:
+ print "warning: %s merge was underway, but wasn't completed"
+ self.lock.release_write_lock()
+
+
+class livefs_install(livefs_base):
+
+ """base interface for installing a pkg into a livefs repo.
+
+ repositories should override as needed.
+ """
+
+ stage_depends = {
+ "finish":"merge_metadata", "merge_metadata":"postinst",
+ "postinst":"transfer", "transfer":"preinst", "preinst":"start"}
+ stage_hooks = ["merge_metadata", "postinst", "preinst", "transfer"]
+ install_op_name = "_repo_install_op"
+
+ def __init__(self, repo, pkg, *args, **kwds):
+ self.new_pkg = pkg
+ livefs_base.__init__(self, repo, *args, **kwds)
+
+ install_get_format_op_args_kwds = livefs_base._get_format_op_args_kwds
+
+ def get_op(self):
+ op_args, op_kwds = self.install_get_format_op_args_kwds()
+ op_kwds["observer"] = self.observer
+ self.install_op = getattr(self.new_pkg,
+ self.install_op_name)(*op_args, **op_kwds)
+
+ def start(self):
+ """start the install transaction"""
+ engine = MergeEngine.install(self.new_pkg, offset=self.offset,
+ observer=self.observer)
+ self.new_pkg.add_format_triggers(self, self.install_op, engine)
+ self.customize_engine(engine)
+ return livefs_base.start(self, engine)
+
+ def preinst(self):
+ """execute any pre-transfer steps required"""
+ return self.install_op.preinst()
+
+ def transfer(self):
+ """execute the actual transfer"""
+ for x in (self.me.pre_merge, self.me.merge, self.me.post_merge):
+ try:
+ x()
+ except merge_errors.NonFatalModification, e:
+ print "warning caught: %s" % e
+ return True
+
+ def _notify_repo(self):
+ self.repo.notify_add_package(self.new_pkg)
+
+ def postinst(self):
+ """execute any post-transfer steps required"""
+ return self.install_op.postinst()
+
+ def merge_metadata(self):
+ """merge pkg metadata to the repository. Must be overrided"""
+ raise NotImplementedError
+
+ def finish(self):
+ ret = self.install_op.finalize()
+ if not ret:
+ logger.warn("ignoring unexpected result from install finalize- "
+ "%r" % ret)
+ return livefs_base.finish(self)
+
+
+class livefs_uninstall(livefs_base):
+
+ """base interface for uninstalling a pkg from a livefs repo.
+
+ Repositories should override as needed.
+ """
+
+ stage_depends = {
+ "finish":"unmerge_metadata", "unmerge_metadata":"postrm",
+ "postrm":"remove", "remove":"prerm", "prerm":"start"}
+ stage_hooks = ["merge_metadata", "postrm", "prerm", "remove"]
+ uninstall_op_name = "_repo_uninstall_op"
+
+ def __init__(self, repo, pkg, *args, **kwds):
+ self.old_pkg = pkg
+ livefs_base.__init__(self, repo, *args, **kwds)
+
+ uninstall_get_format_op_args_kwds = livefs_base._get_format_op_args_kwds
+
+ def get_op(self):
+ op_args, op_kwds = self.uninstall_get_format_op_args_kwds()
+ op_kwds["observer"] = self.observer
+ self.uninstall_op = getattr(self.old_pkg,
+ self.uninstall_op_name)(*op_args, **op_kwds)
+
+ def start(self):
+ """start the uninstall transaction"""
+ engine = MergeEngine.uninstall(self.old_pkg, offset=self.offset,
+ observer=self.observer)
+ self.old_pkg.add_format_triggers(self, self.uninstall_op, engine)
+ self.customize_engine(engine)
+ return livefs_base.start(self, engine)
+
+ def prerm(self):
+ """execute any pre-removal steps required"""
+ return self.uninstall_op.prerm()
+
+ def remove(self):
+ """execute any removal steps required"""
+ for x in (self.me.pre_unmerge, self.me.unmerge, self.me.post_unmerge):
+ try:
+ x()
+ except merge_errors.NonFatalModification, e:
+ print "warning caught: %s" % e
+ return True
+
+ def postrm(self):
+ """execute any post-removal steps required"""
+ return self.uninstall_op.postrm()
+
+ def _notify_repo(self):
+ self.repo.notify_remove_package(self.old_pkg)
+
+ def unmerge_metadata(self):
+ """unmerge pkg metadata from the repository. Must be overrided."""
+ raise NotImplementedError
+
+ def finish(self):
+ ret = self.uninstall_op.finalize()
+ self.uninstall_op.cleanup(disable_observer=True)
+ if not ret:
+ logger.warn("ignoring unexpected result from uninstall finalize- "
+ "%r" % ret)
+ return livefs_base.finish(self)
+
+ def __del__(self):
+ if self.underway:
+ print "warning: %s unmerge was underway, but wasn't completed" % \
+ self.old_pkg
+ self.lock.release_write_lock()
+
+
+class livefs_replace(livefs_install, livefs_uninstall):
+
+ """base interface for replacing a pkg in a livefs repo with another.
+
+ Repositories should override as needed.
+ """
+
+ stage_depends = {
+ "finish":"postinst", "postinst":"unmerge_metadata",
+ "unmerge_metadata":"postrm", "postrm":"remove",
+ "remove":"prerm", "prerm":"merge_metadata",
+ "merge_metadata":"transfer",
+ "transfer":"preinst", "preinst":"start"}
+
+ stage_hooks = [
+ "merge_metadata", "unmerge_metadata", "postrm", "prerm", "postinst",
+ "preinst"]
+
+ def __init__(self, repo, oldpkg, newpkg, **kwds):
+ self.old_pkg = oldpkg
+ self.new_pkg = newpkg
+ livefs_base.__init__(self, repo, **kwds)
+
+ def get_op(self):
+ livefs_install.get_op(self)
+ livefs_uninstall.get_op(self)
+
+ def start(self):
+ """start the transaction"""
+ engine = MergeEngine.replace(self.old_pkg, self.new_pkg,
+ offset=self.offset, observer=self.observer)
+ self.old_pkg.add_format_triggers(self, self.uninstall_op, engine)
+ self.new_pkg.add_format_triggers(self, self.install_op, engine)
+ self.customize_engine(engine)
+ return livefs_base.start(self, engine)
+
+ def _notify_repo(self):
+ self.repo.notify_remove_package(self.old_pkg)
+ self.repo.notify_add_package(self.new_pkg)
+
+ def finish(self):
+ ret = self.install_op.finalize()
+ if not ret:
+ logger.warn("ignoring unexpected result from install finalize- "
+ "%r" % ret)
+ ret = self.uninstall_op.finalize()
+ self.uninstall_op.cleanup(disable_observer=True)
+ if not ret:
+ logger.warn("ignoring unexpected result from uninstall finalize- "
+ "%r" % ret)
+ return livefs_base.finish(self)
+
+ def __del__(self):
+ if self.underway:
+ print "warning: %s -> %s replacement was underway, " \
+ "but wasn't completed" % (self.old_pkg, self.new_pkg)
+ self.lock.release_write_lock()
diff --git a/pkgcore/log.py b/pkgcore/log.py
new file mode 100644
index 0000000..5035430
--- /dev/null
+++ b/pkgcore/log.py
@@ -0,0 +1,21 @@
+# Copyright: 2006 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+
+"""Logging utilities.
+
+Currently just contains pkgcore's root logger.
+"""
+
+
+import logging
+
+# The logging system will call this automagically if its module-level
+# logging functions are used. We call it explicitly to make sure
+# something handles messages sent to our non-root logger. If the root
+# logger already has handlers this is a no-op, and a handler attached to
+# our pkgcore logger takes precedence over the root logger's handler.
+logging.basicConfig()
+
+# Our main logger.
+logger = logging.getLogger('pkgcore')
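+
+
+# Editor's example (illustrative, not part of this commit): consumers import
+# this shared logger rather than creating their own, e.g.
+# ``from pkgcore.log import logger``.
+if __name__ == '__main__':
+    logger.warning("example message: handled by the basicConfig() handler "
+                   "unless another handler is attached to 'pkgcore'")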
diff --git a/pkgcore/merge/__init__.py b/pkgcore/merge/__init__.py
new file mode 100644
index 0000000..b7fbd75
--- /dev/null
+++ b/pkgcore/merge/__init__.py
@@ -0,0 +1,6 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+package related livefs modification subsystem
+"""
diff --git a/pkgcore/merge/const.py b/pkgcore/merge/const.py
new file mode 100644
index 0000000..7ccb140
--- /dev/null
+++ b/pkgcore/merge/const.py
@@ -0,0 +1,6 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+REPLACE_MODE = 0
+INSTALL_MODE = 1
+UNINSTALL_MODE = 2
diff --git a/pkgcore/merge/engine.py b/pkgcore/merge/engine.py
new file mode 100644
index 0000000..60e261d
--- /dev/null
+++ b/pkgcore/merge/engine.py
@@ -0,0 +1,316 @@
+# Copyright: 2005-2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+core engine for livefs modifications
+"""
+
+# need better documentation...
+
+# pre merge triggers
+# post merge triggers
+# ordering?
+
+import operator
+
+from pkgcore.fs import contents, livefs
+from pkgcore.plugin import get_plugins
+from pkgcore.merge import errors
+from pkgcore.interfaces import observer as observer_mod
+from pkgcore.merge.const import REPLACE_MODE, INSTALL_MODE, UNINSTALL_MODE
+
+from snakeoil.mappings import LazyValDict, ImmutableDict, StackedDict
+from snakeoil import currying
+
+from snakeoil.demandload import demandload
+demandload(globals(), 'errno')
+
+def alias_cset(alias, engine, csets):
+ """alias a cset to another"""
+ return csets[alias]
+
+
+class MergeEngine(object):
+
+ install_hooks = dict((x, []) for x in [
+ "sanity_check", "pre_merge", "merge", "post_merge", "final"])
+ uninstall_hooks = dict((x, []) for x in [
+ "sanity_check", "pre_unmerge", "unmerge", "post_unmerge", "final"])
+ replace_hooks = dict((x, []) for x in set(
+ install_hooks.keys() + uninstall_hooks.keys()))
+
+ install_csets = {"install_existing":"get_install_livefs_intersect"}
+ uninstall_csets = {
+ "uninstall_existing":"get_uninstall_livefs_intersect",
+ "uninstall":currying.partial(alias_cset, "old_cset")}
+ replace_csets = dict(install_csets)
+ replace_csets.update(uninstall_csets)
+
+ install_csets.update({}.fromkeys(["install", "replace"],
+ currying.partial(alias_cset, "new_cset")))
+ replace_csets["install"] = currying.partial(alias_cset, "new_cset")
+ replace_csets["modifying"] = (
+ lambda e, c: c["install"].intersection(c["uninstall"]))
+ replace_csets["uninstall"] = "get_remove_cset"
+ replace_csets["replace"] = "get_replace_cset"
+ replace_csets["install_existing"] = "get_install_livefs_intersect"
+
+ install_csets_preserve = ["new_cset"]
+ uninstall_csets_preserve = ["old_cset"]
+ replace_csets_preserve = ["new_cset", "old_cset"]
+
+
+ def __init__(self, mode, hooks, csets, preserves, observer, offset=None):
+ if observer is None:
+ observer = observer_mod.repo_observer()
+ self.observer = observer
+ self.mode = mode
+
+ self.hooks = ImmutableDict((x, []) for x in hooks)
+
+ self.preserve_csets = []
+ self.cset_sources = {}
+ # instantiate these separately so their values are preserved
+ self.preserved_csets = LazyValDict(
+ self.preserve_csets, self._get_cset_source)
+ for k, v in csets.iteritems():
+ if isinstance(v, basestring):
+ v = getattr(self, v, v)
+ if not callable(v):
+ raise TypeError(
+ "cset values must be either the string name of "
+ "existing methods, or callables (got %s)" % v)
+
+ if k in preserves:
+ self.add_preserved_cset(k, v)
+ else:
+ self.add_cset(k, v)
+
+ if offset is None:
+ offset = "/"
+ self.offset = offset
+
+ # merge in default triggers first.
+ for trigger in get_plugins('triggers'):
+ t = trigger()
+ t.register(self)
+
+ # merge in overrides
+ for hook, triggers in hooks.iteritems():
+ for trigger in triggers:
+ self.add_trigger(hook, trigger)
+
+ self.regenerate_csets()
+ for x in hooks.keys():
+ setattr(self, x, currying.partial(self.execute_hook, x))
+
+ @classmethod
+ def install(cls, pkg, offset=None, observer=None):
+
+ """
+ generate a MergeEngine instance configured for installing a pkg
+
+ @param pkg: L{pkgcore.package.metadata.package} instance to install
+ @param offset: any livefs offset to force for modifications
+ @return: L{MergeEngine}
+
+ """
+
+ hooks = dict(
+ (k, [y() for y in v])
+ for (k, v) in cls.install_hooks.iteritems())
+
+ csets = dict(cls.install_csets)
+ if "new_cset" not in csets:
+ csets["new_cset"] = currying.post_curry(cls.get_pkg_contents, pkg)
+ o = cls(
+ INSTALL_MODE, hooks, csets, cls.install_csets_preserve,
+ observer, offset=offset)
+
+ if o.offset != '/':
+ # wrap the results of new_cset to pass through an offset generator
+ o.cset_sources["new_cset"] = currying.post_curry(
+ o.generate_offset_cset, o.cset_sources["new_cset"])
+
+ o.new = pkg
+ return o
+
+ @classmethod
+ def uninstall(cls, pkg, offset=None, observer=None):
+
+ """
+ generate a MergeEngine instance configured for uninstalling a pkg
+
+ @param pkg: L{pkgcore.package.metadata.package} instance to uninstall,
+ must be from a livefs vdb
+ @param offset: any livefs offset to force for modifications
+ @return: L{MergeEngine}
+ """
+
+ hooks = dict(
+ (k, [y() for y in v])
+ for (k, v) in cls.uninstall_hooks.iteritems())
+ csets = dict(cls.uninstall_csets)
+ if "old_cset" not in csets:
+ csets["old_cset"] = currying.post_curry(cls.get_pkg_contents, pkg)
+ o = cls(
+ UNINSTALL_MODE, hooks, csets, cls.uninstall_csets_preserve,
+ observer, offset=offset)
+
+ if o.offset != '/':
+ # wrap the results of old_cset to pass through an offset generator
+ o.cset_sources["old_cset"] = currying.post_curry(
+ o.generate_offset_cset, o.cset_sources["old_cset"])
+
+ o.old = pkg
+ return o
+
+ @classmethod
+ def replace(cls, old, new, offset=None, observer=None):
+
+ """
+ generate a MergeEngine instance configured for replacing a pkg.
+
+ @param old: L{pkgcore.package.metadata.package} instance to replace,
+ must be from a livefs vdb
+ @param new: L{pkgcore.package.metadata.package} instance
+ @param offset: any livefs offset to force for modifications
+ @return: L{MergeEngine}
+
+ """
+
+ hooks = dict(
+ (k, [y() for y in v])
+ for (k, v) in cls.replace_hooks.iteritems())
+
+ csets = dict(cls.replace_csets)
+
+ for v, k in ((old, "old_cset"), (new, "new_cset")):
+ if k not in csets:
+ csets[k] = currying.post_curry(cls.get_pkg_contents, v)
+
+ o = cls(
+ REPLACE_MODE, hooks, csets, cls.replace_csets_preserve,
+ observer, offset=offset)
+
+ if o.offset != '/':
+ for k in ("old_cset", "new_cset"):
+ # wrap the results of each cset to pass through an
+ # offset generator
+ o.cset_sources[k] = currying.post_curry(
+ o.generate_offset_cset, o.cset_sources[k])
+
+ o.old = old
+ o.new = new
+ return o
+
+ def regenerate_csets(self):
+ """
+ internal function, reset non-preserved csets.
+
+ Used in transitioning between hook points
+ """
+ self.csets = StackedDict(self.preserved_csets,
+ LazyValDict(self.cset_sources, self._get_cset_source))
+
+ def _get_cset_source(self, key):
+ return self.cset_sources[key](self, self.csets)
+
+ def add_preserved_cset(self, cset_name, func):
+ """
+ register a cset generator for use.
+
+ The cset will stay in memory until the engine finishes all steps.
+
+ @param cset_name: what to call the generated cset
+ @param func: callable to get the cset
+ """
+ self.add_cset(cset_name, func)
+ self.preserve_csets.append(cset_name)
+
+ def add_cset(self, cset_name, func):
+ """
+ register a cset generator for use.
+
+ The cset will be released from memory when it's no longer used.
+
+ @param cset_name: what to call the generated cset
+ @param func: callable to get the cset
+ """
+ if not callable(func):
+ raise TypeError("func must be a callable")
+ if not isinstance(cset_name, basestring):
+ raise TypeError("cset_name must be a string")
+ self.cset_sources[cset_name] = func
+
+ def add_trigger(self, hook_name, trigger, required_csets):
+ """
+ register a L{pkgcore.merge.triggers.base} instance to be executed
+
+ @param hook_name: engine step to hook the trigger into
+ @param trigger: L{triggers<pkgcore.merge.triggers.base>} to add
+ @param required_csets: None, or a sequence of cset names the trigger
+ requires to be available when it runs
+ """
+ if hook_name not in self.hooks:
+ raise KeyError("trigger %r's hook %s isn't a known hook" %
+ (trigger, hook_name))
+
+ if required_csets is not None:
+ for rcs in required_csets:
+ if rcs not in self.cset_sources:
+ if isinstance(rcs, basestring):
+ raise errors.TriggerUnknownCset(trigger, rcs)
+
+ self.hooks[hook_name].append(trigger)
+
+ def execute_hook(self, hook):
+ """
+ execute any triggers bound to a hook point
+ """
+ try:
+ self.phase = hook
+ self.regenerate_csets()
+ for trigger in sorted(self.hooks[hook],
+ key=operator.attrgetter("priority")):
+ # error checking needed here.
+ self.observer.trigger_start(hook, trigger)
+ try:
+ trigger(self, self.csets)
+ finally:
+ self.observer.trigger_end(hook, trigger)
+ finally:
+ self.phase = None
+
+ @staticmethod
+ def generate_offset_cset(engine, csets, cset_generator):
+ """generate a cset with offset applied"""
+ return cset_generator(engine, csets).insert_offset(engine.offset)
+
+ @staticmethod
+ def get_pkg_contents(engine, csets, pkg):
+ """generate the cset of what files shall be merged to the livefs"""
+ return pkg.contents.clone()
+
+ @staticmethod
+ def get_remove_cset(engine, csets):
+ """generate the cset of what files shall be removed from the livefs"""
+ return csets["old_cset"].difference(csets["new_cset"])
+
+ @staticmethod
+ def get_replace_cset(engine, csets):
+ """Return the cset of what will be replaced going from old->new pkg."""
+ return csets["new_cset"].intersection(csets["old_cset"])
+
+ @staticmethod
+ def _get_livefs_intersect_cset(engine, csets, cset_name):
+ """generates the livefs intersection against a cset"""
+ return contents.contentsSet(livefs.intersect(csets[cset_name]))
+
+ @staticmethod
+ def get_install_livefs_intersect(engine, csets):
+ return engine._get_livefs_intersect_cset(engine, csets, "install")
+
+ @staticmethod
+ def get_uninstall_livefs_intersect(engine, csets):
+ return engine._get_livefs_intersect_cset(engine, csets, "uninstall")
+
+ alias_cset = staticmethod(alias_cset)
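+
+
+# --- Editor's example (illustrative sketch, not part of this commit) ---
+# Typical driver flow, as used by pkgcore.interfaces.repo: build an engine
+# via one of the classmethods, then invoke its hook points in order.  "pkg"
+# and "observer" below are hypothetical stand-ins.
+#
+#     engine = MergeEngine.install(pkg, offset="/", observer=observer)
+#     engine.sanity_check()
+#     engine.pre_merge()
+#     engine.merge()        # plugin-registered merge triggers fire here
+#     engine.post_merge()
+#     engine.final()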
diff --git a/pkgcore/merge/errors.py b/pkgcore/merge/errors.py
new file mode 100644
index 0000000..b489f24
--- /dev/null
+++ b/pkgcore/merge/errors.py
@@ -0,0 +1,42 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+exceptions thrown by the MergeEngine
+"""
+
+class ModificationError(Exception):
+
+ """Base Exception class for modification errors/warnings"""
+
+ def __init__(self, trigger, msg):
+ self.trigger = trigger
+ self.msg = msg
+ Exception.__init__(self, "%s: modification error: %s" %
+ (self.trigger, self.msg))
+
+
+class BlockModification(ModificationError):
+ """Merging cannot proceed"""
+
+ def __str__(self):
+ return "Modification was blocked by %s: %s" % (
+ self.trigger.__class__.__name__, self.msg)
+
+class TriggerUnknownCset(ModificationError):
+ """Trigger's required content set isn't known"""
+
+ def __init__(self, trigger, csets):
+ if not isinstance(csets, (tuple, list)):
+ csets = (csets,)
+ ModificationError.__init__(self, "%s: trigger %r unknown cset: %r" %
+ (self.__class__, trigger, csets))
+ self.trigger, self.csets = trigger, csets
+
+
+class NonFatalModification(Exception):
+ pass
+
+class TriggerWarning(NonFatalModification):
+ pass
+
diff --git a/pkgcore/merge/todo.txt b/pkgcore/merge/todo.txt
new file mode 100644
index 0000000..023c7ec
--- /dev/null
+++ b/pkgcore/merge/todo.txt
@@ -0,0 +1,40 @@
+missing triggers:
+implement INSTALL_MASK
+
+
+- misc-functions.sh
+ 1) big ass scanelf block.
+ 2) install_mask (need to bind domain generation of triggers for it)
+ 3) preinst_mask (same thing, although that shouldn't mangle the install image)
+ 4) sfperms (feature based), domain bound
+ 5) suid control. same thing (see a pattern?)
+ 6) selinux labelling. need to override the copies there imo, installing then slapping labels on sucks, although could mangle the image file and use a selinux aware copy
+
+prepman:
+ 1) all of it. the symlink rewriting might be fun...
+
+prepinfo:
+ 1) no different than prepman.
+
+prepstrip:
+ 1) splitdebug (transformation, fun one that one- maybe higher up, generate N pkgs instead)
+ 2) installsources
+
+
+prepall: (calls prepman, prepinfo, and prepstrip, which are separated in this list)
+ 1) qa: bug 4111, gen_usr_ldscript shit for static files.
+ 2) qa: check for la/.a in /lib
+ 3) more scanelf idiocy- check for libs without sonames, no NEEDED info.
+
+not automatically invoked-
+
+prepalldocs:
+ 1) symlink/compression. usual.
+
+potential
+preplib:
+ 1) we can generate this ourselves... figure out if ebuilds really should be doing it themselves (eapi bump for that most likely)
+
+size check (is there enough space on the partitions for merging?)
+revdep check.
+
diff --git a/pkgcore/merge/triggers.py b/pkgcore/merge/triggers.py
new file mode 100644
index 0000000..a0fd43f
--- /dev/null
+++ b/pkgcore/merge/triggers.py
@@ -0,0 +1,539 @@
+# Copyright: 2006-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+# $Id:$
+
+"""
+triggers, callables to bind to a step in a MergeEngine to affect changes
+"""
+
+__all__ = [
+ "base",
+ "trigger",
+ "UNINSTALLING_MODES",
+ "INSTALLING_MODES"
+]
+
+from pkgcore.merge import errors, const
+import pkgcore.os_data
+
+from snakeoil.osutils import listdir_files, pjoin, ensure_dirs, normpath
+from snakeoil.demandload import demandload
+demandload(globals(),
+ 'os',
+ 'errno',
+ 'pkgcore.plugin:get_plugin',
+ 'pkgcore:spawn',
+ 'pkgcore.fs.livefs:gen_obj',
+ 'pkgcore.fs:fs,contents',
+ 'snakeoil.fileutils:iter_read_bash',
+ 'time',
+ 'math:floor',
+)
+
+UNINSTALLING_MODES = (const.REPLACE_MODE, const.UNINSTALL_MODE)
+INSTALLING_MODES = (const.REPLACE_MODE, const.INSTALL_MODE)
+
+
+class base(object):
+
+ """base trigger class
+
+ @ivar required_csets: if None, all csets are passed in; else it must be a
+ sequence, and only those specific csets are passed in
+ @ivar _label: either None, or a string to use as this trigger's label
+ @ivar _hooks: sequence of hook points to register into
+ @ivar _priority: range of 0 to 100, order of execution for triggers per hook
+ @ivar _engine_types: if None, trigger works for all engine modes, else it's
+ limited to that mode, and must be a sequence
+ """
+
+ required_csets = None
+ _label = None
+ _hooks = None
+ _engine_types = None
+ _priority = 50
+
+ @property
+ def priority(self):
+ return self._priority
+
+ @property
+ def label(self):
+ if self._label is not None:
+ return self._label
+ return str(self.__class__.__name__)
+
+ def register(self, engine):
+ """
+ register with a MergeEngine
+ """
+ if self._engine_types is not None and \
+ engine.mode not in self._engine_types:
+ return
+
+ # ok... so we care about this mode.
+ try:
+ i = iter(self._hooks)
+ except TypeError:
+ # bad monkey...
+ raise TypeError("%r: %r: _hooks needs to be a sequence" %
+ (self, self._hooks))
+
+ csets = self.get_required_csets(engine.mode)
+
+ for hook in self._hooks:
+ try:
+ engine.add_trigger(hook, self, csets)
+ except KeyError:
+ # unknown hook.
+ continue
+
+ def get_required_csets(self, mode):
+ csets = self.required_csets
+ if csets is not None:
+ if not isinstance(csets, tuple):
+ # has to be a dict.
+ csets = csets.get(mode)
+ return csets
+
+ def localize(self, mergeengine):
+ """
+ 'localize' a trigger to a specific merge engine process;
+ mainly used if the trigger comes from configuration
+ """
+ return self
+
+ @staticmethod
+ def _get_csets(required_csets, csets):
+ return [csets[x] for x in required_csets]
+
+ def trigger(self, engine, csets):
+ raise NotImplementedError(self, 'trigger')
+
+ def __call__(self, engine, csets):
+ """execute the trigger"""
+
+ required_csets = self.get_required_csets(engine.mode)
+
+ if required_csets is None:
+ return self.trigger(engine, csets)
+ return self.trigger(engine, *self._get_csets(required_csets, csets))
+
+ def __str__(self):
+ return "%s: cset(%s) ftrigger(%s)" % (
+ self.label, self.required_csets, self.trigger)
+
+ def __repr__(self):
+ return "<%s cset=%r @#%x>" % (
+ self.label,
+ self.required_csets, id(self))
+
+
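+# --- Editor's example (illustrative sketch, not part of this commit) ---
+# A minimal custom trigger following the contract documented on `base`:
+# declare the csets it needs, the hooks to attach to, and the engine modes it
+# applies to; trigger() then receives the named csets positionally.  It
+# assumes the engine's observer provides info(), as file_repo_observer does.
+class _log_new_files(base):
+
+    required_csets = ('new_cset',)
+    _hooks = ('pre_merge',)
+    _engine_types = INSTALLING_MODES
+
+    def trigger(self, engine, new_cset):
+        if engine.observer is not None:
+            for entry in new_cset:
+                engine.observer.info("will merge: %s" % (entry.location,))
+
+# registration happens via the 'triggers' plugin point or explicitly:
+# engine.add_trigger('pre_merge', _log_new_files(), ('new_cset',))
+
+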
+class mtime_watcher(object):
+ """
+ passed a list of locations, return a L{contents.contentsSet} containing
+ those that are directories.
+
+ If the location doesn't exist, it's ignored. If stat_func is os.stat
+ and the location is a symlink pointing at a non-existent location, it's
+ ignored.
+
+ Additionally, since this class is used for effectively 'snapshotting'
+ related directories, any mtime that is 'now' or in the future (some
+ filesystems lack subsecond resolution, osx for example) is pushed a
+ couple of seconds into the past on disk, so later re-runs do not get
+ bitten by changes completing within the race window.
+ """
+
+ def __init__(self):
+ self.saved_mtimes = None
+ self.locations = None
+
+ def mtime_floats(func):
+ def mtime_floats_wrapper(self, *args, **kwargs):
+ cur = os.stat_float_times()
+ try:
+ os.stat_float_times(True)
+ return func(self, *args, **kwargs)
+ finally:
+ os.stat_float_times(cur)
+ return mtime_floats_wrapper
+
+ def __nonzero__(self):
+ return bool(self.saved_mtimes)
+
+ @staticmethod
+ def _scan_mtimes(locations, stat_func):
+ for x in locations:
+ try:
+ st = stat_func(x)
+ except OSError, oe:
+ if not oe.errno == errno.ENOENT:
+ raise
+ continue
+ obj = gen_obj(x, stat=st)
+ if fs.isdir(obj):
+ yield obj
+
+ @mtime_floats
+ def set_state(self, locations, stat_func=os.stat, forced_past=2):
+ """
+ set the initial state; will adjust ondisk mtimes as needed
+ to avoid race potentials.
+
+ @param locations: sequence, file paths to scan
+ @param stat_func: stat'er to use. defaults to os.stat
+ """
+ self.locations = locations
+ mtimes = list(self._scan_mtimes(locations, stat_func))
+
+ cset = contents.contentsSet(mtimes)
+ now = time.time()
+ pause_cutoff = floor(now)
+ past = max(pause_cutoff - forced_past, 0)
+ resets = [x for x in mtimes if x.mtime > past]
+ for x in resets:
+ cset.add(x.change_attributes(mtime=past))
+ os.utime(x.location, (past, past))
+
+ self.saved_mtimes = cset
+
+ @mtime_floats
+ def check_state(self, locations=None, stat_func=os.stat):
+ """
+ check whether any watched location has changed since the last
+ set_state invocation.
+
+ @param locations: sequence, file paths to scan; uses the locations
+ from the set_state invocation if not supplied.
+ @param stat_func: stat'er to use. defaults to os.stat
+ @return: boolean, True if things have changed, False if not.
+ """
+ if locations is None:
+ locations = self.locations
+
+ for x in self.get_changes(locations=locations, stat_func=stat_func):
+ return True
+ return False
+
+ @mtime_floats
+ def get_changes(self, locations=None, stat_func=os.stat):
+ """
+ generator yielding the fs objs for what has changed.
+
+ @param locations: sequence, file paths to scan; uses the locations
+ from the set_state invocation if not supplied.
+ @param stat_func: stat'er to use. defaults to os.stat
+ """
+ if locations is None:
+ locations = self.locations
+
+ for x in self._scan_mtimes(locations, stat_func):
+ if x not in self.saved_mtimes or \
+ self.saved_mtimes[x].mtime != x.mtime:
+ yield x
+
+
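+# --- Editor's example (illustrative usage, not part of this commit) ---
+# Snapshot a set of directories before a merge, then ask afterwards whether
+# anything changed; the paths and the reaction below are hypothetical.
+#
+#     watcher = mtime_watcher()
+#     watcher.set_state(['/usr/share/info', '/usr/share/man'])
+#     ...                        # merge/unmerge runs here
+#     if watcher.check_state():
+#         regenerate_indexes()   # hypothetical reaction to a change
+
+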
+class ldconfig(base):
+
+ required_csets = ()
+ _engine_types = None
+ _hooks = ('pre_merge', 'post_merge', 'pre_unmerge', 'post_unmerge')
+ _priority = 10
+
+ default_ld_path = ['usr/lib', 'usr/lib64', 'usr/lib32', 'lib',
+ 'lib64', 'lib32']
+
+ def __init__(self, ld_so_conf_path="etc/ld.so.conf"):
+ self.ld_so_conf_path = ld_so_conf_path.lstrip(os.path.sep)
+ self.saved_mtimes = mtime_watcher()
+
+ def ld_so_path(self, offset):
+ return pjoin(offset, self.ld_so_conf_path)
+
+ def read_ld_so_conf(self, offset):
+ fp = self.ld_so_path(offset)
+
+ try:
+ l = [x.lstrip(os.path.sep) for x in iter_read_bash(fp)]
+ except IOError, oe:
+ if oe.errno != errno.ENOENT:
+ raise
+ self._mk_ld_so_conf(fp)
+ # fall back to an educated guess.
+ l = self.default_ld_path
+ return [pjoin(offset, x) for x in l]
+
+ def _mk_ld_so_conf(self, fp):
+ if not ensure_dirs(os.path.dirname(fp), mode=0755, minimal=True):
+ raise errors.BlockModification(self,
+ "failed creating/setting %s to 0755, root/root for uid/gid" %
+ os.path.basename(fp))
+ # touch the file.
+ try:
+ open(fp, 'w')
+ except (IOError, OSError), e:
+ raise errors.BlockModification(self, e)
+
+ def trigger(self, engine):
+ locations = self.read_ld_so_conf(engine.offset)
+ if engine.phase.startswith('pre_'):
+ self.saved_mtimes.set_state(locations)
+ return
+
+ if self.saved_mtimes.check_state(locations):
+ self.regen(engine.offset)
+
+ def regen(self, offset):
+ ret = spawn.spawn(["/sbin/ldconfig", "-r", offset], fd_pipes={1:1, 2:2})
+ if ret != 0:
+ raise errors.TriggerWarning(self,
+ "ldconfig returned %i from execution" % ret)
+
+
+class InfoRegen(base):
+
+ required_csets = ()
+
+ # could implement this to look at csets, and do incremental removal and
+ # addition; doesn't seem worth while though for the additional complexity
+
+ _hooks = ('pre_merge', 'post_merge', 'pre_unmerge', 'post_unmerge')
+ _engine_types = None
+ _label = "gnu info regen"
+
+ locations = ('/usr/share/info',)
+
+ def __init__(self):
+ self.saved_mtimes = mtime_watcher()
+
+ def get_binary_path(self):
+ try:
+ return spawn.find_binary('install-info')
+ except spawn.CommandNotFound:
+ # swallow it.
+ return None
+
+ def trigger(self, engine):
+ bin_path = self.get_binary_path()
+ if bin_path is None:
+ return
+
+ offset = engine.offset
+
+ locs = [pjoin(offset, x.lstrip(os.path.sep)) for x in self.locations]
+
+ if engine.phase.startswith('pre_'):
+ self.saved_mtimes.set_state(locs)
+ return
+ elif engine.phase == 'post_merge' and \
+ engine.mode == const.REPLACE_MODE:
+ # skip post_merge for replace.
+ # we catch it on unmerge...
+ return
+
+ regens = set(x.location for x in self.saved_mtimes.get_changes(locs))
+ # force regeneration of any directory lacking the info index.
+ regens.update(x for x in locs if not os.path.isfile(pjoin(x, 'dir')))
+
+ bad = []
+ for x in regens:
+ bad.extend(self.regen(bin_path, x))
+
+ if bad and engine.observer is not None:
+ engine.observer.warn("bad info files: %r" % sorted(bad))
+
+ def regen(self, binary, basepath):
+ ignores = ("dir", "dir.old")
+ try:
+ files = listdir_files(basepath)
+ except OSError, oe:
+ if oe.errno == errno.ENOENT:
+ return
+ raise
+
+ # wipe old indexes.
+ for x in set(ignores).intersection(files):
+ os.remove(pjoin(basepath, x))
+
+ index = pjoin(basepath, 'dir')
+ for x in files:
+ if x in ignores or x.startswith("."):
+ continue
+
+ ret, data = spawn.spawn_get_output(
+ [binary, '--quiet', pjoin(basepath, x),
+ '--dir-file', index],
+ collect_fds=(1,2), split_lines=False)
+
+ if not data or "already exists" in data or \
+ "warning: no info dir entry" in data:
+ continue
+ yield pjoin(basepath, x)
+
+
+class merge(base):
+
+ required_csets = ('install',)
+ _engine_types = INSTALLING_MODES
+ _hooks = ('merge',)
+
+ def trigger(self, engine, merging_cset):
+ op = get_plugin('fs_ops.merge_contents')
+ return op(merging_cset, callback=engine.observer.installing_fs_obj)
+
+
+class unmerge(base):
+
+ required_csets = ('uninstall',)
+ _engine_types = UNINSTALLING_MODES
+ _hooks = ('unmerge',)
+
+ def trigger(self, engine, unmerging_cset):
+ op = get_plugin('fs_ops.unmerge_contents')
+ return op(unmerging_cset, callback=engine.observer.removing_fs_obj)
+
+
+class fix_uid_perms(base):
+
+ required_csets = ('new_cset',)
+ _hooks = ('pre_merge',)
+ _engine_types = INSTALLING_MODES
+
+ def __init__(self, uid=pkgcore.os_data.portage_uid,
+ replacement=pkgcore.os_data.root_uid):
+
+ base.__init__(self)
+ self.bad_uid = uid
+ self.good_uid = replacement
+
+ def trigger(self, engine, cset):
+ good = self.good_uid
+ bad = self.bad_uid
+
+ cset.update(x.change_attributes(uid=good)
+ for x in cset if x.uid == bad)
+
+
+class fix_gid_perms(base):
+
+ required_csets = ('new_cset',)
+ _hooks = ('pre_merge',)
+ _engine_types = INSTALLING_MODES
+
+ def __init__(self, gid=pkgcore.os_data.portage_gid,
+ replacement=pkgcore.os_data.root_gid):
+
+ base.__init__(self)
+ self.bad_gid = gid
+ self.good_gid = replacement
+
+ def trigger(self, engine, cset):
+ good = self.good_gid
+ bad = self.bad_gid
+
+ cset.update(x.change_attributes(gid=good)
+ for x in cset if x.gid == bad)
+
+
+class fix_set_bits(base):
+
+ required_csets = ('new_cset',)
+ _hooks = ('pre_merge',)
+ _engine_types = INSTALLING_MODES
+
+ def trigger(self, engine, cset):
+ reporter = engine.observer
+ # if s(uid|gid) *and* world writable...
+ l = [x for x in cset.iterlinks(True) if
+ (x.mode & 06000) and (x.mode & 0002)]
+
+ if reporter is not None:
+ for x in l:
+ if x.mode & 04000:
+ reporter.warn(
+ "correcting unsafe world writable SetGID: %s" %
+ (x.location,))
+ else:
+ reporter.warn(
+ "correcting unsafe world writable SetUID: %s" %
+ (x.location,))
+
+ if l:
+ # wipe setuid/setgid and the world-writable bit
+ cset.update(x.change_attributes(mode=x.mode & ~06002) for x in l)
+
+
+class detect_world_writable(base):
+
+ required_csets = ('new_cset',)
+ _hooks = ('pre_merge',)
+ _engine_types = INSTALLING_MODES
+
+ def __init__(self, fix_perms=False):
+ base.__init__(self)
+ self.fix_perms = fix_perms
+
+ def trigger(self, engine, cset):
+ if not engine.observer and not self.fix_perms:
+ return
+
+ reporter = engine.observer
+
+ l = [x for x in cset.iterlinks(True) if x.mode & 0002]
+ if reporter is not None:
+ for x in l:
+ reporter.warn("world writable file: %s" % x.location)
+ if self.fix_perms:
+ cset.update(x.change_attributes(mode=x.mode & ~0002) for x in l)
+
+
+class PruneFiles(base):
+
+ required_csets = ('new_cset',)
+ _hooks = ('pre_merge',)
+ _engine_types = INSTALLING_MODES
+
+ def __init__(self, sentinel_func):
+ """
+ @param sentinel_func: callable accepting a fsBase entry, returns
+ True if the entry should be removed, False otherwise
+ """
+ base.__init__(self)
+ self.sentinel = sentinel_func
+
+ def trigger(self, engine, cset):
+ removal = filter(self.sentinel, cset)
+ if engine.observer:
+ for x in removal:
+ engine.observer.info("pruning: %s" % x.location)
+ cset.difference_update(removal)
+
+
+class CommonDirectoryModes(base):
+
+ required_csets = ('new_cset',)
+ _hooks = ('pre_merge',)
+ _engine_types = INSTALLING_MODES
+
+ directories = [pjoin('/usr', x) for x in ('.', 'lib', 'lib64', 'lib32',
+ 'bin', 'sbin', 'local')]
+ directories.extend(pjoin('/usr/share', x) for x in ('.', 'man', 'info'))
+ directories.extend('/usr/share/man/man%i' % x for x in xrange(1, 10))
+ directories.extend(['/lib', '/lib32', '/lib64', '/etc', '/bin', '/sbin',
+ '/var'])
+ directories = frozenset(map(normpath, directories))
+ del x
+
+ def trigger(self, engine, cset):
+ r = engine.observer
+ if not r:
+ return
+ for x in cset.iterdirs():
+ if x.location not in self.directories:
+ continue
+ if x.mode != 0755:
+ r.warn('%s path has mode %s, should be 0755' %
+ (x.location, oct(x.mode)))
diff --git a/pkgcore/os_data.py b/pkgcore/os_data.py
new file mode 100644
index 0000000..b7ab71c
--- /dev/null
+++ b/pkgcore/os_data.py
@@ -0,0 +1,74 @@
+# Copyright: 2005 Gentoo Foundation
+# License: GPL2
+
+"""
+OS data (root uid/gid, portage uid/gid, etc). Avoid using this directly.
+
+This will be killed off and bound into configuration subsystem at some point
+"""
+
+import os, pwd, grp
+
+ostype = os.uname()[0]
+
+if ostype == "Linux":
+ userland = "GNU"
+ xargs = os.environ["XARGS"] = "xargs -r"
+ lchown = os.lchown
+elif ostype == "Darwin":
+ userland = "Darwin"
+ xargs = os.environ["XARGS"] = "xargs"
+ def lchown(*pos_args, **key_args):
+ pass
+elif ostype in ["FreeBSD", "OpenBSD", "NetBSD"]:
+ userland = "BSD"
+ xargs = os.environ["XARGS"] = "xargs"
+ lchown = os.lchown
+else:
+ raise Exception("Operating system unsupported, '%s'" % ostype)
+
+
+#os.environ["USERLAND"] = userland
+
+# secpass: 2 for root, 1 for wheel group members or when the portage user exists.
+secpass = 0
+
+uid = os.getuid()
+# hard coding sucks.
+root_uid = 0
+root_gid = wheelgid = 0
+
+if uid == 0:
+ secpass = 2
+try:
+ wheelgid = grp.getgrnam("wheel").gr_gid
+ if (not secpass) and (wheelgid in os.getgroups()):
+ secpass = 1
+except KeyError:
+ print "portage initialization: your system doesn't have a 'wheel' group."
+ print ("Please fix this as it is a normal system requirement. "
+ "'wheel' is GID 10")
+ print "'emerge baselayout' and an 'etc-update' should remedy this problem."
+
+#Discover the uid and gid of the portage user/group
+try:
+ portage_uid = pwd.getpwnam("portage").pw_uid
+ portage_gid = grp.getgrnam("portage").gr_gid
+ portage_user_groups = tuple(x.gr_name for x in grp.getgrall()
+ if 'portage' in x.gr_mem)
+
+ if (secpass == 0):
+ secpass = 1
+except KeyError:
+ portage_uid = 0
+ portage_gid = wheelgid
+ portage_user_groups = []
+ print
+ print "'portage' user or group missing. Please update baselayout"
+ print "and merge portage user(250) and group(250) into your passwd"
+ print "and group files. Non-root compilation is disabled until then."
+ print "Also note that non-root/wheel users will need to be added to"
+ print "the portage group to do portage commands.\n"
+ print "For the defaults, line 1 goes into passwd, and 2 into group."
+ print "portage:x:250:250:portage:/var/tmp/portage:/bin/false"
+ print "portage::250:portage"
diff --git a/pkgcore/package/__init__.py b/pkgcore/package/__init__.py
new file mode 100644
index 0000000..73d322b
--- /dev/null
+++ b/pkgcore/package/__init__.py
@@ -0,0 +1,10 @@
+# Copyright: 2005 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+package interface/classes
+"""
+
+# cpv and atom circularly import each other. This enforces a working order.
+#import cpv
+
diff --git a/pkgcore/package/base.py b/pkgcore/package/base.py
new file mode 100644
index 0000000..00684e6
--- /dev/null
+++ b/pkgcore/package/base.py
@@ -0,0 +1,67 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+base package class; instances should derive from this.
+
+Right now, doesn't provide much, need to change that down the line
+"""
+
+class base(object):
+
+ built = False
+ configurable = False
+
+ __slots__ = ("__weakref__",)
+ _get_attr = {}
+
+ def __setattr__(self, attr, value):
+ raise AttributeError(self, attr)
+
+ def __delattr__(self, attr):
+ raise AttributeError(self, attr)
+
+ def __getattr__(self, attr):
+ try:
+ val = self._get_attr[attr](self)
+ object.__setattr__(self, attr, val)
+ return val
+ except KeyError:
+ raise AttributeError(self, attr)
+
+ @property
+ def versioned_atom(self):
+ raise NotImplementedError(self, "versioned_atom")
+
+ @property
+ def unversioned_atom(self):
+ raise NotImplementedError(self, "versioned_atom")
+
+
+class wrapper(base):
+
+ __slots__ = ("_raw_pkg",)
+
+ def __init__(self, raw_pkg):
+ object.__setattr__(self, "_raw_pkg", raw_pkg)
+
+ def __cmp__(self, other):
+ if isinstance(other, wrapper):
+ return cmp(self._raw_pkg, other._raw_pkg)
+ return cmp(self._raw_pkg, other)
+
+ def __eq__(self, other):
+ if isinstance(other, wrapper):
+ return cmp(self._raw_pkg, other._raw_pkg) == 0
+ return cmp(self._raw_pkg, other) == 0
+
+ def __ne__(self, other):
+ return not self == other
+
+ @property
+ def versioned_atom(self):
+ return self._raw_pkg.versioned_atom
+
+ @property
+ def unversioned_atom(self):
+ return self._raw_pkg.unversioned_atom
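+
+
+# --- Editor's example (illustrative sketch, not part of this commit) ---
+# How the _get_attr machinery is meant to be used by derivatives: map an
+# attribute name to a callable taking the instance; __getattr__ computes the
+# value on first access and caches it (the attr therefore needs a slot).
+class _demo_pkg(base):
+
+    __slots__ = ("greeting",)
+    _get_attr = {"greeting":
+        lambda self: "hello from %s" % (self.__class__.__name__,)}
+
+# _demo_pkg().greeting is computed lazily on first access and stored via
+# object.__setattr__, so later lookups bypass __getattr__ entirely.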
diff --git a/pkgcore/package/conditionals.py b/pkgcore/package/conditionals.py
new file mode 100644
index 0000000..96456b1
--- /dev/null
+++ b/pkgcore/package/conditionals.py
@@ -0,0 +1,249 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+conditional attributes on a package.
+
+Changing them triggers regeneration of other attributes on the package instance.
+"""
+
+from operator import attrgetter
+from pkgcore.package.base import wrapper
+
+from snakeoil.containers import LimitedChangeSet, Unchangable
+from snakeoil.klass import GetAttrProxy
+from snakeoil.currying import partial
+from snakeoil.demandload import demandload
+demandload(globals(), "copy")
+
+
+def _getattr_wrapped(attr, self):
+ o = self._cached_wrapped.get(attr)
+ if o is None or o[0] != self._reuse_pt:
+ o = self._wrapped_attr[attr](getattr(self._raw_pkg, attr),
+ self._configurable)
+ o = self._cached_wrapped[attr] = (self._reuse_pt, o)
+ return o[1]
+
+
+def make_wrapper(configurable_attribute_name, attributes_to_wrap=()):
+ """
+ @param configurable_attribute_name: attribute name to add,
+ and that is used for evaluating attributes_to_wrap
+ @param attributes_to_wrap: mapping of attr_name:callable
+ for re-evaluating the attr on access; the result is used
+ instead of the wrapped pkg's raw attr.
+ """
+
+ if configurable_attribute_name.find(".") != -1:
+ raise ValueError("can only wrap first level attributes, "
+ "'obj.dar' fex, not '%s'" %
+ (configurable_attribute_name))
+
+ class PackageWrapper(wrapper):
+ """Add a new attribute, and evaluate attributes of a wrapped pkg."""
+
+ __slots__ = ("_unchangable", "_configurable",
+ "_reuse_pt", "_cached_wrapped", "_buildable")
+
+ _wrapped_attr = attributes_to_wrap
+ _configurable_name = configurable_attribute_name
+
+ configurable = True
+
+ locals()[configurable_attribute_name] = \
+ property(attrgetter("_configurable"))
+
+ locals().update((x, property(partial(_getattr_wrapped, x)))
+ for x in attributes_to_wrap)
+
+ __getattr__ = GetAttrProxy("_raw_pkg")
+
+ def __init__(self, pkg_instance,
+ initial_settings=None, unchangable_settings=None,
+ build_callback=None):
+
+ """
+ @type pkg_instance: L{pkgcore.package.metadata.package}
+ @param pkg_instance: instance to wrap.
+ @type initial_settings: sequence
+ @param initial_settings: initial configuration of the
+ configurable_attribute
+ @type unchangable_settings: sequence
+ @param unchangable_settings: settings that configurable_attribute
+ cannot be set to
+ @param build_callback: None, or a callable to be used to get a
+ L{pkgcore.interfaces.format.build_base} instance
+ """
+
+ if initial_settings is None:
+ initial_settings = []
+ if unchangable_settings is None:
+ unchangable_settings = []
+
+ sf = object.__setattr__
+ sf(self, '_unchangable', unchangable_settings)
+ sf(self, '_configurable',
+ LimitedChangeSet(initial_settings, unchangable_settings))
+ sf(self, '_reuse_pt', 0)
+ sf(self, '_cached_wrapped', {})
+ sf(self, '_buildable', build_callback)
+ wrapper.__init__(self, pkg_instance)
+
+ def __copy__(self):
+ return self.__class__(self._raw_pkg,
+ initial_settings=set(self._configurable),
+ unchangable_settings=self._unchangable,
+ build_callback=self._buildable)
+
+ def rollback(self, point=0):
+ """
+ rollback changes to the configurable attribute to an earlier point
+
+ @param point: must be an int
+ """
+ self._configurable.rollback(point)
+ # yes, nuking objs isn't necessarily required. easier this way though.
+ # XXX: optimization point
+ object.__setattr__(self, '_reuse_pt', self._reuse_pt + 1)
+
+ def commit(self):
+ """
+ Commit current changes.
+
+ Committed changes can no longer be rolled back; rollback points restart from here.
+ """
+ self._configurable.commit()
+ object.__setattr__(self, '_reuse_pt', 0)
+
+ def changes_count(self):
+ """
+ current commit point for the configurable
+ """
+ return self._configurable.changes_count()
+
+ def request_enable(self, attr, *vals):
+ """
+ internal function
+
+ since configurable somewhat steps outside of normal
+ restriction protocols, request_enable requests that this
+ package instance change its configuration to make the
+ restriction return True; if not possible, reverts any changes
+ it attempted
+
+ @param attr: attr to try and change
+ @param vals: L{pkgcore.restrictions.values.base} instances that
+ we're attempting to make match True
+ """
+ if attr not in self._wrapped_attr:
+ if attr == self._configurable_name:
+ entry_point = self.changes_count()
+ try:
+ map(self._configurable.add, vals)
+ object.__setattr__(self, '_reuse_pt',
+ self._reuse_pt + 1)
+ return True
+ except Unchangable:
+ self.rollback(entry_point)
+ else:
+ a = getattr(self._raw_pkg, attr)
+ for x in vals:
+ if x not in a:
+ break
+ else:
+ return True
+ return False
+ entry_point = self.changes_count()
+ a = getattr(self._raw_pkg, attr)
+ try:
+ for x in vals:
+ succeeded = False
+ for reqs in a.node_conds.get(x, ()):
+ succeeded = reqs.force_True(self)
+ if succeeded:
+ break
+ if not succeeded:
+ self.rollback(entry_point)
+ return False
+ except Unchangable:
+ self.rollback(entry_point)
+ return False
+ object.__setattr__(self, '_reuse_pt', self._reuse_pt + 1)
+ return True
+
+ def request_disable(self, attr, *vals):
+ """
+ internal function
+
+ since configurable somewhat steps outside of normal
+ restriction protocols, request_disable requests that this
+ package instance change its configuration to make the
+ restriction return False; if not possible, reverts any changes
+ it attempted
+
+ @param attr: attr to try and change
+ @param vals: L{pkgcore.restrictions.values.base} instances that
+ we're attempting to make match False
+ """
+ if attr not in self._wrapped_attr:
+ if attr == self._configurable_name:
+ entry_point = self.changes_count()
+ try:
+ map(self._configurable.remove, vals)
+ return True
+ except Unchangable:
+ self.rollback(entry_point)
+ else:
+ a = getattr(self._raw_pkg, attr)
+ for x in vals:
+ if x in a:
+ break
+ else:
+ return True
+ return False
+ entry_point = self.changes_count()
+ a = getattr(self._raw_pkg, attr)
+ try:
+ for x in vals:
+ succeeded = False
+ for reqs in a.node_conds.get(x, ()):
+ succeeded = reqs.force_False(self)
+ if succeeded:
+ break
+ if not succeeded:
+ self.rollback(entry_point)
+ return False
+ except Unchangable:
+ self.rollback(entry_point)
+ return False
+ object.__setattr__(self, '_reuse_pt', self._reuse_pt + 1)
+ return True
+
+ def __str__(self):
+ return "config wrapped(%s): %s" % (self._configurable_name,
+ self._raw_pkg)
+
+ def __repr__(self):
+ return "<%s pkg=%r wrapped=%r @%#8x>" % (
+ self.__class__.__name__, self._raw_pkg, self._configurable_name,
+ id(self))
+
+ def freeze(self):
+ o = copy.copy(self)
+ o.lock()
+ return o
+
+ def lock(self):
+ """
+ commit any outstanding changes and lock the configuration.
+ """
+ self.commit()
+ object.__setattr__(self, '_configurable', list(self._configurable))
+
+ def build(self, **kwds):
+ if self._buildable:
+ return self._buildable(self, **kwds)
+ return None
+
+ return PackageWrapper
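+
+# --- Editor's example (illustrative usage, not part of this commit) ---
+# make_wrapper is typically called once per package format; the names below
+# ("use", evaluate_depset, raw_pkg) are hypothetical stand-ins.  Each wrapped
+# attr callable receives (raw attr value, current configurable set) and its
+# result replaces the raw attr on the wrapped instance.
+#
+#     ConfiguredPkg = make_wrapper(
+#         "use", attributes_to_wrap={"depends": evaluate_depset,
+#                                    "rdepends": evaluate_depset})
+#     pkg = ConfiguredPkg(raw_pkg, initial_settings=["ssl"])
+#     pkg.use          # -> the configurable LimitedChangeSet
+#     pkg.depends      # -> depends re-evaluated against the current settings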
diff --git a/pkgcore/package/errors.py b/pkgcore/package/errors.py
new file mode 100644
index 0000000..9516871
--- /dev/null
+++ b/pkgcore/package/errors.py
@@ -0,0 +1,18 @@
+# Copyright: 2006 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+#base class
+class InvalidPackage(ValueError):
+ pass
+
+
+class MetadataException(Exception):
+
+ def __init__(self, pkg, attr, error):
+ Exception.__init__(self,
+ "Metadata Exception: pkg %s, attr %s\nerror: %s" %
+ (pkg, attr, error))
+ self.pkg, self.attr, self.error = pkg, attr, error
+
+class InvalidDependency(ValueError):
+ pass
diff --git a/pkgcore/package/metadata.py b/pkgcore/package/metadata.py
new file mode 100644
index 0000000..ffc9501
--- /dev/null
+++ b/pkgcore/package/metadata.py
@@ -0,0 +1,126 @@
+# Copyright: 2005-2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+
+"""
+package with its metadata accessible (think 'no longer abstract')
+"""
+
+from pkgcore.ebuild.cpv import CPV
+from pkgcore.ebuild.atom import atom
+
+from snakeoil.weakrefs import WeakValCache
+
+def DeriveMetadataKls(original_kls):
+ if getattr(original_kls, "_derived_metadata_kls", False):
+ return original_kls
+
+ class package(original_kls):
+ _derived_metadata_kls = True
+ built = False
+ __slots__ = ("_parent", "data")
+ try:
+ __doc__ = "package class with metadata bound to it for attribute " \
+ "generation\n\n" + \
+ "\n".join(x.lstrip()
+ for x in original_kls.__doc__.split("\n")
+ if "@ivar" in x or "@cvar" in x)
+ __doc__ += "\n@ivar repo: parent repository"
+ except AttributeError:
+ # wee, must be in -OO mode.
+ __doc__ = None
+
+ immutable = True
+ package_is_real = True
+
+ _get_attr = dict(original_kls._get_attr)
+
+ def __init__(self, parent_repository, *a, **kwds):
+ """
+ wrapper for %s.__init__; see %s.__init__ for allowed args/kwds,
+ they're passed directly to it
+
+ @param parent_repository: parent repository this package belongs to
+ @type parent_repository: L{pkgcore.repository.prototype.tree}
+ instance
+ """
+ original_kls.__init__(self, *a, **kwds)
+ object.__setattr__(self, "_parent", parent_repository)
+
+ def _get_data(self):
+ """
+ internal hook func to get the package's metadata; registered
+ in L{_get_attr}
+ """
+ return self._fetch_metadata()
+ _get_attr["data"] = _get_data
+
+ @property
+ def repo(self):
+ return self._parent._parent_repo
+
+ @property
+ def slotted_atom(self):
+ return atom("%s:%s" % (self.key, self.slot))
+
+ def _fetch_metadata(self):
+ """
+ pull the metadata for this package.
+ must be overridden in derivative
+ """
+ raise NotImplementedError
+
+ def add_format_triggers(self, op_inst, format_op_inst, engine_inst):
+ pass
+
+ return package
+
+package = DeriveMetadataKls(CPV)
+
+class factory(object):
+
+ """
+ package generator
+
+ does weakref caching per repository
+
+ @cvar child_class: callable to generate packages
+ """
+
+ child_class = package
+
+ def __init__(self, parent_repo):
+ self._parent_repo = parent_repo
+ self._cached_instances = WeakValCache()
+
+ def new_package(self, *args):
+ """
+ generate a new package instance
+
+ """
+ inst = self._cached_instances.get(args)
+ if inst is None:
+ inst = self._cached_instances[args] = self.child_class(self, *args)
+ return inst
+
+ def __call__(self, *args, **kwds):
+ return self.new_package(*args, **kwds)
+
+ def clear(self):
+ """
+ wipe the weakref cache of package instances
+ """
+ self._cached_instances.clear()
+
+ def _get_metadata(self, *args):
+ """Pulls metadata from the repo/cache/wherever.
+
+ Must be overridden in derivatives.
+ """
+ raise NotImplementedError
+
+ def _update_metadata(self, *args):
+ """Updates metadata in the repo/cache/wherever.
+
+ Must be overriden in derivatives."""
+ raise NotImplementedError
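+
+
+# --- Editor's example (illustrative sketch, not part of this commit) ---
+# The minimal derivative contract: a repository-specific factory overrides
+# _get_metadata/_update_metadata (and its package class's _fetch_metadata),
+# then hands CPV arguments to new_package(); identical arguments return the
+# cached instance for as long as it is referenced elsewhere.  parent_repo and
+# the metadata returned below are hypothetical.
+#
+#     class my_factory(factory):
+#         def _get_metadata(self, pkg):
+#             return {"SLOT": "0"}
+#
+#     f = my_factory(parent_repo)
+#     p1 = f.new_package("dev-util/diffball-1.0")
+#     p2 = f.new_package("dev-util/diffball-1.0")
+#     assert p1 is p2          # served from the weakref cache while p1 lives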
diff --git a/pkgcore/package/mutated.py b/pkgcore/package/mutated.py
new file mode 100644
index 0000000..03da7a7
--- /dev/null
+++ b/pkgcore/package/mutated.py
@@ -0,0 +1,43 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+package wrapper class to override a packages attributes
+"""
+
+from pkgcore.package.base import wrapper
+
+class MutatedPkg(wrapper):
+ __slots__ = ("_overrides",)
+
+ def __init__(self, pkg, overrides):
+ """
+ @param pkg: L{pkgcore.package.metadata.package} to wrap
+ @param overrides: an attr -> value mapping to substitute when
+ the attr is requested
+ """
+ wrapper.__init__(self, pkg)
+ object.__setattr__(self, "_overrides", overrides)
+
+ def __getattr__(self, attr):
+ o = self._overrides.get(attr)
+ if o is not None:
+ return o
+ return getattr(self._raw_pkg, attr)
+
+ def __repr__(self):
+ return '<%s pkg=%r overrides=%r @%#8x>' % (
+ self.__class__.__name__, self._raw_pkg, tuple(self._overrides),
+ id(self))
+
+ def __str__(self):
+ return '%s(%s, overrides=%s)' % \
+ (self.__class__.__name__, self._raw_pkg, tuple(self._overrides))
+
+ @property
+ def versioned_atom(self):
+ return self._raw_pkg.versioned_atom
+
+ @property
+ def unversioned_atom(self):
+ return self._raw_pkg.unversioned_atom
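+
+
+# --- Editor's example (illustrative, not part of this commit) ---
+# Override a single attribute without touching the wrapped package; anything
+# not in the overrides mapping falls through to the raw pkg.  The fake pkg
+# below is a hypothetical stand-in for a real metadata.package instance.
+if __name__ == '__main__':
+    class _fake_pkg(object):
+        slot, version = "0", "1.0"
+
+    mutated = MutatedPkg(_fake_pkg(), {"slot": "2"})
+    print mutated.slot, mutated.version    # -> 2 1.0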
diff --git a/pkgcore/package/virtual.py b/pkgcore/package/virtual.py
new file mode 100644
index 0000000..813abb4
--- /dev/null
+++ b/pkgcore/package/virtual.py
@@ -0,0 +1,49 @@
+# Copyright: 2005 Jason Stubbs <jstubbs@gentoo.org>
+# License: GPL2
+
+"""
+virtual package
+"""
+
+from pkgcore.package import metadata
+from pkgcore.restrictions.packages import OrRestriction
+
+class package(metadata.package):
+
+ """
+ Virtual package.
+
+ Mainly useful since it generates so few attrs on the fly.
+ """
+
+ package_is_real = False
+ built = True
+
+ __slots__ = ("__dict__")
+
+ def __init__(self, repo, provider, *a, **kwds):
+ metadata.package.__init__(self, repo, *a, **kwds)
+ object.__setattr__(self, 'provider', provider)
+ object.__setattr__(self, 'data', {})
+
+ def __getattr__(self, key):
+ val = None
+ if key == "rdepends":
+ val = self.provider
+ elif key in ("depends", "post_rdepends", "provides"):
+ val = OrRestriction(finalize=True)
+ elif key == "slot":
+ val = "%s-%s" % (self.provider.category, self.version)
+ else:
+ return super(package, self).__getattr__(key)
+ self.__dict__[key] = val
+ return val
+
+ def _fetch_metadata(self):
+ data = self._parent._parent_repo._fetch_metadata(self)
+ return data
+
+
+class factory(metadata.factory):
+ child_class = package
+
diff --git a/pkgcore/pkgsets/__init__.py b/pkgcore/pkgsets/__init__.py
new file mode 100644
index 0000000..e0a075b
--- /dev/null
+++ b/pkgcore/pkgsets/__init__.py
@@ -0,0 +1,6 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+restriction generators representing sets of packages
+"""
diff --git a/pkgcore/pkgsets/filelist.py b/pkgcore/pkgsets/filelist.py
new file mode 100644
index 0000000..346d3ba
--- /dev/null
+++ b/pkgcore/pkgsets/filelist.py
@@ -0,0 +1,78 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+pkgset based around loading a list of atoms from a world file
+"""
+
+import pkgcore.const
+from pkgcore.ebuild.atom import atom
+from pkgcore.config import ConfigHint
+
+from snakeoil.demandload import demandload
+demandload(globals(),
+ 'snakeoil.fileutils:AtomicWriteFile',
+ 'snakeoil.osutils:readlines',
+ 'pkgcore:os_data',
+)
+
+class FileList(object):
+ pkgcore_config_type = ConfigHint({'location':'str'}, typename='pkgset')
+
+ def __init__(self, location):
+ self.path = location
+ # note that _atoms is generated on the fly.
+
+ def __getattr__(self, attr):
+ if attr != "_atoms":
+ raise AttributeError(attr)
+ s = set()
+ for x in readlines(self.path):
+ x = x.strip()
+ if not x:
+ continue
+ s.add(atom(x))
+ self._atoms = s
+ return s
+
+ def __iter__(self):
+ return iter(self._atoms)
+
+ def __len__(self):
+ return len(self._atoms)
+
+ def __contains__(self, key):
+ return key in self._atoms
+
+ def add(self, atom_inst):
+ self._atoms.add(atom_inst)
+
+ def remove(self, atom_inst):
+ self._atoms.remove(atom_inst)
+
+ def flush(self):
+ f = None
+ # structured this way to force deletion (thus wiping) if something
+ # fails.
+ try:
+ f = AtomicWriteFile(self.path, gid=os_data.portage_gid, perms=0644)
+ f.write("\n".join(map(str, self._atoms)))
+ f.close()
+ finally:
+ del f
+
+
+class WorldFile(FileList):
+ pkgcore_config_type = ConfigHint(typename='pkgset')
+
+ def __init__(self, location=pkgcore.const.WORLD_FILE):
+ FileList.__init__(self, location)
+
+ def add(self, atom_inst):
+ atom_inst = atom(atom_inst.key)
+ FileList.add(self, atom_inst)
+
+ def remove(self, atom_inst):
+ atom_inst = atom(atom_inst.key)
+ FileList.remove(self, atom_inst)
+
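+
+# Usage sketch (illustrative only; the world file path is just an example):
+#
+#   from pkgcore.pkgsets.filelist import FileList
+#   from pkgcore.ebuild.atom import atom
+#   world = FileList("/var/lib/portage/world")
+#   len(world)                          # number of atoms parsed from the file
+#   world.add(atom("dev-lang/python"))
+#   world.flush()                       # rewrites the file atomically
+#
+# WorldFile() behaves the same, but defaults to pkgcore.const.WORLD_FILE and
+# reduces added/removed atoms to their unversioned key.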
diff --git a/pkgcore/pkgsets/glsa.py b/pkgcore/pkgsets/glsa.py
new file mode 100644
index 0000000..0d57142
--- /dev/null
+++ b/pkgcore/pkgsets/glsa.py
@@ -0,0 +1,253 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+Gentoo Linux Security Advisories (GLSA) support
+"""
+
+import os
+
+from pkgcore.restrictions import packages, restriction, boolean, values
+from pkgcore.config import ConfigHint
+
+from snakeoil.osutils import listdir_files, join as pjoin
+from snakeoil.klass import generic_equality
+from snakeoil.iterables import caching_iter
+from snakeoil.demandload import demandload
+demandload(globals(),
+ 'pkgcore.package:mutated',
+ 'pkgcore.ebuild:cpv,atom',
+ 'pkgcore.log:logger',
+ 'pkgcore.util.repo_utils:get_virtual_repos',
+ 'snakeoil.xml:etree',
+)
+
+
+class KeyedAndRestriction(boolean.AndRestriction):
+
+ type = packages.package_type
+
+ def __init__(self, *a, **kwds):
+ key = kwds.pop("key", None)
+ tag = kwds.pop("tag", None)
+ boolean.AndRestriction.__init__(self, *a, **kwds)
+ object.__setattr__(self, "key", key)
+ object.__setattr__(self, "tag", tag)
+
+ def __str__(self):
+ if self.tag is None:
+ return boolean.AndRestriction.__str__(self)
+ return "%s %s" % (self.tag, boolean.AndRestriction.__str__(self))
+
+
+class GlsaDirSet(object):
+
+ """
+ generate a pkgset based on GLSAs distributed via a directory.
+
+ (rsync tree is the usual source.)
+ """
+
+ pkgcore_config_type = ConfigHint({'src': 'ref:repo'}, typename='pkgset')
+ op_translate = {"ge":">=", "gt":">", "lt":"<", "le":"<=", "eq":"="}
+
+ __metaclass__ = generic_equality
+ __attr_comparison__ = ('paths',)
+
+ def __init__(self, src):
+ """
+ @param src: where to get the GLSAs from
+ @type src: either a full path to the glsa dir, or a repo object
+ to pull it from
+ """
+
+ if not isinstance(src, basestring):
+ src = tuple(sorted(filter(os.path.isdir,
+ (pjoin(repo.base, 'metadata', 'glsa') for repo in
+ get_virtual_repos(src, False) if hasattr(repo, 'base'))
+ )))
+ else:
+ src = [src]
+ self.paths = src
+
+ def __iter__(self):
+ for glsa, catpkg, pkgatom, vuln in self.iter_vulnerabilities():
+ yield KeyedAndRestriction(pkgatom, vuln, finalize=True, key=catpkg,
+ tag="GLSA vulnerable:")
+
+ def pkg_grouped_iter(self, sorter=None):
+ """
+ yield GLSA restrictions grouped by package key
+
+ @param sorter: either None, or a callable that takes an iterable of
+ package keys and returns them in the desired order (e.g. sorted)
+ """
+
+ if sorter is None:
+ sorter = iter
+ pkgs = {}
+ pkgatoms = {}
+ for glsa, pkg, pkgatom, vuln in self.iter_vulnerabilities():
+ pkgatoms[pkg] = pkgatom
+ pkgs.setdefault(pkg, []).append(vuln)
+
+ for pkgname in sorter(pkgs):
+ yield KeyedAndRestriction(pkgatoms[pkgname],
+ packages.OrRestriction(*pkgs[pkgname]),
+ key=pkgname)
+
+
+ def iter_vulnerabilities(self):
+ """
+ generator yielding each GLSA restriction
+ """
+ for path in self.paths:
+ for fn in listdir_files(path):
+ #"glsa-1234-12.xml
+ if not (fn.startswith("glsa-") and fn.endswith(".xml")):
+ continue
+ # This verifies the filename is of the correct syntax.
+ try:
+ [int(x) for x in fn[5:-4].split("-")]
+ except ValueError:
+ continue
+ root = etree.parse(pjoin(path, fn))
+ glsa_node = root.getroot()
+ if glsa_node.tag != 'glsa':
+ raise ValueError("glsa without glsa rootnode")
+ for affected in root.findall('affected'):
+ for pkg in affected.findall('package'):
+ try:
+ pkgname = str(pkg.get('name')).strip()
+ pkg_vuln_restrict = \
+ self.generate_intersects_from_pkg_node(
+ pkg, tag="glsa(%s)" % fn[5:-4])
+ if pkg_vuln_restrict is None:
+ continue
+ pkgatom = atom.atom(pkgname)
+ yield fn[5:-4], pkgname, pkgatom, pkg_vuln_restrict
+ except (TypeError, ValueError), v:
+ # thrown from cpv.
+ logger.warn("invalid glsa- %s, package %s: error %s"
+ % (fn, pkgname, v))
+ del v
+
+
+ def generate_intersects_from_pkg_node(self, pkg_node, tag=None):
+ arch = pkg_node.get("arch")
+ if arch is not None:
+ arch = str(arch.strip()).split()
+ if not arch or "*" in arch:
+ arch = None
+
+ vuln = list(pkg_node.findall("vulnerable"))
+ if not vuln:
+ return None
+ elif len(vuln) > 1:
+ vuln_list = [self.generate_restrict_from_range(x) for x in vuln]
+ vuln = packages.OrRestriction(finalize=True, *vuln_list)
+ else:
+ vuln_list = [self.generate_restrict_from_range(vuln[0])]
+ vuln = vuln_list[0]
+ if arch is not None:
+ vuln = packages.AndRestriction(vuln, packages.PackageRestriction(
+ "keywords", values.ContainmentMatch(all=False, *arch)))
+ invuln = (pkg_node.findall("unaffected"))
+ if not invuln:
+ # wrap it.
+ return KeyedAndRestriction(vuln, tag=tag, finalize=True)
+ invuln_list = [self.generate_restrict_from_range(x, negate=True)
+ for x in invuln]
+ invuln = [x for x in invuln_list if x not in vuln_list]
+ if not invuln:
+ return KeyedAndRestriction(vuln, tag=tag, finalize=True)
+ return KeyedAndRestriction(vuln, finalize=True, tag=tag, *invuln)
+
+ def generate_restrict_from_range(self, node, negate=False):
+ op = str(node.get("range").strip())
+ base = str(node.text.strip())
+ glob = base.endswith("*")
+ if glob:
+ base = base[:-1]
+ base = cpv.CPV("cat/pkg-%s" % base)
+ restrict = self.op_translate[op.lstrip("r")]
+ if op.startswith("r"):
+ if glob:
+ raise ValueError("glob cannot be used with %s ops" % op)
+ elif not base.revision:
+ if '=' not in restrict:
+ # this is a non-range.
+ raise ValueError(
+ "range %s version %s is a guranteed empty set" %
+ (op, str(node.text.strip())))
+ return atom.VersionMatch("~", base.version, negate=negate)
+ return packages.AndRestriction(
+ atom.VersionMatch("~", base.version),
+ atom.VersionMatch(restrict, base.version, rev=base.revision),
+ finalize=True, negate=negate)
+ if glob:
+ return packages.PackageRestriction("fullver",
+ values.StrGlobMatch(base.fullver))
+ return atom.VersionMatch(restrict, base.version, rev=base.revision,
+ negate=negate)
+
+
+def find_vulnerable_repo_pkgs(glsa_src, repo, grouped=False, arch=None):
+ """
+ generator yielding GLSA restrictions, and vulnerable pkgs from a repo.
+
+ @param glsa_src: GLSA pkgset to pull vulnerabilities from
+ @param repo: repo to scan for vulnerable packages
+ @param grouped: if grouped, combine glsa restrictions into one restriction
+ (thus yielding a pkg only once)
+ @param arch: arch to scan for, x86 for example
+ """
+
+ if grouped:
+ i = glsa_src.pkg_grouped_iter()
+ else:
+ i = iter(glsa_src)
+ if arch is None:
+ wrapper = lambda p: p
+ else:
+ if isinstance(arch, basestring):
+ arch = (arch,)
+ else:
+ arch = tuple(arch)
+ wrapper = lambda p: mutated.MutatedPkg(p, {"keywords":arch})
+ for restrict in i:
+ matches = caching_iter(wrapper(x)
+ for x in repo.itermatch(restrict,
+ sorter=sorted))
+ if matches:
+ yield restrict, matches
+
+
+class SecurityUpgrades(object):
+
+ """
+ pkgset that can be used directly from pkgcore configuration.
+
+ generates a set of restrictions for required upgrades.
+ """
+
+ pkgcore_config_type = ConfigHint({'ebuild_repo': 'ref:repo',
+ 'vdb': 'ref:vdb'},
+ typename='pkgset')
+
+ __metaclass__ = generic_equality
+ __attr_comparison__ = ('arch', 'glsa_src', 'vdb')
+
+ def __init__(self, ebuild_repo, vdb, arch):
+ self.glsa_src = GlsaDirSet(ebuild_repo)
+ self.vdb = vdb
+ self.arch = arch
+
+ def __iter__(self):
+ for glsa, matches in find_vulnerable_repo_pkgs(self.glsa_src, self.vdb,
+ grouped=True,
+ arch=self.arch):
+ yield KeyedAndRestriction(glsa[0], restriction.Negate(glsa[1]),
+ finalize=True)
+
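+
+# Usage sketch (illustrative only; the glsa directory path and the vdb
+# repository object are assumptions):
+#
+#   from pkgcore.pkgsets.glsa import GlsaDirSet, find_vulnerable_repo_pkgs
+#   glsas = GlsaDirSet("/usr/portage/metadata/glsa")
+#   for restrict, pkgs in find_vulnerable_repo_pkgs(glsas, vdb,
+#                                                   grouped=True, arch="x86"):
+#       print restrict, list(pkgs)   # vulnerable installed pkgs per GLSA key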
diff --git a/pkgcore/pkgsets/installed.py b/pkgcore/pkgsets/installed.py
new file mode 100644
index 0000000..3190ea6
--- /dev/null
+++ b/pkgcore/pkgsets/installed.py
@@ -0,0 +1,38 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+import operator
+
+from pkgcore.restrictions import packages, values
+from pkgcore.config import ConfigHint
+
+
+class _Base(object):
+
+ """Base for Installed and VersionedInstalled."""
+
+ def __init__(self, vdb):
+ self.vdbs = vdb
+
+ def __iter__(self):
+ restrict = packages.PackageRestriction("package_is_real",
+ values.EqualityMatch(True))
+ for repo in self.vdbs:
+ for pkg in repo.itermatch(restrict):
+ yield self.getter(pkg)
+
+
+class Installed(_Base):
+
+ """pkgset holding slotted_atoms of all installed pkgs."""
+
+ pkgcore_config_type = ConfigHint({'vdb': 'refs:repo'}, typename='pkgset')
+ getter = operator.attrgetter('slotted_atom')
+
+
+class VersionedInstalled(_Base):
+
+ """pkgset holding versioned_atoms of all installed pkgs."""
+
+ pkgcore_config_type = ConfigHint({'vdb': 'refs:repo'}, typename='pkgset')
+ getter = operator.attrgetter('versioned_atom')
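+
+# Usage sketch (illustrative only; vdb stands for an installed-packages repo):
+#
+#   from pkgcore.pkgsets.installed import Installed
+#   for a in Installed([vdb]):
+#       print a          # one slotted atom per installed (non-virtual) pkg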
diff --git a/pkgcore/pkgsets/system.py b/pkgcore/pkgsets/system.py
new file mode 100644
index 0000000..978d164
--- /dev/null
+++ b/pkgcore/pkgsets/system.py
@@ -0,0 +1,13 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+ system pkgset based on collapsing the profile's system entries
+"""
+
+# yuck. :)
+from pkgcore.config import configurable
+
+@configurable({'profile': 'ref:profile'}, typename='pkgset')
+def SystemSet(profile):
+ return frozenset(profile.system)
diff --git a/pkgcore/plugin.py b/pkgcore/plugin.py
new file mode 100644
index 0000000..07d3c93
--- /dev/null
+++ b/pkgcore/plugin.py
@@ -0,0 +1,224 @@
+# Copyright: 2006 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+
+"""Plugin system, heavily inspired by twisted's plugin system."""
+
+# Implementation note: we have to be pretty careful about error
+# handling in here since some core functionality in pkgcore uses this
+# code. Since we can function without a cache we will generally be
+# noisy but keep working if something is wrong with the cache.
+#
+# Currently we explode if something is wrong with a plugin package
+# dir, but not if something prevents importing a module in it.
+# Rationale is the former should be a PYTHONPATH issue while the
+# latter an installed plugin issue. May have to change this if it
+# causes problems.
+
+import operator
+import os.path
+
+from pkgcore import plugins
+from snakeoil.osutils import join as pjoin
+from snakeoil import modules, demandload
+demandload.demandload(globals(), 'tempfile', 'errno', 'pkgcore.log:logger')
+
+
+CACHE_HEADER = 'pkgcore plugin cache v2\n'
+
+# Global plugin cache. Mapping of package to package cache, which is a
+# mapping of plugin key to a list of module names.
+_cache = {}
+
+
+def initialize_cache(package):
+ """Determine available plugins in a package.
+
+ Writes cache files if they are stale and writing is possible.
+ """
+ # package plugin cache, see above.
+ package_cache = {}
+ seen_modnames = set()
+ for path in package.__path__:
+ # Check if the path actually exists first.
+ try:
+ modlist = os.listdir(path)
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ continue
+ # Directory cache, mapping modulename to
+ # (mtime, set([keys]))
+ stored_cache = {}
+ stored_cache_name = pjoin(path, 'plugincache2')
+ try:
+ cachefile = open(stored_cache_name)
+ except IOError:
+ # Something is wrong with the cache file. We just handle
+ # this as a missing/empty cache, which will force a
+ # rewrite. If whatever it is that is wrong prevents us
+ # from writing the new cache we log it there.
+ pass
+ else:
+ try:
+ # Remove this extra nesting once we require python 2.5
+ try:
+ if cachefile.readline() != CACHE_HEADER:
+ raise ValueError('bogus header')
+ for line in cachefile:
+ module, mtime, entries = line[:-1].split(':', 2)
+ mtime = int(mtime)
+ result = set()
+ # Needed because ''.split(':') == [''], not []
+ if entries:
+ for s in entries.split(':'):
+ name, max_prio = s.split(',')
+ if max_prio:
+ max_prio = int(max_prio)
+ else:
+ max_prio = None
+ result.add((name, max_prio))
+ stored_cache[module] = (mtime, result)
+ except ValueError:
+ # Corrupt cache, treat as empty.
+ stored_cache = {}
+ finally:
+ cachefile.close()
+ cache_stale = False
+ # Hunt for modules.
+ actual_cache = {}
+ assumed_valid = set()
+ for modfullname in modlist:
+ modname, modext = os.path.splitext(modfullname)
+ if modext != '.py':
+ continue
+ if modname == '__init__':
+ continue
+ if modname in seen_modnames:
+ # This module is shadowed by a module earlier in
+ # sys.path. Skip it, assuming its cache is valid.
+ assumed_valid.add(modname)
+ continue
+ # It is an actual module. Check if its cache entry is valid.
+ mtime = int(os.path.getmtime(pjoin(path, modfullname)))
+ if mtime == stored_cache.get(modname, (0, ()))[0]:
+ # Cache is good, use it.
+ actual_cache[modname] = stored_cache[modname]
+ else:
+ # Cache entry is stale.
+ logger.debug(
+ 'stale because of %s: actual %s != stored %s',
+ modname, mtime, stored_cache.get(modname, (0, ()))[0])
+ cache_stale = True
+ entries = []
+ qualname = '.'.join((package.__name__, modname))
+ try:
+ module = modules.load_module(qualname)
+ except modules.FailedImport:
+ # This is a serious problem, but if we blow up
+ # here we cripple pkgcore entirely which may make
+ # fixing the problem impossible. So be noisy but
+ # try to continue.
+ logger.exception('plugin import failed')
+ else:
+ values = set()
+ registry = getattr(module, 'pkgcore_plugins', {})
+ for key, plugs in registry.iteritems():
+ max_prio = None
+ for plug in plugs:
+ priority = getattr(plug, 'priority', None)
+ if priority is not None \
+ and not isinstance(priority, int):
+ # This happens rather a lot with
+ # plugins not meant for use with
+ # get_plugin. Just ignore it.
+ priority = None
+ if priority is not None and (
+ max_prio is None or priority > max_prio):
+ max_prio = priority
+ values.add((key, max_prio))
+ actual_cache[modname] = (mtime, values)
+ # Cache is also stale if it sees entries that are no longer there.
+ for key in stored_cache:
+ if key not in actual_cache and key not in assumed_valid:
+ logger.debug('stale because %s is no longer there', key)
+ cache_stale = True
+ break
+ if cache_stale:
+ # Write a new cache.
+ try:
+ fd, name = tempfile.mkstemp(dir=path)
+ except OSError, e:
+ # We cannot write a new cache. We should log this
+ # since it will have a performance impact.
+
+ # Use error, not exception for this one: the traceback
+ # is not necessary and too alarming.
+ logger.error('Cannot write cache for %s: %s. '
+ 'Try running pplugincache.',
+ stored_cache_name, e)
+ else:
+ cachefile = os.fdopen(fd, 'w')
+ cachefile.write(CACHE_HEADER)
+ try:
+ for module, (mtime, entries) in actual_cache.iteritems():
+ strings = []
+ for plugname, max_prio in entries:
+ if max_prio is None:
+ strings.append(plugname + ',')
+ else:
+ strings.append('%s,%s' % (plugname, max_prio))
+ cachefile.write(
+ '%s:%s:%s\n' % (module, mtime, ':'.join(strings)))
+ finally:
+ cachefile.close()
+ os.chmod(name, 0644)
+ os.rename(name, stored_cache_name)
+ # Update the package_cache.
+ for module, (mtime, entries) in actual_cache.iteritems():
+ seen_modnames.add(module)
+ for key, max_prio in entries:
+ package_cache.setdefault(key, []).append((module, max_prio))
+ return package_cache
+
+
+def get_plugins(key, package=plugins):
+ """Return all enabled plugins matching "key".
+
+ Plugins with a C{disabled} attribute evaluating to C{True} are skipped.
+ """
+ cache = _cache.get(package)
+ if cache is None:
+ cache = _cache[package] = initialize_cache(package)
+ for modname, max_prio in cache.get(key, ()):
+ module = modules.load_module('.'.join((package.__name__, modname)))
+ for obj in module.pkgcore_plugins.get(key, ()):
+ if not getattr(obj, 'disabled', False):
+ yield obj
+
+
+def get_plugin(key, package=plugins):
+ """Get a single plugin matching this key.
+
+ This assumes all plugins for this key have a priority attribute.
+ If any of them do not, the resulting AttributeError is not caught.
+
+ @return: highest-priority plugin or None if no plugin available.
+ """
+ cache = _cache.get(package)
+ if cache is None:
+ cache = _cache[package] = initialize_cache(package)
+ modlist = cache.get(key, [])
+ modlist.sort(key=operator.itemgetter(1), reverse=True)
+ plugs = []
+ for i, (modname, max_prio) in enumerate(modlist):
+ module = modules.load_module('.'.join((package.__name__, modname)))
+ plugs.extend(
+ plug for plug in module.pkgcore_plugins.get(key, ())
+ if not getattr(plug, 'disabled', False))
+ if not plugs:
+ continue
+ plugs.sort(key=operator.attrgetter('priority'), reverse=True)
+ if i + 1 == len(modlist) or plugs[0].priority > modlist[i + 1][1]:
+ return plugs[0]
+ return None
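+
+# Usage sketch (illustrative only): plugin modules living under a plugin
+# package export a pkgcore_plugins mapping of key -> plugin objects, e.g.
+#
+#   pkgcore_plugins = {'syncer': [my_syncer]}    # my_syncer is hypothetical
+#
+# and callers pull them back out via this module:
+#
+#   from pkgcore import plugin
+#   everything = list(plugin.get_plugins('syncer'))
+#   best = plugin.get_plugin('syncer')   # needs priority attrs, may be None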
diff --git a/pkgcore/plugins/__init__.py b/pkgcore/plugins/__init__.py
new file mode 100644
index 0000000..0e8743d
--- /dev/null
+++ b/pkgcore/plugins/__init__.py
@@ -0,0 +1,42 @@
+# Copyright: 2006 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+
+"""pkgcore plugins package."""
+
+import sys
+import os.path
+
+
+# XXX Having this function here is a bit of a wart: it is used by
+# other plugin packages (like the pkgcore-check one), but we cannot
+# put it in pkgcore.plugin because that imports this package (circular
+# import).
+
+def extend_path(path, name):
+ """Simpler version of the stdlib's L{pkgutil.extend_path}.
+
+ It does not support ".pkg" files, and it does not require an
+ __init__.py (this is important: we want only one thing (pkgcore
+ itself) to install the __init__.py to avoid name clashes).
+
+ It also modifies the "path" list in place (and returns C{None})
+ instead of copying it and returning the modified copy.
+ """
+ if not isinstance(path, list):
+ # This could happen e.g. when this is called from inside a
+ # frozen package. Return the path unchanged in that case.
+ return
+ # Reconstitute as relative path.
+ pname = os.path.join(*name.split('.'))
+
+ for entry in sys.path:
+ if not isinstance(entry, basestring) or not os.path.isdir(entry):
+ continue
+ subdir = os.path.join(entry, pname)
+ # XXX This may still add duplicate entries to path on
+ # case-insensitive filesystems
+ if subdir not in path:
+ path.append(subdir)
+
+extend_path(__path__, __name__)
diff --git a/pkgcore/plugins/pkgcore_configurables.py b/pkgcore/plugins/pkgcore_configurables.py
new file mode 100644
index 0000000..27e63e0
--- /dev/null
+++ b/pkgcore/plugins/pkgcore_configurables.py
@@ -0,0 +1,50 @@
+# Copyright: 2006 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+from pkgcore.config import basics
+from pkgcore.ebuild import (
+ portage_conf, repository as ebuild_repo, profiles, domain, eclass_cache,
+ overlay_repository, formatter)
+from pkgcore.pkgsets import system, filelist, installed, glsa
+from pkgcore.vdb import ondisk
+from pkgcore.cache import flat_hash, metadata
+from pkgcore.fetch import custom
+from pkgcore.binpkg import repository as binpkg_repo
+from pkgcore.sync import rsync, base
+
+
+pkgcore_plugins = {
+ 'configurable': [
+ basics.section_alias,
+ basics.parse_config_file,
+ portage_conf.SecurityUpgradesViaProfile,
+ portage_conf.config_from_make_conf,
+ system.SystemSet,
+ ondisk.tree,
+ flat_hash.database,
+ metadata.database,
+ metadata.paludis_flat_list,
+ custom.fetcher,
+ binpkg_repo.tree,
+ ebuild_repo.UnconfiguredTree,
+ ebuild_repo.SlavedTree,
+ profiles.OnDiskProfile,
+ domain.domain,
+ eclass_cache.cache,
+ eclass_cache.StackedCaches,
+ overlay_repository.OverlayRepo,
+ formatter.basic_factory,
+ formatter.pkgcore_factory,
+ formatter.portage_factory,
+ formatter.paludis_factory,
+ formatter.portage_verbose_factory,
+ filelist.FileList,
+ filelist.WorldFile,
+ installed.Installed,
+ installed.VersionedInstalled,
+ glsa.GlsaDirSet,
+ glsa.SecurityUpgrades,
+ rsync.rsync_syncer,
+ base.GenericSyncer,
+ ],
+ }
diff --git a/pkgcore/plugins/pkgcore_ebuild_built.py b/pkgcore/plugins/pkgcore_ebuild_built.py
new file mode 100644
index 0000000..73b42c7
--- /dev/null
+++ b/pkgcore/plugins/pkgcore_ebuild_built.py
@@ -0,0 +1,8 @@
+# Copyright: 2007 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+from pkgcore.ebuild import ebuild_built
+
+pkgcore_plugins = {
+ 'format.ebuild_built': [ebuild_built.generate_new_factory],
+ }
diff --git a/pkgcore/plugins/pkgcore_ebuild_src.py b/pkgcore/plugins/pkgcore_ebuild_src.py
new file mode 100644
index 0000000..164e1e7
--- /dev/null
+++ b/pkgcore/plugins/pkgcore_ebuild_src.py
@@ -0,0 +1,8 @@
+# Copyright: 2007 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+from pkgcore.ebuild import ebuild_src
+
+pkgcore_plugins = {
+ 'format.ebuild_src': [ebuild_src.generate_new_factory],
+ }
diff --git a/pkgcore/plugins/pkgcore_formatters.py b/pkgcore/plugins/pkgcore_formatters.py
new file mode 100644
index 0000000..7308d36
--- /dev/null
+++ b/pkgcore/plugins/pkgcore_formatters.py
@@ -0,0 +1,26 @@
+# Copyright: 2006 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+from pkgcore.config import basics
+
+pkgcore_plugins = {
+ 'global_config': [{
+ 'basic-formatter': basics.ConfigSectionFromStringDict({
+ 'class': 'pkgcore.ebuild.formatter.basic_factory',
+ }),
+ 'pkgcore-formatter': basics.ConfigSectionFromStringDict({
+ 'class': 'pkgcore.ebuild.formatter.pkgcore_factory',
+ }),
+ 'portage-formatter': basics.ConfigSectionFromStringDict({
+ 'class': 'pkgcore.ebuild.formatter.portage_factory',
+ 'default': 'True',
+ }),
+ 'paludis-formatter': basics.ConfigSectionFromStringDict({
+ 'class': 'pkgcore.ebuild.formatter.paludis_factory',
+ }),
+ 'portage-verbose-formatter': basics.ConfigSectionFromStringDict({
+ 'class':
+ 'pkgcore.ebuild.formatter.portage_verbose_factory',
+ }),
+ }],
+ }
diff --git a/pkgcore/plugins/pkgcore_fsops_default.py b/pkgcore/plugins/pkgcore_fsops_default.py
new file mode 100644
index 0000000..717d740
--- /dev/null
+++ b/pkgcore/plugins/pkgcore_fsops_default.py
@@ -0,0 +1,12 @@
+# Copyright: 2006 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+from pkgcore.fs import ops
+
+pkgcore_plugins = {
+ 'fs_ops.copyfile': [ops.default_copyfile],
+ 'fs_ops.ensure_perms': [ops.default_ensure_perms],
+ 'fs_ops.mkdir': [ops.default_mkdir],
+ 'fs_ops.merge_contents': [ops.merge_contents],
+ 'fs_ops.unmerge_contents': [ops.unmerge_contents],
+ }
diff --git a/pkgcore/plugins/pkgcore_syncers.py b/pkgcore/plugins/pkgcore_syncers.py
new file mode 100644
index 0000000..10a4313
--- /dev/null
+++ b/pkgcore/plugins/pkgcore_syncers.py
@@ -0,0 +1,16 @@
+# Copyright: 2006 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+from pkgcore.sync import bzr, cvs, darcs, git, hg, svn
+
+
+pkgcore_plugins = {
+ 'syncer': [
+ bzr.bzr_syncer,
+ cvs.cvs_syncer,
+ darcs.darcs_syncer,
+ git.git_syncer,
+ hg.hg_syncer,
+ svn.svn_syncer,
+ ],
+ }
diff --git a/pkgcore/plugins/pkgcore_triggers.py b/pkgcore/plugins/pkgcore_triggers.py
new file mode 100644
index 0000000..7cca4f2
--- /dev/null
+++ b/pkgcore/plugins/pkgcore_triggers.py
@@ -0,0 +1,18 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+from pkgcore.merge import triggers
+
+pkgcore_plugins = {
+ 'triggers':[
+ triggers.ldconfig,
+ triggers.merge,
+ triggers.unmerge,
+ triggers.fix_uid_perms,
+ triggers.fix_gid_perms,
+ triggers.fix_set_bits,
+ triggers.detect_world_writable,
+ triggers.InfoRegen,
+ triggers.CommonDirectoryModes,
+ ],
+ }
diff --git a/pkgcore/repository/__init__.py b/pkgcore/repository/__init__.py
new file mode 100644
index 0000000..4982be7
--- /dev/null
+++ b/pkgcore/repository/__init__.py
@@ -0,0 +1,6 @@
+# Copyright: 2005 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+repository subsystem
+"""
diff --git a/pkgcore/repository/configured.py b/pkgcore/repository/configured.py
new file mode 100644
index 0000000..1f2ba0e
--- /dev/null
+++ b/pkgcore/repository/configured.py
@@ -0,0 +1,62 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+wrap a repository, binding configuration to pkgs returned from the repository
+"""
+
+from pkgcore.repository import prototype
+from pkgcore.package.conditionals import make_wrapper
+from snakeoil.currying import partial
+from snakeoil.klass import GetAttrProxy
+
+
+class tree(prototype.tree):
+ configured = True
+
+ def __init__(self, raw_repo, wrapped_attrs):
+
+ """
+ @param raw_repo: repo to wrap
+ @type raw_repo: L{pkgcore.repository.prototype.tree}
+ @param wrapped_attrs: sequence of attrs to wrap for each pkg
+ """
+
+ # yes, we're intentionally not using tree's init.
+ # not perfect I know.
+ self.raw_repo = raw_repo
+ self.wrapped_attrs = wrapped_attrs
+ self.attr_filters = frozenset(wrapped_attrs.keys() +
+ [self.configurable])
+
+ self._klass = make_wrapper(self.configurable, self.wrapped_attrs)
+
+ def _get_pkg_kwds(self, pkg):
+ raise NotImplementedError()
+
+ def package_class(self, pkg, *a):
+ return self._klass(pkg, **self._get_pkg_kwds(pkg))
+
+ __getattr__ = GetAttrProxy("raw_repo")
+
+ def itermatch(self, restrict, **kwds):
+ kwds.setdefault("force", True)
+ o = kwds.get("pkg_klass_override")
+ if o is not None:
+ kwds["pkg_klass_override"] = partial(self.package_class, o)
+ else:
+ kwds["pkg_klass_override"] = self.package_class
+ return self.raw_repo.itermatch(restrict, **kwds)
+
+ itermatch.__doc__ = prototype.tree.itermatch.__doc__.replace(
+ "@param", "@keyword").replace("@keyword restrict:", "@param restrict:")
+
+ def __getitem__(self, key):
+ return self.package_class(self.raw_repo[key])
+
+ def __repr__(self):
+ return '<%s.%s raw_repo=%r wrapped=%r @%#8x>' % (
+ self.__class__.__module__, self.__class__.__name__,
+ getattr(self, 'raw_repo', 'unset'),
+ getattr(self, 'wrapped_attrs', {}).keys(),
+ id(self))
diff --git a/pkgcore/repository/errors.py b/pkgcore/repository/errors.py
new file mode 100644
index 0000000..0cf7b33
--- /dev/null
+++ b/pkgcore/repository/errors.py
@@ -0,0 +1,17 @@
+# Copyright: 2005 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+exceptions thrown by repository classes.
+
+Need to extend the usage a bit further still.
+"""
+
+class TreeCorruption(Exception):
+ def __init__(self, err):
+ Exception.__init__(self, "unexpected tree corruption: %s" % (err,))
+ self.err = err
+
+class InitializationError(TreeCorruption):
+ def __str__(self):
+ return "initialization failed: %s" % str(self.err)
diff --git a/pkgcore/repository/misc.py b/pkgcore/repository/misc.py
new file mode 100644
index 0000000..5be10b7
--- /dev/null
+++ b/pkgcore/repository/misc.py
@@ -0,0 +1,90 @@
+# Copyright: 2006-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+from pkgcore.restrictions import packages
+from pkgcore.package.mutated import MutatedPkg
+from snakeoil.iterables import caching_iter
+from snakeoil.klass import GetAttrProxy
+
+__all__ = ("nodeps_repo", "caching_repo")
+
+class nodeps_repo(object):
+
+ """
+ repository wrapper that returns wrapped pkgs via
+ L{MutatedPkg} that have their depends/rdepends/post_rdepends wiped
+ """
+
+ default_depends = packages.AndRestriction(finalize=True)
+ default_rdepends = packages.AndRestriction(finalize=True)
+ default_post_rdepends = packages.AndRestriction(finalize=True)
+
+ def __init__(self, repo):
+ """
+ @param repo: repository to wrap
+ """
+ self.raw_repo = repo
+
+ def itermatch(self, *a, **kwds):
+ return (MutatedPkg(x,
+ overrides={"depends":self.default_depends,
+ "rdepends":self.default_rdepends,
+ "post_rdepends":self.default_post_rdepends})
+ for x in self.raw_repo.itermatch(*a, **kwds))
+
+ def match(self, *a, **kwds):
+ return list(self.itermatch(*a, **kwds))
+
+ __getattr__ = GetAttrProxy("raw_repo")
+
+ def __iter__(self):
+ return self.itermatch(packages.AlwaysTrue)
+
+
+class caching_repo(object):
+
+ """
+ repository wrapper that overrides match, returning
+ L{caching_iter} instances; itermatch is slaved to match,
+ in other words it iterates over the caching_iter.
+
+ Main use for this is to cache results from query lookups;
+ if match's restrict arg is already in the cache, the caller gets a
+ shared caching_iter sequence, which may already be fully loaded with
+ pkg instances.
+
+ This can boost random lookup time pretty nicely, while helping to
+ hold instances in memory to avoid redoing work.
+
+ The cost, of course, is that the involved objects are forced to stay
+ in memory till the cache is cleared. For general use this is usually
+ not what you want- but if you're making a lot of duplicate random
+ queries (the resolver does this for example), caching helps.
+ """
+
+ def __init__(self, db, strategy):
+ """
+ @param db: an instance supporting the repository protocol to cache
+ queries from.
+ @param strategy: forced sorting strategy for results. If you don't
+ need sorting, pass in iter.
+ """
+ self.__db__ = db
+ self.__strategy__ = strategy
+ self.__cache__ = {}
+
+ def match(self, restrict):
+ v = self.__cache__.get(restrict)
+ if v is None:
+ v = self.__cache__[restrict] = \
+ caching_iter(self.__db__.itermatch(restrict,
+ sorter=self.__strategy__))
+ return v
+
+ def itermatch(self, restrict):
+ return iter(self.match(restrict))
+
+ __getattr__ = GetAttrProxy("__db__")
+
+ def clear(self):
+ self.__cache__.clear()
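+
+# Usage sketch (illustrative only; repo and restrict stand for any repository
+# and restriction instance):
+#
+#   from pkgcore.repository.misc import caching_repo, nodeps_repo
+#   cached = caching_repo(repo, sorted)   # sorted forces ordered results
+#   first = cached.match(restrict)        # hits the underlying repo
+#   again = cached.match(restrict)        # same caching_iter, no repo hit
+#   cached.clear()
+#   stripped = nodeps_repo(repo)          # pkgs with dep attrs blanked out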
diff --git a/pkgcore/repository/multiplex.py b/pkgcore/repository/multiplex.py
new file mode 100644
index 0000000..b054d87
--- /dev/null
+++ b/pkgcore/repository/multiplex.py
@@ -0,0 +1,120 @@
+# Copyright: 2005-2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+repository that combines multiple repositories together
+"""
+
+from operator import itemgetter
+from pkgcore.repository import prototype, errors
+from snakeoil.currying import partial
+from snakeoil.iterables import iter_sort
+
+class tree(prototype.tree):
+
+ """repository combining multiple repositories into one"""
+
+ zero_index_grabber = itemgetter(0)
+
+ def __init__(self, *trees):
+ """
+ @param trees: L{pkgcore.repository.prototype.tree} instances
+ to combine into one
+ """
+ super(tree, self).__init__()
+ for x in trees:
+ if not isinstance(x, prototype.tree):
+ raise errors.InitializationError(
+ "%s is not a repository tree derivative" % (x,))
+ self.trees = trees
+
+ def _get_categories(self, *optional_category):
+ d = set()
+ failures = 0
+ if optional_category:
+ optional_category = optional_category[0]
+ for x in self.trees:
+ try:
+ d.update(x.categories[optional_category])
+ except KeyError:
+ failures += 1
+ else:
+ for x in self.trees:
+ try:
+ map(d.add, x.categories)
+ except (errors.TreeCorruption, KeyError):
+ failures += 1
+ if failures == len(self.trees):
+ if optional_category:
+ raise KeyError("category base '%s' not found" %
+ str(optional_category))
+ raise KeyError("failed getting categories")
+ return tuple(d)
+
+ def _get_packages(self, category):
+ d = set()
+ failures = 0
+ for x in self.trees:
+ try:
+ d.update(x.packages[category])
+ except (errors.TreeCorruption, KeyError):
+ failures += 1
+ if failures == len(self.trees):
+ raise KeyError("category '%s' not found" % category)
+ return tuple(d)
+
+ def _get_versions(self, package):
+ d = set()
+ failures = 0
+ for x in self.trees:
+ try:
+ d.update(x.versions[package])
+ except (errors.TreeCorruption, KeyError):
+ failures += 1
+
+ if failures == len(self.trees):
+ raise KeyError("category '%s' not found" % package)
+ return tuple(d)
+
+ def itermatch(self, restrict, **kwds):
+ sorter = kwds.get("sorter", iter)
+ if sorter is iter:
+ return (match for repo in self.trees
+ for match in repo.itermatch(restrict, **kwds))
+ # ugly, and a bit slow, but works.
+ def f(x, y):
+ l = sorter([x, y])
+ if l[0] == y:
+ return 1
+ return -1
+ f = partial(sorted, cmp=f)
+ return iter_sort(f,
+ *[repo.itermatch(restrict, **kwds) for repo in self.trees])
+
+ itermatch.__doc__ = prototype.tree.itermatch.__doc__.replace(
+ "@param", "@keyword").replace("@keyword restrict:", "@param restrict:")
+
+ def __iter__(self):
+ return (pkg for repo in self.trees for pkg in repo)
+
+ def __len__(self):
+ return sum(len(repo) for repo in self.trees)
+
+ def __getitem__(self, key):
+ for t in self.trees:
+ try:
+ p = t[key]
+ return p
+ except KeyError:
+ pass
+ # made it here, no match.
+ raise KeyError("package %s not found" % key)
+
+ def __repr__(self):
+ return '<%s.%s trees=%r @%#8x>' % (
+ self.__class__.__module__, self.__class__.__name__,
+ getattr(self, 'trees', 'unset'),
+ id(self))
+
+ def _visibility_limiters(self):
+ return [x for r in self.trees for x in r.default_visibility_limiters]
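+
+# Usage sketch (illustrative only; repo_a and repo_b stand for any two
+# prototype.tree derivatives, restrict for any restriction):
+#
+#   from pkgcore.repository import multiplex
+#   combined = multiplex.tree(repo_a, repo_b)
+#   len(combined)                                 # sum of both repos
+#   combined.itermatch(restrict, sorter=sorted)   # merged, sorted on the fly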
diff --git a/pkgcore/repository/prototype.py b/pkgcore/repository/prototype.py
new file mode 100644
index 0000000..5676b49
--- /dev/null
+++ b/pkgcore/repository/prototype.py
@@ -0,0 +1,521 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+base repository template
+"""
+
+from pkgcore.ebuild.atom import atom
+from pkgcore.restrictions import values, boolean, restriction, packages
+from pkgcore.restrictions.util import collect_package_restrictions
+
+from snakeoil.mappings import LazyValDict, DictMixin
+from snakeoil.lists import iflatten_instance
+from snakeoil.compatibility import any
+
+class IterValLazyDict(LazyValDict):
+
+ def __str__(self):
+ return str(list(self))
+
+ def force_regen(self, key):
+ if key in self._vals:
+ del self._vals[key]
+ else:
+ self._keys = tuple(x for x in self._keys if x != key)
+
+
+class CategoryIterValLazyDict(IterValLazyDict):
+
+ def force_add(self, key):
+ if key not in self:
+ s = set(self._keys)
+ s.add(key)
+ self._keys = tuple(s)
+
+ def force_remove(self, key):
+ if key in self:
+ self._keys = tuple(x for x in self._keys if x != key)
+
+ __iter__ = IterValLazyDict.iterkeys
+
+ def __contains__(self, key):
+ if self._keys_func is not None:
+ return key in self.keys()
+ return key in self._keys
+
+
+class PackageMapping(DictMixin):
+
+ def __init__(self, parent_mapping, pull_vals):
+ self._cache = {}
+ self._parent = parent_mapping
+ self._pull_vals = pull_vals
+
+ def __getitem__(self, key):
+ o = self._cache.get(key)
+ if o is not None:
+ return o
+ if key not in self._parent:
+ raise KeyError(key)
+ self._cache[key] = vals = self._pull_vals(key)
+ return vals
+
+ def iterkeys(self):
+ return self._parent.iterkeys()
+
+ def __contains__(self, key):
+ return key in self._cache or key in self._parent
+
+ def force_regen(self, cat):
+ try:
+ del self._cache[cat]
+ except KeyError:
+ pass
+
+
+class VersionMapping(DictMixin):
+
+ def __init__(self, parent_mapping, pull_vals):
+ self._cache = {}
+ self._parent = parent_mapping
+ self._pull_vals = pull_vals
+ self._known_keys = {}
+ self._finalized = False
+
+ def __getitem__(self, key):
+ o = self._cache.get(key)
+ if o is not None:
+ return o
+ cat, pkg = key
+ known_pkgs = self._known_keys.get(cat)
+ if known_pkgs is None:
+ if self._finalized:
+ raise KeyError(key)
+ self._known_keys[cat] = known_pkgs = set(self._parent[cat])
+ if pkg not in known_pkgs:
+ raise KeyError(key)
+
+ val = self._pull_vals(key)
+ self._cache[key] = val
+ known_pkgs.remove(pkg)
+ return val
+
+ def iterkeys(self):
+ for key in self._cache:
+ yield key
+
+ if not self._finalized:
+ for cat, pkgs in self._parent.iteritems():
+ if cat in self._known_keys:
+ continue
+ s = set()
+ for pkg in pkgs:
+ if (cat, pkg) in self._cache:
+ continue
+ s.add(pkg)
+ self._known_keys[cat] = s
+ self._finalized = True
+
+ for cat, pkgs in self._known_keys.iteritems():
+ for pkg in list(pkgs):
+ yield cat, pkg
+
+ def force_regen(self, key, val):
+ if val:
+ self._cache[key] = val
+ else:
+ self._cache.pop(key, None)
+ self._known_keys.pop(key[0], None)
+
+
+class tree(object):
+ """
+ repository template
+
+ @ivar raw_repo: if wrapping a repo, set raw_repo per instance to it
+ @ivar livefs: boolean, set it to True if it's a repository representing
+ a livefs
+ @ivar package_class: callable to generate a package instance, must override
+ @ivar configured: if a repo is unusable for merging/unmerging
+ without being configured, set it to False
+ @ivar configure: if the repository isn't configured, must be a callable
+ yielding a configured form of the repository
+ """
+
+ raw_repo = None
+ livefs = False
+ package_class = None
+ configured = True
+ configure = None
+ syncable = False
+
+ def __init__(self, frozen=True):
+ """
+ @param frozen: controls whether the repository is mutable or immutable
+ """
+
+ self.categories = CategoryIterValLazyDict(
+ self._get_categories, self._get_categories)
+ self.packages = PackageMapping(self.categories,
+ self._get_packages)
+ self.versions = VersionMapping(self.packages, self._get_versions)
+
+ self.frozen = frozen
+ self.lock = None
+
+ def _get_categories(self, *args):
+ """this must return a list, or sequence"""
+ raise NotImplementedError(self, "_get_categories")
+
+ def _get_packages(self, category):
+ """this must return a list, or sequence"""
+ raise NotImplementedError(self, "_get_packages")
+
+ def _get_versions(self, package):
+ """this must return a list, or sequence"""
+ raise NotImplementedError(self, "_get_versions")
+
+ def __getitem__(self, cpv):
+ cpv_inst = self.package_class(*cpv)
+ if cpv_inst.fullver not in self.versions[(cpv_inst.category, cpv_inst.package)]:
+ del cpv_inst
+ raise KeyError(cpv)
+ return cpv_inst
+
+ def __setitem__(self, *vals):
+ raise AttributeError
+
+ def __delitem__(self, cpv):
+ raise AttributeError
+
+ def __iter__(self):
+ return self.itermatch(packages.AlwaysTrue)
+
+ def __len__(self):
+ return sum(len(v) for v in self.versions.itervalues())
+
+ def match(self, atom, **kwds):
+ return list(self.itermatch(atom, **kwds))
+
+ def itermatch(self, restrict, restrict_solutions=None, sorter=None,
+ pkg_klass_override=None, force=None, yield_none=False):
+
+ """
+ generator that yields packages matching a restriction.
+
+ @type restrict: L{pkgcore.restrictions.packages.PackageRestriction}
+ instance
+ @param restrict: restriction to search via
+ @param restrict_solutions: cnf collapsed list of the restrict.
+ Don't play with it unless you know what you're doing
+ @param sorter: callable to do sorting during searching-
+ if sorting the results, use this instead of sorting externally.
+ @param yield_none: if True then itermatch will yield None for every
+ non-matching package. This is meant for use in combination with
+ C{twisted.task.cooperate} or other async uses where itermatch
+ should not wait many (wallclock) seconds between yielding
+ packages. If you override this method you should yield
+ None in long-running loops; strictly yielding it for every package
+ is not necessary.
+ """
+
+ if not isinstance(restrict, restriction.base):
+ raise TypeError("restrict must be a "
+ "pkgcore.restriction.restrictions.base instance: "
+ "got %r" % (restrict,))
+
+ if sorter is None:
+ sorter = iter
+
+ if isinstance(restrict, atom):
+ candidates = [(restrict.category, restrict.package)]
+ else:
+ candidates = self._identify_candidates(restrict, sorter)
+
+ if force is None:
+ match = restrict.match
+ elif force:
+ match = restrict.force_True
+ else:
+ match = restrict.force_False
+ return self._internal_match(
+ candidates, match, sorter, pkg_klass_override,
+ yield_none=yield_none)
+
+ def _internal_gen_candidates(self, candidates, sorter):
+ pkls = self.package_class
+ for cp in candidates:
+ for pkg in sorter(pkls(cp[0], cp[1], ver)
+ for ver in self.versions.get(cp, ())):
+ yield pkg
+
+ def _internal_match(self, candidates, match_func, sorter,
+ pkg_klass_override, yield_none=False):
+ for pkg in self._internal_gen_candidates(candidates, sorter):
+ if pkg_klass_override is not None:
+ pkg = pkg_klass_override(pkg)
+
+ if match_func(pkg):
+ yield pkg
+ elif yield_none:
+ yield None
+
+ def _identify_candidates(self, restrict, sorter):
+ # full expansion
+
+ if not isinstance(restrict, boolean.base) or isinstance(restrict, atom):
+ return self._fast_identify_candidates(restrict, sorter)
+ dsolutions = [
+ ([c.restriction
+ for c in collect_package_restrictions(x, ["category"])],
+ [p.restriction
+ for p in collect_package_restrictions(x, ["package"])])
+ for x in restrict.iter_dnf_solutions(True)]
+
+ for x in dsolutions:
+ if not x[0] and not x[1]:
+ # great... one doesn't rely on cat/pkg.
+ if iter is sorter:
+ return self.versions
+ return (
+ (c,p)
+ for c in sorter(self.categories)
+ for p in sorter(self.packages.get(c, ())))
+ # simple cases first.
+ # if one specifies categories, and one doesn't
+ cat_specified = bool(dsolutions[0][0])
+ pkg_specified = bool(dsolutions[0][1])
+ pgetter = self.packages.get
+ if any(True for x in dsolutions[1:] if bool(x[0]) != cat_specified):
+ if any(True for x in dsolutions[1:] if bool(x[1]) != pkg_specified):
+ # merde. so we've got a mix- some specify cats, some
+ # don't, some specify pkgs, some don't.
+ # this may be optimizable
+ return self.versions
+ # ok. so... one doesn't specify a category, but they all
+ # specify packages (or don't)
+ pr = values.OrRestriction(*tuple(iflatten_instance(
+ (x[1] for x in dsolutions if x[1]), values.base)))
+ return ((c, p)
+ for c in sorter(self.categories)
+ for p in sorter(pgetter(c, [])) if pr.match(p))
+
+ elif any(True for x in dsolutions[1:] if bool(x[1]) != pkg_specified):
+ # one (or more) don't specify pkgs, but they all specify cats.
+ cr = values.OrRestriction(*tuple(iflatten_instance(
+ (x[0] for x in dsolutions), values.base)))
+ cats_iter = (c for c in sorter(self.categories) if cr.match(c))
+ return ((c, p)
+ for c in cats_iter for p in sorter(pgetter(c, [])))
+
+ return self._fast_identify_candidates(restrict, sorter)
+
+ def _fast_identify_candidates(self, restrict, sorter):
+ pkg_restrict = set()
+ cat_restrict = set()
+ cat_exact = set()
+ pkg_exact = set()
+
+ for x in collect_package_restrictions(restrict,
+ ["category", "package"]):
+ if x.attr == "category":
+ cat_restrict.add(x.restriction)
+ elif x.attr == "package":
+ pkg_restrict.add(x.restriction)
+
+ for e, s in ((pkg_exact, pkg_restrict), (cat_exact, cat_restrict)):
+ l = [x for x in s
+ if isinstance(x, values.StrExactMatch) and not x.negate]
+ s.difference_update(l)
+ e.update(x.exact for x in l)
+ del l
+
+ if cat_exact:
+ if not cat_restrict and len(cat_exact) == 1:
+ # Cannot use pop here, cat_exact is reused below.
+ c = iter(cat_exact).next()
+ if not pkg_restrict and len(pkg_exact) == 1:
+ cp = (c, pkg_exact.pop())
+ if cp in self.versions:
+ return [cp]
+ return []
+ cats_iter = [c]
+ else:
+ cat_restrict.add(values.ContainmentMatch(*cat_exact))
+ cats_iter = sorter(self._cat_filter(cat_restrict))
+ elif cat_restrict:
+ cats_iter = self._cat_filter(cat_restrict)
+ else:
+ cats_iter = sorter(self.categories)
+
+ if pkg_exact:
+ if not pkg_restrict:
+ if sorter is iter:
+ pkg_exact = tuple(pkg_exact)
+ else:
+ pkg_exact = sorter(pkg_exact)
+ return (
+ (c,p)
+ for c in cats_iter for p in pkg_exact)
+ else:
+ pkg_restrict.add(values.ContainmentMatch(*pkg_exact))
+
+ if pkg_restrict:
+ return self._package_filter(cats_iter, pkg_restrict)
+ if not cat_restrict and sorter is iter and not cat_exact:
+ return self.versions
+ return ((c, p)
+ for c in cats_iter for p in sorter(self.packages.get(c, ())))
+
+ def _cat_filter(self, cat_restricts):
+ cats = [x.match for x in cat_restricts]
+ for x in self.categories:
+ for match in cats:
+ if match(x):
+ yield x
+ break
+
+ def _package_filter(self, cats_iter, pkg_restricts):
+ restricts = [x.match for x in pkg_restricts]
+ pkgs_dict = self.packages
+ for cat in cats_iter:
+ for pkg in pkgs_dict.get(cat, ()):
+ for match in restricts:
+ if match(pkg):
+ yield (cat, pkg)
+ break
+
+ def notify_remove_package(self, pkg):
+ """
+ internal function,
+
+ notify the repository that a pkg it provides is being removed
+ """
+ ver_key = (pkg.category, pkg.package)
+ l = [x for x in self.versions[ver_key] if x != pkg.fullver]
+ if not l:
+ # dead package
+ wipe = list(self.packages[pkg.category]) == [pkg.package]
+ self.packages.force_regen(pkg.category)
+ if wipe:
+ self.categories.force_regen(pkg.category)
+ self.versions.force_regen(ver_key, tuple(l))
+
+ def notify_add_package(self, pkg):
+ """
+ internal function,
+
+ notify the repository that a pkg is being added to it
+ """
+ ver_key = (pkg.category, pkg.package)
+ s = set(self.versions.get(ver_key, ()))
+ s.add(pkg.fullver)
+ if pkg.category not in self.categories:
+ self.categories.force_add(pkg.category)
+ self.packages.force_regen(pkg.category)
+ self.versions.force_regen(ver_key, tuple(s))
+
+ def install(self, pkg, *a, **kw):
+ """
+ internal function, install a pkg to the repository
+
+ @param pkg: L{pkgcore.package.metadata.package} instance to install
+ @param a: passed to _install
+ @param kw: passed to _install
+ @raise AttributeError: if the repository is frozen (immutable)
+ @return: L{pkgcore.interfaces.repo.nonlivefs_install} or
+ L{pkgcore.interfaces.repo.livefs_install} instance
+ """
+ if not kw.pop('force', False) and self.frozen:
+ raise AttributeError("repo is frozen")
+ return self._install(pkg, *a, **kw)
+
+ def _install(self, pkg, *a, **kw):
+ """
+ internal install function- must be overridden in derivatives
+
+ @param pkg: L{pkgcore.package.metadata.package} instance to install
+ @param a: passed to _install
+ @param kw: passed to _install
+ @return: L{pkgcore.interfaces.repo.nonlivefs_install} or
+ L{pkgcore.interfaces.repo.livefs_install} instance
+ """
+ raise NotImplementedError(self, "_install")
+
+ def uninstall(self, pkg, *a, **kw):
+ """
+ internal function, uninstall a pkg from the repository
+
+ @param pkg: L{pkgcore.package.metadata.package} instance to uninstall
+ @param a: passed to _uninstall
+ @param kw: passed to _uninstall
+ @raise AttributeError: if the repository is frozen (immutable)
+ @return: L{pkgcore.interfaces.repo.nonlivefs_uninstall} or
+ L{pkgcore.interfaces.repo.livefs_uninstall} instance
+ """
+ if self.frozen and not kw.pop("force", False):
+ raise AttributeError("repo is frozen")
+ return self._uninstall(pkg, *a, **kw)
+
+ def _uninstall(self, pkg, *a, **kw):
+ """
+ internal uninstall function- must be overridden in derivatives
+
+ @param pkg: L{pkgcore.package.metadata.package} instance to uninstall
+ @param a: passed to _uninstall
+ @param kw: passed to _uninstall
+ @return: L{pkgcore.interfaces.repo.nonlivefs_uninstall} or
+ L{pkgcore.interfaces.repo.livefs_uninstall} instance
+ """
+ raise NotImplementedError(self, "_uninstall")
+
+ def replace(self, orig, new, *a, **kw):
+ """
+ internal function, replace a pkg in the repository with another
+
+ @param orig: L{pkgcore.package.metadata.package} instance to replace,
+ must be from this repository instance
+ @param new: L{pkgcore.package.metadata.package} instance to install
+ in its place
+ @param a: passed to _replace
+ @param kw: passed to _replace
+ @raise AttributeError: if the repository is frozen (immutable)
+ @return: L{pkgcore.interfaces.repo.nonlivefs_replace} or
+ L{pkgcore.interfaces.repo.livefs_replace} instance
+ """
+ if self.frozen and not kw.pop("force", False):
+ raise AttributeError("repo is frozen")
+ return self._replace(orig, new, *a, **kw)
+
+ def _replace(self, orig, new, *a, **kw):
+ """
+ internal replace function- must be overridden in derivatives
+
+ @param orig: L{pkgcore.package.metadata.package} instance to replace,
+ must be from this repository instance
+ @param new: L{pkgcore.package.metadata.package} instance to install
+ in its place
+ @param a: passed to _replace
+ @param kw: passed to _replace
+ @return: L{pkgcore.interfaces.repo.nonlivefs_replace} or
+ L{pkgcore.interfaces.repo.livefs_replace} instance
+ """
+ raise NotImplementedError(self, "_replace")
+
+ def __nonzero__(self):
+ try:
+ iter(self.versions).next()
+ return True
+ except StopIteration:
+ return False
+
+ @property
+ def default_visibility_limiters(self):
+ # designed this way to allow for easy override
+ return self._visibility_limiters()
+
+ def _visibility_limiters(self):
+ return []
diff --git a/pkgcore/repository/syncable.py b/pkgcore/repository/syncable.py
new file mode 100644
index 0000000..3fb532f
--- /dev/null
+++ b/pkgcore/repository/syncable.py
@@ -0,0 +1,20 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+from pkgcore.sync import base
+
+class tree_mixin(object):
+
+ def __init__(self, sync=None):
+ self._sync = sync
+
+ def sync(self, status_obj=None, force=False):
+ # often enough, the syncer is a lazy_ref
+ syncer = self._sync
+ if not isinstance(syncer, base.syncer):
+ syncer = syncer.instantiate()
+ return syncer.sync(force=force)
+
+ @property
+ def syncable(self):
+ return self._sync is not None
diff --git a/pkgcore/repository/util.py b/pkgcore/repository/util.py
new file mode 100644
index 0000000..4a5cb31
--- /dev/null
+++ b/pkgcore/repository/util.py
@@ -0,0 +1,41 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+from pkgcore.repository.prototype import tree
+from pkgcore.ebuild.cpv import CPV
+
+class SimpleTree(tree):
+
+ def __init__(self, cpv_dict, pkg_klass=None):
+ self.cpv_dict = cpv_dict
+ if pkg_klass is None:
+ pkg_klass = CPV
+ self.package_class = pkg_klass
+ tree.__init__(self)
+
+ def _get_categories(self, *arg):
+ if arg:
+ return ()
+ return tuple(self.cpv_dict.iterkeys())
+
+ def _get_packages(self, category):
+ return tuple(self.cpv_dict[category].iterkeys())
+
+ def _get_versions(self, cp_key):
+ return tuple(self.cpv_dict[cp_key[0]][cp_key[1]])
+
+ def notify_remove_package(self, pkg):
+ vers = self.cpv_dict[pkg.category][pkg.package]
+ vers = [x for x in vers if x != pkg.fullver]
+ if vers:
+ self.cpv_dict[pkg.category][pkg.package] = vers
+ else:
+ del self.cpv_dict[pkg.category][pkg.package]
+ if not self.cpv_dict[pkg.category]:
+ del self.cpv_dict[pkg.category]
+ tree.notify_remove_package(self, pkg)
+
+ def notify_add_package(self, pkg):
+ self.cpv_dict.setdefault(pkg.category,
+ {}).setdefault(pkg.package, []).append(pkg.fullver)
+ tree.notify_add_package(self, pkg)
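+
+# Usage sketch (illustrative only): SimpleTree gives the prototype.tree
+# matching machinery a trivial in-memory backend, which also shows the
+# match/itermatch API in action.
+#
+#   from pkgcore.repository.util import SimpleTree
+#   from pkgcore.ebuild.atom import atom
+#   repo = SimpleTree({"dev-util": {"diffball": ["0.7", "1.0"]}})
+#   repo.match(atom("dev-util/diffball"))          # both CPVs
+#   repo.match(atom("=dev-util/diffball-1.0"))     # just the 1.0 CPV
+#   sorted(str(p) for p in repo.itermatch(atom("dev-util/diffball")))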
diff --git a/pkgcore/repository/virtual.py b/pkgcore/repository/virtual.py
new file mode 100644
index 0000000..5333294
--- /dev/null
+++ b/pkgcore/repository/virtual.py
@@ -0,0 +1,51 @@
+# Copyright: 2005-2006 Brian harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+virtual repository, pkgs generated via callable
+"""
+
+from pkgcore.repository import prototype
+from pkgcore.package import virtual
+from snakeoil.currying import partial
+
+
+class tree(prototype.tree):
+
+ factory_kls = staticmethod(virtual.factory)
+
+ def __init__(self, livefs=False):
+ """
+ @param livefs: is this a livefs repository?
+ """
+ prototype.tree.__init__(self)
+ self.livefs = livefs
+ vf = self.factory_kls(self)
+ self.package_class = vf.new_package
+
+ def _expand_vers(self, cp, ver):
+ raise NotImplementedError(self, "_expand_vers")
+
+ def _internal_gen_candidates(self, candidates, sorter):
+ pkls = self.package_class
+ for cp in candidates:
+ for pkg in sorter(pkls(provider, cp[0], cp[1], ver)
+ for ver in self.versions.get(cp, ())
+ for provider in self._expand_vers(cp, ver)):
+ yield pkg
+
+ def _get_categories(self, *optional_category):
+ # return if optional_category is passed... cause it's not yet supported
+ if optional_category:
+ return ()
+ return ("virtual",)
+
+ def _load_data(self):
+ raise NotImplementedError(self, "_load_data")
+
+ def _get_packages(self, category):
+ if category != "virtual":
+ raise KeyError("no %s category for this repository" % category)
+ self._load_data()
+ return self.packages[category]
diff --git a/pkgcore/repository/visibility.py b/pkgcore/repository/visibility.py
new file mode 100644
index 0000000..ddb1fae
--- /dev/null
+++ b/pkgcore/repository/visibility.py
@@ -0,0 +1,64 @@
+# Copyright: 2005-2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+filtering repository
+"""
+
+# icky.
+# ~harring
+from pkgcore.repository import prototype, errors
+from pkgcore.restrictions.restriction import base
+from snakeoil.klass import GetAttrProxy
+
+class filterTree(prototype.tree):
+
+ """Filter existing repository based upon passed in restrictions."""
+
+ def __init__(self, repo, restriction, sentinel_val=False):
+ self.raw_repo = repo
+ self.sentinel_val = sentinel_val
+ if not isinstance(self.raw_repo, prototype.tree):
+ raise errors.InitializationError(
+ "%s is not a repository tree derivative" % (self.raw_repo,))
+ if not isinstance(restriction, base):
+ raise errors.InitializationError(
+ "%s is not a restriction" % (restriction,))
+ self.restriction = restriction
+
+ def itermatch(self, restrict, **kwds):
+ # note that this lets the repo do the initial filtering.
+ # a better design would be to analyze the restrictions, and inspect
+ # the repo, determine what can be done without cost
+ # (determined by repo's attributes) versus what does cost
+ # (metadata pull for example).
+ for cpv in self.raw_repo.itermatch(restrict, **kwds):
+ if self.restriction.match(cpv) == self.sentinel_val:
+ yield cpv
+
+
+ itermatch.__doc__ = prototype.tree.itermatch.__doc__.replace(
+ "@param", "@keyword").replace("@keyword restrict:", "@param restrict:")
+
+ def __len__(self):
+ count = 0
+ for i in self:
+ count += 1
+ return count
+
+ __getattr__ = GetAttrProxy("raw_repo")
+
+ def __getitem__(self, key):
+ v = self.raw_repo[key]
+ if self.restriction.match(v) != self.sentinel_val:
+ raise KeyError(key)
+ return v
+
+ def __repr__(self):
+ return '<%s raw_repo=%r restriction=%r sentinel=%r @%#8x>' % (
+ self.__class__.__name__,
+ getattr(self, 'raw_repo', 'unset'),
+ getattr(self, 'restriction', 'unset'),
+ getattr(self, 'sentinel_val', 'unset'),
+ id(self))
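+
+# Usage sketch (illustrative only; raw_repo stands for any prototype.tree,
+# and the values.StrExactMatch usage is an assumption). With the default
+# sentinel of False the restriction acts as a mask: only pkgs it does NOT
+# match are let through.
+#
+#   from pkgcore.repository.visibility import filterTree
+#   from pkgcore.restrictions import packages, values
+#   mask = packages.PackageRestriction("package",
+#                                      values.StrExactMatch("diffball"))
+#   visible = filterTree(raw_repo, mask)
+#   list(visible)    # everything in raw_repo except pkgs named diffball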
diff --git a/pkgcore/repository/wrapper.py b/pkgcore/repository/wrapper.py
new file mode 100644
index 0000000..8d4ce90
--- /dev/null
+++ b/pkgcore/repository/wrapper.py
@@ -0,0 +1,36 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+simple repository wrapping to override the package instances returned
+"""
+
+# icky.
+# ~harring
+from pkgcore.repository import prototype, errors
+from snakeoil.klass import GetAttrProxy
+from itertools import imap
+
+class tree(prototype.tree):
+
+ """wrap an existing repository yielding wrapped packages."""
+
+ def __init__(self, repo, package_class):
+ """
+ @param repo: L{pkgcore.repository.prototype.tree} instance to wrap
+ @param package_class: callable to yield the package instance
+ """
+ self.raw_repo = repo
+ if not isinstance(self.raw_repo, prototype.tree):
+ raise errors.InitializationError(
+ "%s is not a repository tree derivative" % (self.raw_repo,))
+ self.package_class = package_class
+
+ def itermatch(self, *args, **kwargs):
+ return imap(self.package_class, self.raw_repo.itermatch(*args, **kwargs))
+
+ __getattr__ = GetAttrProxy("raw_repo")
+
+ def __len__(self):
+ return len(self.raw_repo)
diff --git a/pkgcore/resolver/__init__.py b/pkgcore/resolver/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pkgcore/resolver/__init__.py
diff --git a/pkgcore/resolver/choice_point.py b/pkgcore/resolver/choice_point.py
new file mode 100644
index 0000000..68cbf0b
--- /dev/null
+++ b/pkgcore/resolver/choice_point.py
@@ -0,0 +1,155 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+
+from snakeoil.lists import iter_stable_unique
+
+class choice_point(object):
+
+ __slots__ = (
+ "__weakref__", "atom", "matches", "matches_cur", "solution_filters",
+ "_prdeps", "_rdeps", "_deps", "_provides")
+
+ def __init__(self, a, matches):
+ self.atom = a
+ self.matches = iter(matches)
+ self.matches_cur = None
+ self.solution_filters = set()
+ # match solutions, remaining
+ self._deps = None
+ self._rdeps = None
+ self._prdeps = None
+ self._provides = None
+
+ @property
+ def state(self):
+ m = self.matches_cur
+ return (len(self.solution_filters),
+ m.repo, m,
+ self.matches,
+ self._deps,
+ self._rdeps,
+ self._prdeps)
+
+ @staticmethod
+ def _filter_choices(cnf_reqs, filterset):
+ for choices in cnf_reqs:
+ l = [x for x in choices if x not in filterset]
+ if not l:
+ return
+ yield l
+
+ def _internal_force_next(self):
+ """
+ force next pkg without triggering a reduce_atoms call
+ @return: True if pkgs remain, False if no more remain
+ """
+ for self.matches_cur in self.matches:
+ self._reset_iters()
+ return True
+ self.matches_cur = self.matches = None
+ return False
+
+ def reduce_atoms(self, atom):
+
+ if self.matches is None:
+ raise IndexError("no solutions remain")
+ if hasattr(atom, "__contains__") and not isinstance(atom, basestring):
+ self.solution_filters.update(atom)
+ else:
+ self.solution_filters.add(atom)
+
+ filterset = self.solution_filters
+ if self.matches_cur is None:
+ if not self._internal_force_next():
+ return True
+
+ round = -1
+ while True:
+ round += 1
+ if round:
+ if not self._internal_force_next():
+ return True
+
+ for depset_name in ("_deps", "_rdeps", "_prdeps"):
+ depset = getattr(self, depset_name)
+ reqs = list(self._filter_choices(depset, filterset))
+ if len(reqs) != len(depset):
+ break
+ setattr(self, depset_name, reqs)
+ else:
+ return round > 0
+
+ def _reset_iters(self):
+ cur = self.matches_cur
+ self._deps = cur.depends.cnf_solutions()
+ self._rdeps = cur.rdepends.cnf_solutions()
+ self._prdeps = cur.post_rdepends.cnf_solutions()
+ self._provides = tuple(iter_stable_unique(cur.provides))
+
+ @property
+ def slot(self):
+ return self.current_pkg.slot
+
+ @property
+ def key(self):
+ return self.current_pkg.key
+
+ @property
+ def current_pkg(self):
+ if self.matches_cur is None:
+ if self.matches is None:
+ raise IndexError("no packages remain")
+ for self.matches_cur in self.matches:
+ break
+ else:
+ self.matches = None
+ raise IndexError("no more packages remain")
+ self._reset_iters()
+ return self.matches_cur
+
+ def force_next_pkg(self):
+ if self.matches is None:
+ return False
+ for self.matches_cur in self.matches:
+ break
+ else:
+ self.matches_cur = self.matches = None
+ return False
+ return self.reduce_atoms([])
+
+ @property
+ def depends(self):
+ if not self:
+ raise IndexError("no more solutions remain")
+ return self._deps
+
+ @property
+ def rdepends(self):
+ if not self:
+ raise IndexError("no more solutions remain")
+ return self._rdeps
+
+ @property
+ def post_rdepends(self):
+ if not self:
+ raise IndexError("no more solutions remain")
+ return self._prdeps
+
+ @property
+ def provides(self):
+ if not self:
+ raise IndexError("no more solutions remain")
+ return self._provides
+
+ def __nonzero__(self):
+ if self.matches_cur is None:
+ if self.matches is None:
+ return False
+ for self.matches_cur in self.matches:
+ break
+ else:
+ self.matches = None
+ return False
+ self._reset_iters()
+ return True
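
To make the control flow concrete, a small self-contained sketch; `_deps` and `_pkg` are invented stand-ins for real package objects, providing just the cnf_solutions()/provides surface that choice_point touches.

    from pkgcore.resolver.choice_point import choice_point

    class _deps(object):
        """stub depset: cnf_solutions() returns a list of or-blocks."""
        def __init__(self, sols):
            self._sols = sols
        def cnf_solutions(self):
            return self._sols

    class _pkg(object):
        def __init__(self, key, deps):
            self.key, self.slot = key, "0"
            self.depends = self.rdepends = self.post_rdepends = _deps(deps)
            self.provides = ()

    cp = choice_point("dev-util/diffball",
                      [_pkg("dev-util/diffball-1.0", [["dev-libs/bzip2"]])])
    assert bool(cp) and cp.current_pkg.key == "dev-util/diffball-1.0"
    # filtering out the sole choice of the only or-block exhausts this match;
    # reduce_atoms returning True means the current pkg changed (or ran out).
    assert cp.reduce_atoms("dev-libs/bzip2") is True
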
diff --git a/pkgcore/resolver/pigeonholes.py b/pkgcore/resolver/pigeonholes.py
new file mode 100644
index 0000000..057dc82
--- /dev/null
+++ b/pkgcore/resolver/pigeonholes.py
@@ -0,0 +1,82 @@
+# Copyright: 2006-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+from pkgcore.restrictions import restriction
+
+# lil too getter/setter like for my tastes...
+
+class PigeonHoledSlots(object):
+    """class for tracking slotting of objects against a specific atom/obj key
+
+    With no atoms present it merely prevents conflicts on obj.key; when an atom
+    is present it is assumed to be a blocker, and it ensures no obj matching
+    the atom occupies that key.
+    """
+
+ def __init__(self):
+ self.slot_dict = {}
+ self.limiters = {}
+
+ def fill_slotting(self, obj, force=False):
+ """Try to insert obj in.
+
+ @return: any conflicting objs (empty list if inserted successfully).
+ """
+ key = obj.key
+ l = [x for x in self.limiters.get(key, ()) if x.match(obj)]
+
+ dslot = obj.slot
+ l.extend(x for x in self.slot_dict.get(key, ()) if x.slot == dslot)
+
+ if not l or force:
+ self.slot_dict.setdefault(key, []).append(obj)
+ return l
+
+
+ def get_conflicting_slot(self, pkg):
+ for x in self.slot_dict.get(pkg.key, ()):
+ if pkg.slot == x.slot:
+ return x
+ return None
+
+ def find_atom_matches(self, atom, key=None):
+ if key is None:
+ key = atom.key
+ return filter(atom.match, self.slot_dict.get(key, ()))
+
+ def add_limiter(self, atom, key=None):
+ """add a limiter, returning any conflicting objs"""
+ if not isinstance(atom, restriction.base):
+ raise TypeError("atom must be a restriction.base derivative: "
+ "got %r, key=%r" % (atom, key))
+ # debug.
+
+ if key is None:
+ key = atom.key
+ self.limiters.setdefault(key, []).append(atom)
+ return filter(atom.match, self.slot_dict.get(key, ()))
+
+ def remove_slotting(self, obj):
+ key = obj.key
+ # let the key error be thrown if they screwed up.
+ l = [x for x in self.slot_dict[key] if x is not obj]
+ if len(l) == len(self.slot_dict[key]):
+ raise KeyError("obj %s isn't slotted" % obj)
+ if l:
+ self.slot_dict[key] = l
+ else:
+ del self.slot_dict[key]
+
+ def remove_limiter(self, atom, key=None):
+ if key is None:
+ key = atom.key
+ l = [x for x in self.limiters[key] if x is not atom]
+ if len(l) == len(self.limiters[key]):
+            raise KeyError("limiter %s isn't registered" % atom)
+ if not l:
+ del self.limiters[key]
+ else:
+ self.limiters[key] = l
+
+ def __contains__(self, obj):
+ if isinstance(obj, restriction.base):
+ return obj in self.limiters.get(obj.key, ())
+ return obj in self.slot_dict.get(obj.key, ())
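
A quick self-contained illustration of the conflict tracking; `_pkg` is an invented stub, and only its key and slot attributes matter here.

    from pkgcore.resolver.pigeonholes import PigeonHoledSlots

    class _pkg(object):
        def __init__(self, key, slot="0"):
            self.key, self.slot = key, slot

    slots = PigeonHoledSlots()
    first = _pkg("dev-util/diffball")
    assert slots.fill_slotting(first) == []     # empty list: inserted cleanly
    # a second package in the same key/slot is reported as a conflict, not inserted
    assert slots.fill_slotting(_pkg("dev-util/diffball")) == [first]
    slots.remove_slotting(first)                # frees the slot for reuse
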
diff --git a/pkgcore/resolver/plan.py b/pkgcore/resolver/plan.py
new file mode 100644
index 0000000..764c253
--- /dev/null
+++ b/pkgcore/resolver/plan.py
@@ -0,0 +1,877 @@
+# Copyright: 2006-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+import operator
+from itertools import chain, islice, ifilterfalse
+from collections import deque
+
+from pkgcore.resolver.choice_point import choice_point
+from pkgcore.restrictions import packages, values, restriction
+from pkgcore.repository.misc import caching_repo
+from pkgcore.resolver import state
+
+from snakeoil.currying import partial, post_curry
+from snakeoil.compatibility import any
+from snakeoil.iterables import caching_iter, iter_sort
+
+
+limiters = set(["cycle"])#, None])
+def dprint(fmt, args=None, label=None):
+ if None in limiters or label in limiters:
+ if args is None:
+ print fmt
+ else:
+ print fmt % args
+
+
+#iter/pkg sorting functions for selection strategy
+pkg_sort_highest = partial(sorted, reverse=True)
+pkg_sort_lowest = sorted
+
+pkg_grabber = operator.itemgetter(0)
+
+def highest_iter_sort(l, pkg_grabber=pkg_grabber):
+ def f(x, y):
+ c = cmp(x, y)
+ if c:
+ return c
+ elif x.repo.livefs:
+ if y.repo.livefs:
+ return 0
+ return 1
+ elif y.repo.livefs:
+ return -1
+ return 0
+ l.sort(f, key=pkg_grabber, reverse=True)
+ return l
+
+
+def lowest_iter_sort(l, pkg_grabber=pkg_grabber):
+ def f(x, y):
+ c = cmp(x, y)
+ if c:
+ return c
+ elif x.repo.livefs:
+ if y.repo.livefs:
+ return 0
+ return -1
+ elif y.repo.livefs:
+ return 1
+ return 0
+ l.sort(f, key=pkg_grabber)
+ return l
+
+
+class resolver_frame(object):
+
+ __slots__ = ("parent", "atom", "choices", "mode", "start_point", "dbs",
+ "depth", "drop_cycles", "__weakref__", "ignored", "vdb_limited",
+ "events", "succeeded")
+
+ def __init__(self, parent, mode, atom, choices, dbs, start_point, depth,
+ drop_cycles, ignored=False, vdb_limited=False):
+ self.parent = parent
+ self.atom = atom
+ self.choices = choices
+ self.dbs = dbs
+ self.mode = mode
+ self.start_point = start_point
+ self.depth = depth
+ self.drop_cycles = drop_cycles
+        self.ignored = ignored
+ self.vdb_limited = vdb_limited
+ self.events = []
+ self.succeeded = None
+
+ def reduce_solutions(self, nodes):
+ self.events.append(("reduce", nodes))
+ return self.choices.reduce_atoms(nodes)
+
+ def __str__(self):
+ pkg = self.current_pkg
+ if pkg is None:
+ pkg = "exhausted"
+ else:
+            cpv = pkg.cpvstr
+            repo_id = getattr(pkg.repo, 'repo_id', None)
+            if repo_id is not None:
+                pkg = "%s::%s" % (cpv, repo_id)
+            else:
+                pkg = cpv
+ if self.succeeded is not None:
+ result = ": %s" % (self.succeeded and "succeeded" or "failed")
+ else:
+ result = ""
+ return "frame%s: mode %r: atom %s: current %s%s%s%s" % \
+ (result, self.mode, self.atom, pkg,
+ self.drop_cycles and ": cycle dropping" or '',
+ self.ignored and ": ignored" or '',
+ self.vdb_limited and ": vdb limited" or '')
+
+ @property
+ def current_pkg(self):
+ try:
+ return self.choices.current_pkg
+ except IndexError:
+ return None
+
+
+class resolver_stack(deque):
+
+ frame_klass = resolver_frame
+ depth = property(len)
+ current_frame = property(operator.itemgetter(-1))
+ filter_ignored = staticmethod(
+ partial(ifilterfalse, operator.attrgetter("ignored")))
+
+ # this *has* to be a property, else it creates a cycle.
+ parent = property(lambda s:s)
+
+ def __init__(self):
+ self.events = []
+
+ def __str__(self):
+ return 'resolver stack:\n %s' % '\n '.join(str(x) for x in self)
+
+ def __repr__(self):
+ return '<%s: %r>' % (self.__class__.__name__,
+ tuple(repr(x) for x in self))
+
+ def add_frame(self, mode, atom, choices, dbs, start_point, drop_cycles, vdb_limited=False):
+ if not self:
+ parent = self
+ else:
+ parent = self[-1]
+ frame = self.frame_klass(parent, mode, atom, choices, dbs, start_point,
+ self.depth + 1, drop_cycles, vdb_limited=vdb_limited)
+ self.append(frame)
+ return frame
+
+ def add_event(self, event):
+ if not self:
+ self.events.append(event)
+ else:
+ self[-1].events.append(event)
+
+ def pop_frame(self, result):
+ frame = self.pop()
+ frame.succeeded = bool(result)
+ frame.parent.events.append(frame)
+
+ def will_cycle(self, atom, cur_choice, attr, start=0):
+ # short cut...
+ if attr == "post_rdepends":
+ # not possible for a cycle we'll care about to exist.
+ # the 'cut off' point is for the new atom, thus not possible for
+ # a cycle.
+ return -1
+
+ cycle_start = -1
+ if start != 0:
+ i = islice(self, start, None)
+ else:
+ i = self
+ for idx, x in enumerate(i):
+ if x.mode == "post_rdepends":
+ cycle_start = -1
+ if x.atom == atom:
+ cycle_start = idx
+
+ if cycle_start != -1:
+ # deque can't be sliced, thus islice
+ if attr is not None:
+ s = ', '.join('[%s: %s]' % (x.atom, x.current_pkg) for x in
+ islice(self, cycle_start))
+ if s:
+ s += ', '
+ s += '[%s: %s]' % (atom, cur_choice.current_pkg)
+ dprint("%s level cycle: stack: %s\n",
+ (attr, s), "cycle")
+ return cycle_start + start
+ return -1
+
+ def pkg_cycles(self, trg_frame, **kwds):
+        pkg = trg_frame.current_pkg
+ return (frame for frame in self._cycles(trg_frame, skip_trg_frame=True,
+ **kwds)
+ if pkg == frame.current_pkg)
+
+ def atom_cycles(self, trg_frame, **kwds):
+ atom = trg_frame.atom
+ return (frame for frame in self._cycles(trg_frame, skip_trg_frame=True,
+ **kwds)
+ if atom == frame.atom)
+
+ def slot_cycles(self, trg_frame, **kwds):
+ pkg = trg_frame.current_pkg
+ slot = pkg.slot
+ key = pkg.key
+ return (frame for frame in self._cycles(trg_frame, skip_trg_frame=True,
+ **kwds)
+ if key == frame.current_pkg.key and slot == frame.current_pkg.slot)
+
+ def _cycles(self, trg_frame, start=0, reverse=False, skip_trg_frame=True):
+        if reverse:
+            i = self.filter_ignored(reversed(self))
+        else:
+            i = self.filter_ignored(self)
+ if start != 0:
+ i = islice(i, start, None)
+ if skip_trg_frame:
+ return (frame for frame in i if frame is not trg_frame)
+ return i
+
+ def index(self, frame, start=0, stop=None):
+ if start != 0 or stop is not None:
+            i = islice(self, start, stop)
+        else:
+            i = self
+        for idx, x in enumerate(i):
+ if x == frame:
+ return idx + start
+ return -1
+
+
+class merge_plan(object):
+
+ vdb_restrict = packages.PackageRestriction("repo.livefs",
+ values.EqualityMatch(True))
+
+ def __init__(self, dbs, per_repo_strategy,
+ global_strategy=None,
+ depset_reorder_strategy=None,
+ process_built_depends=False,
+ drop_cycles=False, debug=False):
+
+ if not isinstance(dbs, (list, tuple)):
+ dbs = [dbs]
+
+ if global_strategy is None:
+ global_strategy = self.default_global_strategy
+
+ if depset_reorder_strategy is None:
+ depset_reorder_strategy = self.default_depset_reorder_strategy
+
+ self.depset_reorder = depset_reorder_strategy
+ self.per_repo_strategy = per_repo_strategy
+ self.global_strategy = global_strategy
+ self.forced_atoms = set()
+ self.all_dbs = [caching_repo(x, self.per_repo_strategy) for x in dbs]
+ self.livefs_dbs = [x for x in self.all_dbs if x.livefs]
+ self.dbs = [x for x in self.all_dbs if not x.livefs]
+ self.state = state.plan_state()
+ self.insoluble = set()
+ self.vdb_preloaded = False
+ self.drop_cycles = drop_cycles
+ self.process_built_depends = process_built_depends
+ if debug:
+ self._rec_add_atom = partial(self._stack_debugging_rec_add_atom,
+ self._rec_add_atom)
+ self._debugging_depth = 0
+ self._debugging_drop_cycles = False
+
+ def notify_starting_mode(self, mode, stack):
+ if mode == "post_rdepends":
+ mode = 'prdepends'
+ dprint("%s:%s%s: started: %s" %
+ (mode, ' ' * ((stack.current_frame.depth * 2) + 12 - len(mode)),
+ stack.current_frame.atom,
+ stack.current_frame.choices.current_pkg)
+ )
+
+ def notify_trying_choice(self, stack, atom, choices):
+ dprint("choose for %s%s, %s",
+ (stack.depth *2*" ", atom, choices.current_pkg))
+ stack.add_event(('inspecting', choices.current_pkg))
+
+ def notify_choice_failed(self, stack, atom, choices, msg, msg_args=()):
+ if msg:
+ msg = ': %s' % (msg % msg_args)
+ dprint("choice for %s%s, %s failed%s",
+ (stack.depth * 2 * ' ', atom, choices.current_pkg, msg))
+# stack[-1].events.append("failed")
+
+ def notify_choice_succeeded(self, stack, atom, choices, msg='', msg_args=()):
+ if msg:
+ msg = ': %s' % (msg % msg_args)
+ dprint("choice for %s%s, %s succeeded%s",
+ (stack.depth * 2 * ' ', atom, choices.current_pkg, msg))
+# stack[-1].events.append("succeeded")
+
+ def load_vdb_state(self):
+ for r in self.livefs_dbs:
+ for pkg in r.__db__:
+ dprint("inserting %s from %s", (pkg, r), "vdb")
+ ret = self.add_atom(pkg.versioned_atom, dbs=self.livefs_dbs)
+ dprint("insertion of %s from %s: %s", (pkg, r, ret), "vdb")
+ if ret:
+ raise Exception(
+ "couldn't load vdb state, %s %s" %
+ (pkg.versioned_atom, ret))
+ self.vdb_preloaded = True
+
+ def add_atom(self, atom, dbs=None):
+ """add an atom, recalculating as necessary.
+
+        @return: a non-empty tuple of failure information if a solution
+            can't be found, else an empty tuple if the atom was
+            successfully added.
+ """
+ if dbs is None:
+ dbs = self.all_dbs
+ if atom not in self.forced_atoms:
+ stack = resolver_stack()
+ ret = self._rec_add_atom(atom, stack, dbs)
+ if ret:
+ dprint("failed- %s", ret)
+ return ret, stack.events[0]
+ else:
+ self.forced_atoms.add(atom)
+
+ return ()
+
+ def check_for_cycles(self, stack, cur_frame):
+ """check the current stack for cyclical issues;
+ @param stack: current stack, a L{resolver_stack} instance
+ @param cur_frame: current frame, a L{resolver_frame} instance
+ @return: True if no issues and resolution should continue, else the
+ value to return after collapsing the calling frame
+ """
+ force_vdb = False
+ for frame in stack.slot_cycles(cur_frame, reverse=True):
+ if True:
+ # exact same pkg.
+ if frame.mode == 'depends':
+ # ok, we *must* go vdb if not already.
+ if frame.current_pkg.repo.livefs:
+ if cur_frame.current_pkg.repo.livefs:
+ return None
+ # force it to vdb.
+ if cur_frame.current_pkg.repo.livefs:
+ return True
+ elif cur_frame.current_pkg == frame.current_pkg and \
+ cur_frame.mode == 'post_rdepends':
+ # if non vdb and it's a post_rdeps cycle for the cur
+ # node, exempt it; assuming the stack succeeds,
+ # it's satisfied
+ return True
+ force_vdb = True
+ break
+ else:
+ # should be doing a full walk of the cycle here, seeing
+ # if an rdep becomes a dep.
+ return None
+            # portage::gentoo -> rsync -> portage::vdb; let it process it.
+ return True
+ # only need to look at the most recent match; reasoning is simple,
+ # logic above forces it to vdb if needed.
+ break
+ if not force_vdb:
+ return True
+ # we already know the current pkg isn't livefs; force livefs to
+ # sidestep this.
+ cur_frame.parent.events.append(("cycle", frame, cur_frame, "limiting to vdb"))
+ cur_frame.ignored = True
+ return self._rec_add_atom(cur_frame.atom, stack,
+ self.livefs_dbs, mode=cur_frame.mode,
+ drop_cycles = cur_frame.drop_cycles)
+
+
+ def process_dependencies(self, stack, attr, depset):
+ failure = []
+        additions, blocks = [], []
+ cur_frame = stack.current_frame
+ self.notify_starting_mode(attr, stack)
+ for potentials in depset:
+ failure = []
+ for or_node in potentials:
+ if or_node.blocks:
+ blocks.append(or_node)
+ break
+ failure = self._rec_add_atom(or_node, stack,
+ cur_frame.dbs, mode=attr,
+ drop_cycles=cur_frame.drop_cycles)
+ if failure:
+ # XXX this is whacky tacky fantastically crappy
+ # XXX kill it; purpose seems... questionable.
+ if failure and cur_frame.drop_cycles:
+ dprint("%s level cycle: %s: "
+ "dropping cycle for %s from %s",
+                            (attr, cur_frame.atom, or_node,
+ cur_frame.current_pkg),
+ "cycle")
+ failure = None
+ break
+
+ if cur_frame.reduce_solutions(or_node):
+ # pkg changed.
+ return [failure]
+ continue
+ additions.append(or_node)
+ break
+ else: # didn't find any solutions to this or block.
+ cur_frame.reduce_solutions(potentials)
+ return [potentials]
+ else: # all potentials were usable.
+ return additions, blocks
+
+ def insert_choice(self, atom, stack, choices):
+        # well, we got ourselves a resolution.
+ # do a trick to make the resolver now aware of vdb pkgs if needed
+ if not self.vdb_preloaded and not choices.current_pkg.repo.livefs:
+ slotted_atom = choices.current_pkg.slotted_atom
+ l = self.state.match_atom(slotted_atom)
+ if not l:
+ # hmm. ok... no conflicts, so we insert in vdb matches
+ # to trigger a replace instead of an install
+ for repo in self.livefs_dbs:
+ m = repo.match(slotted_atom)
+ if m:
+ c = choice_point(slotted_atom, m)
+ state.add_op(c, c.current_pkg, force=True).apply(self.state)
+ break
+
+ # first, check for conflicts.
+ # lil bit fugly, but works for the moment
+ conflicts = state.add_op(choices, choices.current_pkg).apply(self.state)
+ if conflicts:
+ # this means in this branch of resolution, someone slipped
+ # something in already. cycle, basically.
+            # hack. see if what was inserted is enough for us.
+
+ # this is tricky... if it's the same node inserted
+ # (cycle), then we ignore it; this does *not* perfectly
+ # behave though, doesn't discern between repos.
+
+ if (len(conflicts) == 1 and conflicts[0] == choices.current_pkg and
+ conflicts[0].repo.livefs == choices.current_pkg.repo.livefs and
+ atom.match(conflicts[0])):
+
+ # early exit. means that a cycle came about, but exact
+ # same result slipped through.
+ return False
+
+ dprint("was trying to insert atom '%s' pkg '%s',\n"
+ "but '[%s]' exists already",
+ (atom, choices.current_pkg,
+ ", ".join(map(str, conflicts))))
+
+ try_rematch = False
+ if any(True for x in conflicts if isinstance(x, restriction.base)):
+ # blocker was caught
+ try_rematch = True
+ elif not any (True for x in conflicts if not
+ self.vdb_restrict.match(x)):
+ # vdb entry, replace.
+ if self.vdb_restrict.match(choices.current_pkg):
+ # we're replacing a vdb entry with a vdb entry? wtf.
+ print ("internal weirdness spotted- vdb restrict matches, "
+ "but current doesn't, bailing")
+ raise Exception()
+ conflicts = state.replace_op(choices, choices.current_pkg).apply(
+ self.state)
+ if not conflicts:
+ dprint("replacing vdb entry for '%s' with pkg '%s'",
+ (atom, choices.current_pkg))
+
+ else:
+ try_rematch = True
+ if try_rematch:
+ # XXX: this block looks whacked. figure out what it's up to.
+ l2 = self.state.match_atom(atom)
+ if l2 == [choices.current_pkg]:
+ # stop resolution.
+ conflicts = False
+ elif l2:
+ # potentially need to do some form of cleanup here.
+ conflicts = False
+ else:
+ conflicts = None
+ return conflicts
+
+ def notify_viable(self, stack, atom, viable, msg='', pre_solved=False):
+ t_viable = viable and "processing" or "not viable"
+ if pre_solved and viable:
+ t_viable = "pre-solved"
+ t_msg = msg and (" "+msg) or ''
+ s=''
+ if stack:
+ s = " for %s " % (stack[-1].atom)
+ dprint("%s%s%s%s%s", (t_viable.ljust(13), " "*stack.depth, atom, s, t_msg))
+ stack.add_event(("viable", viable, pre_solved, atom, msg))
+
+ def _viable(self, atom, stack, dbs, limit_to_vdb):
+ """
+ internal function to discern if an atom is viable, returning
+ the choicepoint/matches iter if viable.
+
+        @return: 3 possible values; None (not viable), True (pre-solved),
+            or a (L{choice_point}, L{caching_iter}) tuple (viable, not yet solved)
+ """
+ if atom in self.insoluble:
+ self.notify_viable(stack, atom, False, "globally insoluble")
+ return None
+ l = self.state.match_atom(atom)
+ if l:
+ self.notify_viable(stack, atom, True, pre_solved=True)
+ return True
+ # not in the plan thus far.
+ matches = caching_iter(self.global_strategy(self, dbs, atom))
+ if matches:
+ choices = choice_point(atom, matches)
+ # ignore what dropped out, at this juncture we don't care.
+ choices.reduce_atoms(self.insoluble)
+ if choices:
+ return choices, matches
+ # and was intractable because it has a hard dep on an
+ # unsolvable atom.
+ self.notify_viable(stack, atom, False,
+ msg="pruning of insoluble deps left no choices")
+ else:
+ self.notify_viable(stack, atom, False,
+ msg="no matches")
+
+ if not limit_to_vdb:
+ self.insoluble.add(atom)
+ return None
+
+ def insert_blockers(self, stack, choices, blocks):
+ # level blockers.
+ for x in blocks:
+ # check for any matches; none, try and insert vdb nodes.
+ if not self.vdb_preloaded and \
+ not choices.current_pkg.repo.livefs and \
+ not self.state.match_atom(x):
+ for repo in self.livefs_dbs:
+ m = repo.match(x)
+ if m:
+ dprint("inserting vdb node for blocker"
+ " %s %s" % (x, m[0]))
+                        # ignore blockers for vdb atm, since
+                        # when we level this node's blockers they'll
+ # hit
+ c = choice_point(x, m)
+ state.add_op(c, c.current_pkg, force=True).apply(
+ self.state)
+ break
+
+ rewrote_blocker = self.generate_mangled_blocker(choices, x)
+ l = self.state.add_blocker(choices, rewrote_blocker, key=x.key)
+ if l:
+ # blocker caught something. yay.
+ dprint("%s blocker %s hit %s for atom %s pkg %s",
+ (stack[-1].mode, x, l, stack[-1].atom, choices.current_pkg))
+ stack.add_event(("blocker", x, l))
+ return [x]
+ return None
+
+ def _stack_debugging_rec_add_atom(self, func, atom, stack, dbs, **kwds):
+ current = len(stack)
+ cycles = kwds.get('drop_cycles', False)
+ reset_cycles = False
+ if cycles and not self._debugging_drop_cycles:
+ self._debugging_drop_cycles = reset_cycles = True
+ if not reset_cycles:
+ self._debugging_depth += 1
+
+ assert current == self._debugging_depth -1
+ ret = func(atom, stack, dbs, **kwds)
+ assert current == len(stack)
+ assert current == self._debugging_depth -1
+ if not reset_cycles:
+ self._debugging_depth -= 1
+ else:
+ self._debugging_drop_cycles = False
+ return ret
+
+ def _rec_add_atom(self, atom, stack, dbs, mode="none", drop_cycles=False):
+ """Add an atom.
+
+        @return: None on no issues (inserted successfully),
+ else a list of the stack that screwed it up.
+ """
+ limit_to_vdb = dbs == self.livefs_dbs
+
+ depth = stack.depth
+
+ matches = self._viable(atom, stack, dbs, limit_to_vdb)
+ if matches is None:
+ return [atom]
+ elif matches is True:
+ return None
+ choices, matches = matches
+
+ if stack:
+ if limit_to_vdb:
+ dprint("processing %s%s [%s]; mode %s vdb bound",
+ (depth*2*" ", atom, stack[-1].atom, mode))
+ else:
+ dprint("processing %s%s [%s]; mode %s",
+ (depth*2*" ", atom, stack[-1].atom, mode))
+ else:
+ dprint("processing %s%s", (depth*2*" ", atom))
+
+ stack.add_frame(mode, atom, choices, dbs,
+ self.state.current_state, drop_cycles, vdb_limited=limit_to_vdb)
+ ret = self.check_for_cycles(stack, stack.current_frame)
+ if ret is not True:
+ stack.pop_frame(ret is None)
+ return ret
+
+ blocks = []
+ failures = []
+
+ last_state = None
+ while choices:
+ new_state = choices.state
+ if last_state == new_state:
+ raise AssertionError("no state change detected, "
+ "old %r != new %r\nchoices(%r)\ncurrent(%r)\ndepends(%r)\n"
+ "rdepends(%r)\npost_rdepends(%r)\nprovides(%r)" %
+ (last_state, new_state, tuple(choices.matches),
+ choices.current_pkg, choices.depends,
+ choices.rdepends, choices.post_rdepends,
+ choices.provides))
+ last_state = new_state
+ additions, blocks = [], []
+
+ self.notify_trying_choice(stack, atom, choices)
+
+ if not choices.current_pkg.built or self.process_built_depends:
+ l = self.process_dependencies(stack, "depends",
+ self.depset_reorder(self, choices.depends, "depends"))
+ if len(l) == 1:
+                    dprint("resetting for %s%s because of depends: %s",
+ (depth*2*" ", atom, l[0][-1]))
+ self.state.backtrack(stack.current_frame.start_point)
+ failures = l[0]
+ continue
+ additions += l[0]
+ blocks = l[1]
+
+ # level blockers.
+ ret = self.insert_blockers(stack, choices, blocks)
+ if ret is not None:
+ # hackish in terms of failures, needs cleanup
+ failures = ret
+ self.notify_choice_failed(stack, atom, choices,
+ "failed due to %s", (ret,))
+ stack.current_frame.reduce_solutions(ret)
+ self.state.backtrack(stack.current_frame.start_point)
+ continue
+
+ l = self.process_dependencies(stack, "rdepends",
+ self.depset_reorder(self, choices.rdepends, "rdepends"))
+ if len(l) == 1:
+                dprint("resetting for %s%s because of rdepends: %s",
+ (depth*2*" ", atom, l[0]))
+ self.state.backtrack(stack.current_frame.start_point)
+ failures = l[0]
+ continue
+ additions += l[0]
+ blocks = l[1]
+
+ l = self.insert_choice(atom, stack, choices)
+ if l is False:
+ # this means somehow the node already slipped in.
+ # so we exit now, we are satisfied
+ self.notify_choice_succeeded(stack, atom, choices,
+ "already exists in the state plan")
+ stack.pop_frame(True)
+ return None
+ elif l is not None:
+ # failure.
+ self.notify_choice_failed(stack, atom, choices,
+ "failed inserting: %s", l)
+ self.state.backtrack(stack.current_frame.start_point)
+ choices.force_next_pkg()
+ continue
+
+ # XXX: push this into a method.
+ fail = True
+ for x in choices.provides:
+ l = state.add_op(choices, x).apply(self.state)
+ if l and l != [x]:
+                    # slight hack; providers should be pruned as the parent is
+                    # removed.  this duplicates that; if it's not a restrict,
+                    # then it's a pkg, thus poke it.
+ if len(l) == 1 and not isinstance(l[0], restriction.base):
+ p = getattr(l[0], 'provider', None)
+ if p is not None and not self.state.match_atom(p):
+ # ok... force it.
+ fail = state.replace_op(choices, x).apply(self.state)
+ if not fail:
+ continue
+ break
+ fail = l
+ break
+ else:
+ fail = False
+ if fail:
+ self.state.backtrack(stack.current_frame.start_point)
+ choices.force_next_pkg()
+ continue
+
+ ret = self.insert_blockers(stack, choices, blocks)
+ if ret is not None:
+ # hackish in terms of failures, needs cleanup
+ failures = ret
+ self.notify_choice_failed(stack, atom, choices,
+ "failed due to %s", (ret,))
+ stack.current_frame.reduce_solutions(ret)
+ self.state.backtrack(stack.current_frame.start_point)
+ continue
+
+ l = self.process_dependencies(stack, "post_rdepends",
+ self.depset_reorder(self, choices.post_rdepends,
+ "post_rdepends"))
+
+ if len(l) == 1:
+                dprint("resetting for %s%s because of post_rdepends: %s",
+ (depth*2*" ", atom, l[0]))
+ self.state.backtrack(stack.current_frame.start_point)
+ failures = l[0]
+ continue
+ additions += l[0]
+ blocks = l[1]
+
+ # level blockers.
+ ret = self.insert_blockers(stack, choices, blocks)
+ if ret is not None:
+ # hackish in terms of failures, needs cleanup
+ failures = ret
+ self.notify_choice_failed(stack, atom, choices,
+ "failed due to %s", (ret,))
+ stack.current_frame.reduce_solutions(ret)
+ self.state.backtrack(stack.current_frame.start_point)
+ continue
+ # kinky... the request is fully satisfied
+ break
+
+ else:
+ dprint("no solution %s%s", (depth*2*" ", atom))
+ self.state.backtrack(stack.current_frame.start_point)
+            # saving roll. if we're allowed to drop cycles, try it again.
+            # this needs to be *far* more fine grained also; it'll retry
+            # regardless of whether it's actually a cycle issue.
+ if not drop_cycles and self.drop_cycles:
+ dprint("trying saving throw for %s ignoring cycles",
+ atom, "cycle")
+                # note everything is restored to a pristine state prior also.
+ stack[-1].ignored = True
+ l = self._rec_add_atom(atom, stack, dbs,
+ mode=mode, drop_cycles=True)
+ if not l:
+ stack.pop_frame(True)
+ return None
+ stack.pop_frame(False)
+ return [atom] + failures
+
+ self.notify_choice_succeeded(stack, atom, choices)
+ stack.pop_frame(True)
+ return None
+
+ def generate_mangled_blocker(self, choices, blocker):
+ """converts a blocker into a "cannot block ourself" block"""
+        # note the second clause is a bit loose; it allows any version to
+ # slip through instead of blocking everything that isn't the
+ # parent pkg
+ if blocker.category != 'virtual':
+ return blocker
+ return packages.AndRestriction(blocker,
+ packages.PackageRestriction("provider.key",
+ values.StrExactMatch(choices.current_pkg.key),
+ negate=True, ignore_missing=True),
+ finalize=True)
+
+ def free_caches(self):
+ for repo in self.all_dbs:
+ repo.clear()
+
+ # selection strategies for atom matches
+
+ @staticmethod
+ def default_depset_reorder_strategy(self, depset, mode):
+ for or_block in depset:
+ vdb = []
+ non_vdb = []
+ if len(or_block) == 1:
+ yield or_block
+ continue
+ for atom in or_block:
+ if atom.blocks:
+ non_vdb.append(atom)
+ elif self.state.match_atom(atom):
+ vdb.append(atom)
+ elif caching_iter(p for r in self.livefs_dbs
+ for p in r.match(atom)):
+ vdb.append(atom)
+ else:
+ non_vdb.append(atom)
+ if vdb:
+ yield vdb + non_vdb
+ else:
+ yield or_block
+
+ @staticmethod
+ def default_global_strategy(self, dbs, atom):
+ return chain(*[repo.match(atom) for repo in dbs])
+
+ @staticmethod
+ def just_livefs_dbs(dbs):
+ return (r for r in dbs if r.livefs)
+
+ @staticmethod
+ def just_nonlivefs_dbs(dbs):
+ return (r for r in dbs if not r.livefs)
+
+ @classmethod
+ def prefer_livefs_dbs(cls, dbs, just_vdb=None):
+ """
+ @param dbs: db list to walk
+ @param just_vdb: if None, no filtering; if True, just vdb, if False,
+ non-vdb only
+ @return: yields repositories in requested ordering
+ """
+        if just_vdb is None:
+            return chain(cls.just_livefs_dbs(dbs), cls.just_nonlivefs_dbs(dbs))
+        elif just_vdb:
+            return cls.just_livefs_dbs(dbs)
+        return cls.just_nonlivefs_dbs(dbs)
+
+ @staticmethod
+ def prefer_highest_version_strategy(self, dbs, atom):
+ # XXX rework caching_iter so that it iter's properly
+ return iter_sort(highest_iter_sort,
+ *[repo.match(atom)
+ for repo in self.prefer_livefs_dbs(dbs)])
+
+ @staticmethod
+ def prefer_lowest_version_strategy(self, dbs, atom):
+ return iter_sort(lowest_iter_sort,
+ self.default_global_strategy(self, dbs, atom))
+
+ @staticmethod
+ def prefer_reuse_strategy(self, dbs, atom):
+
+ return chain(
+ iter_sort(highest_iter_sort,
+ *[repo.match(atom) for repo in self.just_livefs_dbs(dbs)]),
+ iter_sort(highest_iter_sort,
+ *[repo.match(atom) for repo in self.just_nonlivefs_dbs(dbs)])
+ )
+
+ def generic_force_version_strategy(self, vdb, dbs, atom, iter_sorter,
+ pkg_sorter):
+ try:
+ # nasty, but works.
+ yield iter_sort(iter_sorter,
+ *[r.itermatch(atom, sorter=pkg_sorter)
+ for r in [vdb] + dbs]).next()
+ except StopIteration:
+ # twas no matches
+ pass
+
+ force_max_version_strategy = staticmethod(
+ post_curry(generic_force_version_strategy,
+ highest_iter_sort, pkg_sort_highest))
+ force_min_version_strategy = staticmethod(
+ post_curry(generic_force_version_strategy,
+ lowest_iter_sort, pkg_sort_lowest))
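
Pulling the pieces together, a minimal sketch of driving the planner. It assumes repositories are configured elsewhere, that pkg_sort_highest (defined above) is an acceptable per-repository sort strategy for caching_repo, and that the atom class from pkgcore.ebuild.atom (added elsewhere in this patch) is available; none of the names in resolve_one are mandated by this module.

    from pkgcore.resolver import plan
    from pkgcore.ebuild.atom import atom

    def resolve_one(repos, atom_str):
        """repos: sequence of repository trees (vdb plus source repos)."""
        resolver = plan.merge_plan(
            list(repos), plan.pkg_sort_highest,
            global_strategy=plan.merge_plan.prefer_reuse_strategy)
        failure = resolver.add_atom(atom(atom_str))
        if failure:
            # non-empty result: the atom (or one of its deps) was unresolvable
            return None
        # successful ops, skipping anything already satisfied from the vdb
        return list(resolver.state.iter_ops())
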
diff --git a/pkgcore/resolver/state.py b/pkgcore/resolver/state.py
new file mode 100644
index 0000000..82e19ed
--- /dev/null
+++ b/pkgcore/resolver/state.py
@@ -0,0 +1,205 @@
+# Copyright: 2006-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+from snakeoil.containers import RefCountingSet
+from pkgcore.resolver.pigeonholes import PigeonHoledSlots
+
+REMOVE = 0
+ADD = 1
+REPLACE = 2
+FORWARD_BLOCK_INCREF = 3
+FORWARD_BLOCK_DECREF = 4
+
+class plan_state(object):
+ def __init__(self):
+ self.state = PigeonHoledSlots()
+ self.plan = []
+ self.pkg_choices = {}
+ self.rev_blockers = {}
+ self.blockers_refcnt = RefCountingSet()
+ self.match_atom = self.state.find_atom_matches
+
+ def add_blocker(self, choices, blocker, key=None):
+ """adds blocker, returning any packages blocked"""
+ return incref_forward_block_op(choices, blocker, key).apply(self)
+
+ def _remove_pkg_blockers(self, choices):
+ l = self.rev_blockers.get(choices, ())
+ # walk a copy- it's possible it'll change under foot
+ for blocker, key in l[:]:
+ decref_forward_block_op(choices, blocker, key).apply(self)
+
+ def backtrack(self, state_pos):
+ assert state_pos <= len(self.plan)
+ if len(self.plan) == state_pos:
+ return
+ for change in reversed(self.plan[state_pos:]):
+ change.revert(self)
+ self.plan = self.plan[:state_pos]
+
+ def iter_ops(self, return_livefs=False):
+ iterable = (x for x in self.plan if not x.internal)
+ if return_livefs:
+ return iterable
+ return (y for y in iterable
+ if not y.pkg.repo.livefs)
+
+ @property
+ def current_state(self):
+ return len(self.plan)
+
+
+class base_op(object):
+ __slots__ = ("pkg", "force", "choices")
+ internal = False
+
+ def __init__(self, choices, pkg, force=False):
+ self.choices = choices
+ self.pkg = pkg
+ self.force = force
+
+ def __str__(self):
+ s = ''
+ if self.force:
+ s = ' forced'
+ return "%s: %s%s" % (self.desc, self.pkg, s)
+
+
+class add_op(base_op):
+
+ desc = "add"
+
+ def apply(self, plan):
+ l = plan.state.fill_slotting(self.pkg, force=self.force)
+ if l and not self.force:
+ return l
+ plan.pkg_choices[self.pkg] = self.choices
+ plan.plan.append(self)
+
+ def revert(self, plan):
+ plan.state.remove_slotting(self.pkg)
+ del plan.pkg_choices[self.pkg]
+
+
+class remove_op(base_op):
+ __slots__ = ()
+
+ desc = "remove"
+
+ def apply(self, plan):
+ plan.state.remove_slotting(self.pkg)
+        plan._remove_pkg_blockers(plan.pkg_choices[self.pkg])
+ del plan.pkg_choices[self.pkg]
+ plan.plan.append(self)
+
+ def revert(self, plan):
+ plan.state.fill_slotting(self.pkg, force=self.force)
+ plan.pkg_choices[self.pkg] = self.choices
+
+
+class replace_op(base_op):
+ __slots__ = ("old_pkg", "old_choices")
+
+ desc = "replace"
+
+ def apply(self, plan):
+ old = plan.state.get_conflicting_slot(self.pkg)
+ # probably should just convert to an add...
+ assert old is not None
+ plan.state.remove_slotting(old)
+ old_choices = plan.pkg_choices[old]
+ revert_point = plan.current_state
+ plan._remove_pkg_blockers(old_choices)
+ l = plan.state.fill_slotting(self.pkg, force=self.force)
+ if l:
+ # revert... limiter.
+ l2 = plan.state.fill_slotting(old)
+ plan.backtrack(revert_point)
+ assert not l2
+ return l
+
+        # wipe old's blockers.
+
+ self.old_pkg = old
+ self.old_choices = old_choices
+ del plan.pkg_choices[old]
+ plan.pkg_choices[self.pkg] = self.choices
+ plan.plan.append(self)
+
+ def revert(self, plan):
+        # far simpler, since the apply op generates multiple ops on its own.
+ # all we have to care about is swap.
+ plan.state.remove_slotting(self.pkg)
+ l = plan.state.fill_slotting(self.old_pkg, force=self.force)
+ assert not l
+ del plan.pkg_choices[self.pkg]
+ plan.pkg_choices[self.old_pkg] = self.old_choices
+
+ def __str__(self):
+ s = ''
+ if self.force:
+ s = ' forced'
+ return "replace: %s with %s%s" % (self.old_pkg, self.pkg, s)
+
+
+class blocker_base_op(object):
+ __slots__ = ("choices", "blocker", "key")
+
+ desc = None
+ internal = True
+
+ def __init__(self, choices, blocker, key=None):
+ if key is None:
+ self.key = blocker.key
+ else:
+ self.key = key
+ self.choices = choices
+ self.blocker = blocker
+
+ def __str__(self):
+ return "%s: key %s, %s from %s" % (self.__class__.__name__, self.key,
+ self.blocker, self.choices)
+
+
+class incref_forward_block_op(blocker_base_op):
+ __slots__ = ()
+
+ def apply(self, plan):
+ plan.plan.append(self)
+ if self.blocker not in plan.blockers_refcnt:
+ l = plan.state.add_limiter(self.blocker, self.key)
+ else:
+ l = []
+ plan.rev_blockers.setdefault(self.choices, []).append(
+ (self.blocker, self.key))
+ plan.blockers_refcnt.add(self.blocker)
+ return l
+
+ def revert(self, plan):
+ l = plan.rev_blockers[self.choices]
+ l.remove((self.blocker, self.key))
+ if not l:
+ del plan.rev_blockers[self.choices]
+ plan.blockers_refcnt.remove(self.blocker)
+ if self.blocker not in plan.blockers_refcnt:
+ plan.state.remove_limiter(self.blocker, self.key)
+
+
+class decref_forward_block_op(blocker_base_op):
+ __slots__ = ()
+
+ def apply(self, plan):
+ plan.plan.append(self)
+ plan.blockers_refcnt.remove(self.blocker)
+ if self.blocker not in plan.blockers_refcnt:
+ plan.state.remove_limiter(self.blocker, self.key)
+ plan.rev_blockers[self.choices].remove((self.blocker, self.key))
+ if not plan.rev_blockers[self.choices]:
+ del plan.rev_blockers[self.choices]
+
+ def revert(self, plan):
+ plan.rev_blockers.setdefault(self.choices, []).append(
+ (self.blocker, self.key))
+ if self.blocker not in plan.blockers_refcnt:
+ plan.state.add_limiter(self.blocker, self.key)
+ plan.blockers_refcnt.add(self.blocker)
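
A tiny sketch of the op/backtrack mechanics; `_pkg` is an invented stub (only key and slot matter) and a bare object() stands in for a choice_point, which add_op merely records.

    from pkgcore.resolver import state

    class _pkg(object):
        def __init__(self, key, slot="0"):
            self.key, self.slot = key, slot

    s = state.plan_state()
    mark = s.current_state                 # remember a backtrack point
    pkg, choices = _pkg("dev-util/diffball"), object()
    assert not state.add_op(choices, pkg).apply(s)   # falsy result == inserted
    assert s.current_state == 1
    s.backtrack(mark)                      # reverts the add in reverse order
    assert s.current_state == 0

Keeping every mutation as an op object with apply()/revert() is what lets backtrack() unwind the plan to any earlier point without the resolver tracking the changes itself.
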
diff --git a/pkgcore/resolver/util.py b/pkgcore/resolver/util.py
new file mode 100644
index 0000000..368096e
--- /dev/null
+++ b/pkgcore/resolver/util.py
@@ -0,0 +1,42 @@
+# Copyright: 2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+from itertools import groupby
+
+def group_attempts(sequence, filter_func=lambda x:True):
+ last, l = None, []
+ for x in sequence:
+ if isinstance(x, tuple) and x[0] == 'inspecting':
+ if l:
+ yield last, l
+ last, l = x[1], []
+ elif last is not None:
+ if filter_func(x):
+ # inline ignored frames
+ if getattr(x, 'ignored', False):
+ l.extend(y for y in x.events if filter_func(y))
+ else:
+ l.append(x)
+ if l:
+ yield last, l
+
+def fails_filter(x):
+ if not isinstance(x, tuple):
+ return not x.succeeded
+ if x[0] == "viable":
+ return not x[1]
+ return x[0] != "inspecting"
+
+def reduce_to_failures(frame):
+ if frame.succeeded:
+ return []
+ l = [frame]
+ for pkg, nodes in group_attempts(frame.events, fails_filter):
+ l2 = []
+ for x in nodes:
+ if not isinstance(x, tuple):
+ l2.append(reduce_to_failures(x))
+ else:
+ l2.append(x)
+ l.append((pkg, l2))
+ return l
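
group_attempts only looks at the event stream a frame records, so it can be exercised with hand-written tuples shaped like the ones the resolver emits ("inspecting", "viable", "blocker"); the values below are invented.

    from pkgcore.resolver.util import group_attempts

    events = [
        ("inspecting", "diffball-1.0"),
        ("viable", False, False, "dev-libs/bzip2", "no matches"),
        ("inspecting", "diffball-0.9"),
        ("blocker", "!dev-libs/bzip2", ["something-blocked"]),
    ]
    grouped = list(group_attempts(events))
    # one (pkg, events) pair per attempted package
    assert [pkg for pkg, _ in grouped] == ["diffball-1.0", "diffball-0.9"]
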
diff --git a/pkgcore/restrictions/__init__.py b/pkgcore/restrictions/__init__.py
new file mode 100644
index 0000000..dff841e
--- /dev/null
+++ b/pkgcore/restrictions/__init__.py
@@ -0,0 +1,4 @@
+# Copyright: 2005 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""restriction subsystem, used both for dependencies and general package queries"""
diff --git a/pkgcore/restrictions/_restrictions.so b/pkgcore/restrictions/_restrictions.so
new file mode 100755
index 0000000..2b1e691
--- /dev/null
+++ b/pkgcore/restrictions/_restrictions.so
Binary files differ
diff --git a/pkgcore/restrictions/boolean.py b/pkgcore/restrictions/boolean.py
new file mode 100644
index 0000000..0552c40
--- /dev/null
+++ b/pkgcore/restrictions/boolean.py
@@ -0,0 +1,490 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""Boolean combinations of restrictions.
+
+This module provides classes that can be used to combine arbitrary
+collections of restrictions in AND, NAND, OR, NOR, XOR, XNOR style
+operations.
+"""
+
+__all__ = ("AndRestriction", "OrRestriction")
+
+from itertools import islice
+from pkgcore.restrictions import restriction
+from snakeoil.klass import generic_equality
+
+class base(restriction.base):
+
+ """base template for boolean restrictions"""
+
+ __metaclass__ = generic_equality
+ __attr_comparison__ = ('negate', 'type', 'restrictions')
+ __slots__ = ('restrictions', 'type', 'negate')
+
+ def __init__(self, *restrictions, **kwds):
+
+ """
+ @keyword node_type: type of restriction this accepts
+ (L{package_type<pkgcore.restrictions.packages.package_type>} and
+ L{value_type<pkgcore.restrictions.values.value_type>} being
+ common types). If set to C{None}, no instance limiting is done.
+ @type restrictions: node_type (if that is specified)
+ @param restrictions: initial restrictions to add
+ @keyword finalize: should this instance be made immutable immediately?
+ @keyword negate: should the logic be negated?
+ """
+
+ sf = object.__setattr__
+
+ node_type = kwds.pop("node_type", None)
+
+ sf(self, "type", node_type)
+ sf(self, "negate", kwds.pop("negate", False))
+
+ if node_type is not None:
+ try:
+ for r in restrictions:
+ if r.type is not None and r.type != node_type:
+ raise TypeError(
+ "instance '%s' is restriction type '%s', "
+ "must be '%s'" % (r, r.type, node_type))
+ except AttributeError:
+ raise TypeError(
+ "type '%s' instance '%s' has no restriction type, "
+ "'%s' required" % (
+ r.__class__, r, node_type))
+
+ if kwds.pop("finalize", False):
+ if not isinstance(restrictions, tuple):
+ sf(self, "restrictions", tuple(restrictions))
+ else:
+ sf(self, "restrictions", restrictions)
+ else:
+ sf(self, "restrictions", list(restrictions))
+
+ if kwds:
+ kwds.pop("disable_inst_caching", None)
+ if kwds:
+ raise TypeError("unknown keywords to %s: %s" %
+ (self.__class__, kwds))
+
+ def change_restrictions(self, *restrictions, **kwds):
+ """
+ return a new instance of self.__class__, using supplied restrictions
+
+ """
+ if self.type is not None:
+ if self.__class__.type not in restriction.valid_types or \
+ self.__class__.type != self.type:
+ kwds["node_type"] = self.type
+ kwds.setdefault("negate", self.negate)
+ return self.__class__(*restrictions, **kwds)
+
+ def add_restriction(self, *new_restrictions):
+ """
+        add one or more restrictions
+
+ @param new_restrictions: if node_type is enforced,
+ restrictions must be of that type.
+ """
+
+ if not new_restrictions:
+ raise TypeError("need at least one restriction handed in")
+ if self.type is not None:
+ try:
+ for r in new_restrictions:
+ if r.type is not None and r.type != self.type:
+ raise TypeError(
+ "instance '%s' is restriction type '%s', "
+ "must be '%s'" % (r, r.type, self.type))
+ except AttributeError:
+ raise TypeError(
+ "type '%s' instance '%s' has no restriction type, "
+ "'%s' required" % (
+ r.__class__, r, getattr(self, "type", "unset")))
+
+ try:
+ self.restrictions.extend(new_restrictions)
+ except AttributeError:
+ raise TypeError("%r is finalized" % self)
+
+ def finalize(self):
+ """
+ finalize the restriction instance, disallowing adding restrictions.
+ """
+ object.__setattr__(self, "restrictions", tuple(self.restrictions))
+
+ def __repr__(self):
+ return '<%s negate=%r type=%r finalized=%r restrictions=%r @%#8x>' % (
+ self.__class__.__name__, self.negate, getattr(self, 'type', None),
+ isinstance(self.restrictions, tuple), self.restrictions,
+ id(self))
+
+ def __len__(self):
+ return len(self.restrictions)
+
+ def __iter__(self):
+ return iter(self.restrictions)
+
+ def match(self, action, *vals):
+ raise NotImplementedError
+
+ force_False, force_True = match, match
+
+ def dnf_solutions(self, full_solution_expansion=False):
+ raise NotImplementedError()
+
+ cnf_solutions = dnf_solutions
+
+ def iter_cnf_solutions(self, *a, **kwds):
+ """iterate over the cnf solution"""
+ return iter(self.cnf_solutions(*a, **kwds))
+
+ def iter_dnf_solutions(self, *a, **kwds):
+ """iterate over the dnf solution"""
+ return iter(self.dnf_solutions(*a, **kwds))
+
+ def __getitem__(self, key):
+ return self.restrictions[key]
+
+
+# this beast, handles N^2 permutations. convert to stack based.
+def iterative_quad_toggling(pkg, pvals, restrictions, starting, end, truths,
+ filter, desired_false=None, desired_true=None,
+ kill_switch=None):
+ if desired_false is None:
+ desired_false = lambda r, a:r.force_False(*a)
+ if desired_true is None:
+ desired_true = lambda r, a:r.force_True(*a)
+
+# import pdb;pdb.set_trace()
+ reset = True
+ if starting == 0:
+ if filter(truths):
+ yield True
+ for index, rest in islice(enumerate(restrictions), starting, end):
+ if reset:
+ entry = pkg.changes_count()
+ reset = False
+ if truths[index]:
+ if desired_false(rest, pvals):
+ reset = True
+ t = truths[:]
+ t[index] = False
+ if filter(t):
+ yield True
+ for i in iterative_quad_toggling(
+ pkg, pvals, restrictions, index + 1, end, t, filter,
+ desired_false=desired_false, desired_true=desired_true,
+ kill_switch=kill_switch):
+# import pdb;pdb.set_trace()
+ yield True
+ reset = True
+ else:
+ if kill_switch is not None and kill_switch(truths, index):
+ return
+ else:
+ if desired_true(rest, pvals):
+ reset = True
+ t = truths[:]
+ t[index] = True
+ if filter(t):
+ yield True
+ for x in iterative_quad_toggling(
+ pkg, pvals, restrictions, index + 1, end, t, filter,
+ desired_false=desired_false, desired_true=desired_true):
+# import pdb;pdb.set_trace()
+ yield True
+ reset = True
+ elif index == end:
+ if filter(truths):
+# import pdb;pdb.set_trace()
+ yield True
+ else:
+ if kill_switch is not None and kill_switch(truths, index):
+ return
+
+ if reset:
+ pkg.rollback(entry)
+
+
+class AndRestriction(base):
+ """Boolean AND grouping of restrictions. negation is a NAND"""
+ __slots__ = ()
+
+ def match(self, vals):
+ for rest in self.restrictions:
+ if not rest.match(vals):
+ return self.negate
+ return not self.negate
+
+ def force_True(self, pkg, *vals):
+ pvals = [pkg]
+ pvals.extend(vals)
+ entry_point = pkg.changes_count()
+ # get the simple one out of the way first.
+ if not self.negate:
+ for r in self.restrictions:
+ if not r.force_True(*pvals):
+ pkg.rollback(entry_point)
+ return False
+ return True
+
+ # <insert page long curse here>, NAND logic,
+ # len(restrictions)**2 potential solutions.
+        # 1|1 == 0, 0|1 == 1|0 == 0|0 == 1.
+ # XXX this is quadratic. patches welcome to dodge the
+ # requirement to push through all potential truths.
+ truths = [r.match(*pvals) for r in self.restrictions]
+ def filter(truths):
+ return False in truths
+
+ for i in iterative_quad_toggling(pkg, pvals, self.restrictions, 0,
+ len(self.restrictions), truths,
+ filter):
+ return True
+ return False
+
+ def force_False(self, pkg, *vals):
+ pvals = [pkg]
+ pvals.extend(vals)
+ entry_point = pkg.changes_count()
+ # get the simple one out of the way first.
+ if self.negate:
+ for r in self.restrictions:
+ if not r.force_True(*pvals):
+ pkg.rollback(entry_point)
+ return False
+ return True
+
+ # <insert page long curse here>, NAND logic,
+ # (len(restrictions)^2)-1 potential solutions.
+ # 1|1 == 0, 0|1 == 1|0 == 0|0 == 1.
+ # XXX this is quadratic. patches welcome to dodge the
+ # requirement to push through all potential truths.
+ truths = [r.match(*pvals) for r in self.restrictions]
+ def filter(truths):
+ return False in truths
+ for i in iterative_quad_toggling(pkg, pvals, self.restrictions, 0,
+ len(self.restrictions), truths,
+ filter):
+ return True
+ return False
+
+ def iter_dnf_solutions(self, full_solution_expansion=False):
+ """
+        generator yielding the DNF (disjunctive normalized form) of this instance.
+
+ @param full_solution_expansion: controls whether to expand everything
+ (break apart atoms for example); this isn't likely what you want
+ """
+ if self.negate:
+# raise NotImplementedError("negation for dnf_solutions on "
+# "AndRestriction isn't implemented yet")
+ # hack- this is an experiment
+ for r in OrRestriction(
+ node_type=self.type,
+ *[restriction.Negate(x)
+ for x in self.restrictions]).iter_dnf_solutions():
+ yield r
+ return
+ if not self.restrictions:
+ yield []
+ return
+ hardreqs = []
+ optionals = []
+ for x in self.restrictions:
+ if isinstance(x, base):
+ s2 = x.dnf_solutions(
+ full_solution_expansion=full_solution_expansion)
+ assert s2
+ if len(s2) == 1:
+ hardreqs.extend(s2[0])
+ else:
+ optionals.append(s2)
+ else:
+ hardreqs.append(x)
+ def f(arg, *others):
+ if others:
+ for node in arg:
+ for node2 in f(*others):
+ yield node + node2
+ else:
+ for node in arg:
+ yield node
+
+ for solution in f([hardreqs], *optionals):
+ assert isinstance(solution, (tuple, list))
+ yield solution
+
+ def dnf_solutions(self, *args, **kwds):
+ """
+ list form of L{iter_dnf_solutions}, see iter_dnf_solutions for args
+ """
+ return list(self.iter_dnf_solutions(*args, **kwds))
+
+ def cnf_solutions(self, full_solution_expansion=False):
+
+ """
+ returns solutions in CNF (conjunctive normalized form) of this instance
+
+ @param full_solution_expansion: controls whether to expand everything
+ (break apart atoms for example); this isn't likely what you want
+ """
+
+ if self.negate:
+ raise NotImplementedError("negation for solutions on "
+ "AndRestriction isn't implemented yet")
+ andreqs = []
+ for x in self.restrictions:
+ if isinstance(x, base):
+ andreqs.extend(x.cnf_solutions(
+ full_solution_expansion=full_solution_expansion))
+ else:
+ andreqs.append([x])
+ return andreqs
+
+ def __str__(self):
+ if self.negate:
+ return "not ( %s )" % " && ".join(str(x) for x in self.restrictions)
+ return "( %s )" % " && ".join(str(x) for x in self.restrictions)
+
+
+class OrRestriction(base):
+ """Boolean OR grouping of restrictions."""
+ __slots__ = ()
+
+ def match(self, vals):
+ for rest in self.restrictions:
+ if rest.match(vals):
+ return not self.negate
+ return self.negate
+
+ def cnf_solutions(self, full_solution_expansion=False):
+ """
+        returns a list in CNF (conjunctive normalized form) of this instance
+
+ @param full_solution_expansion: controls whether to expand everything
+ (break apart atoms for example); this isn't likely what you want
+ """
+ if self.negate:
+ raise NotImplementedError(
+ "OrRestriction.solutions doesn't yet support self.negate")
+
+ if not self.restrictions:
+ return []
+
+ dcnf = []
+ cnf = []
+ for x in self.restrictions:
+ if isinstance(x, base):
+ s2 = x.dnf_solutions(
+ full_solution_expansion=full_solution_expansion)
+ if len(s2) == 1:
+ cnf.extend(s2)
+ else:
+ for y in s2:
+ if len(y) == 1:
+ dcnf.append(y[0])
+ else:
+ cnf.append(y)
+ else:
+ dcnf.append(x)
+
+ # combinatorial explosion. if it's got cnf, we peel off one of
+ # each and smash append to the dcnf.
+ dcnf = [dcnf]
+ for andreq in cnf:
+ dcnf = list(y + [x] for x in andreq for y in dcnf)
+ return dcnf
+
+
+ def iter_dnf_solutions(self, full_solution_expansion=False):
+ """
+        generator yielding the DNF (disjunctive normalized form) of this instance
+
+ @param full_solution_expansion: controls whether to expand everything
+ (break apart atoms for example); this isn't likely what you want
+ """
+ if self.negate:
+ # hack- this is an experiment
+ for x in AndRestriction(
+ node_type=self.type,
+ *[restriction.Negate(x)
+ for x in self.restrictions]).iter_dnf_solutions():
+                yield x
+            return
+ if not self.restrictions:
+ yield []
+ return
+ for x in self.restrictions:
+ if isinstance(x, base):
+ for y in x.iter_dnf_solutions(
+ full_solution_expansion=full_solution_expansion):
+ yield y
+ else:
+ yield [x]
+
+ def dnf_solutions(self, *args, **kwds):
+ """
+        list form of L{iter_dnf_solutions}, see iter_dnf_solutions for args
+ """
+ return list(self.iter_dnf_solutions(*args, **kwds))
+
+ def force_True(self, pkg, *vals):
+ pvals = [pkg]
+ pvals.extend(vals)
+ entry_point = pkg.changes_count()
+ # get the simple one out of the way first.
+ if self.negate:
+ for r in self.restrictions:
+ if not r.force_False(*pvals):
+ pkg.rollback(entry_point)
+ return False
+ return True
+
+ # <insert page long curse here>, OR logic,
+ # len(restrictions)**2-1 potential solutions.
+ # 0|0 == 0, 0|1 == 1|0 == 1|1 == 1.
+ # XXX this is quadratic. patches welcome to dodge the
+ # requirement to push through all potential truths.
+ truths = [r.match(*pvals) for r in self.restrictions]
+ def filter(truths):
+ return True in truths
+ for i in iterative_quad_toggling(pkg, pvals, self.restrictions, 0,
+ len(self.restrictions), truths,
+ filter):
+ return True
+ return False
+
+ def force_False(self, pkg, *vals):
+ pvals = [pkg]
+ pvals.extend(vals)
+ entry_point = pkg.changes_count()
+ # get the simple one out of the way first.
+ if not self.negate:
+ for r in self.restrictions:
+ if not r.force_False(*pvals):
+ pkg.rollback(entry_point)
+ return
+ yield True
+ return
+
+ # <insert page long curse here>, OR logic,
+ # (len(restrictions)**2)-1 potential solutions.
+ # 0|0 == 0, 0|1 == 1|0 == 1|1 == 1.
+ # XXX this is quadratic. patches welcome to dodge the
+ # requirement to push through all potential truths.
+ truths = [r.match(*pvals) for r in self.restrictions]
+ def filter(truths):
+ return True in truths
+ for i in iterative_quad_toggling(pkg, pvals, self.restrictions, 0,
+ len(self.restrictions), truths,
+ filter):
+ yield True
+
+
+ def __str__(self):
+ if self.negate:
+ return "not ( %s )" % " || ".join(str(x) for x in self.restrictions)
+ return "( %s )" % " || ".join(str(x) for x in self.restrictions)
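
A compact, self-contained example of the grouping and the solution expansion; `_leaf` is a made-up stand-in for a real value restriction, and node_type is left unset so no type checking kicks in.

    from pkgcore.restrictions import boolean

    class _leaf(object):
        """minimal stand-in restriction: matches one fixed value."""
        type = None
        def __init__(self, val):
            self.val = val
        def match(self, other):
            return other == self.val

    need_a, need_b = _leaf("a"), _leaf("b")
    assert not boolean.AndRestriction(need_a, need_b).match("a")
    assert boolean.OrRestriction(need_a, need_b).match("a")
    # CNF of an AND is one clause per term; DNF of an OR is one solution per term
    assert boolean.AndRestriction(need_a, need_b).cnf_solutions() == [[need_a], [need_b]]
    assert boolean.OrRestriction(need_a, need_b).dnf_solutions() == [[need_a], [need_b]]

The resolver consumes dependency restrictions through exactly these methods; choice_point._reset_iters, for instance, calls cnf_solutions() on each depset.
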
diff --git a/pkgcore/restrictions/delegated.py b/pkgcore/restrictions/delegated.py
new file mode 100644
index 0000000..2d66840
--- /dev/null
+++ b/pkgcore/restrictions/delegated.py
@@ -0,0 +1,53 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+functionality to hand off to a callable, enabling collapsing
+long chains of restrictions into N log N lookups, or generating
+restrictions on the fly
+"""
+
+__all__ = ("delegate",)
+from pkgcore.restrictions import restriction
+from pkgcore.restrictions import packages
+
+class delegate(restriction.base):
+
+ """
+    hand off matching to a callable handed in at construction time
+
+ Example usage of this class should be available in
+ L{pkgcore.ebuild.domain}.
+ """
+
+ __slots__ = ('_transform', 'negate')
+
+ type = packages.package_type
+ inst_caching = False
+
+ def __init__(self, transform_func, negate=False):
+ """
+
+        @param transform_func: callable invoked with the package instance and
+            mode; mode may be "match", "force_True", or "force_False"
+ """
+
+ if not callable(transform_func):
+ raise TypeError(transform_func)
+
+ object.__setattr__(self, "negate", negate)
+ object.__setattr__(self, "_transform", transform_func)
+
+
+ def match(self, pkginst):
+ return self._transform(pkginst, "match") != self.negate
+
+ def force_True(self, pkginst):
+ if self.negate:
+ return self._transform(pkginst, "force_False")
+ return self._transform(pkginst, "force_True")
+
+ def force_False(self, pkginst):
+ if self.negate:
+ return self._transform(pkginst, "force_True")
+ return self._transform(pkginst, "force_False")
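
An illustrative sketch of the hand-off; `_is_virtual` and `_pkg` are invented for the example, and the only contract relied on is the (pkg, mode) call signature documented above.

    from pkgcore.restrictions.delegated import delegate

    def _is_virtual(pkg, mode):
        # mode is "match", "force_True" or "force_False"; only matching matters here
        return getattr(pkg, "category", None) == "virtual"

    class _pkg(object):
        category = "virtual"

    assert delegate(_is_virtual).match(_pkg())
    assert not delegate(_is_virtual, negate=True).match(_pkg())
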
diff --git a/pkgcore/restrictions/packages.py b/pkgcore/restrictions/packages.py
new file mode 100644
index 0000000..620e825
--- /dev/null
+++ b/pkgcore/restrictions/packages.py
@@ -0,0 +1,245 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+restriction classes designed for package level matching
+"""
+
+from pkgcore.restrictions import restriction, boolean
+from snakeoil.compatibility import any
+from snakeoil.klass import chained_getter, generic_equality
+from snakeoil.demandload import demandload
+demandload(globals(), "pkgcore.log:logger")
+
+# Backwards compatibility.
+package_type = restriction.package_type
+
+
+class native_PackageRestriction(object):
+ __slots__ = ('_pull_attr', 'attr', 'restriction', 'ignore_missing',
+ 'negate')
+
+ __attr_comparison__ = ("__class__", "negate", "attr", "restriction")
+ __metaclass__ = generic_equality
+
+ def __init__(self, attr, childrestriction, negate=False,
+ ignore_missing=True):
+ """
+ @param attr: package attribute to match against
+ @param childrestriction: a L{pkgcore.restrictions.values.base} instance
+ to pass attr to for matching
+ @param negate: should the results be negated?
+ """
+ if not childrestriction.type == self.subtype:
+ raise TypeError("restriction must be of type %r" % (self.subtype,))
+ sf = object.__setattr__
+ sf(self, "negate", negate)
+ sf(self, "_pull_attr", chained_getter(attr))
+ sf(self, "attr", attr)
+ sf(self, "restriction", childrestriction)
+ sf(self, "ignore_missing", ignore_missing)
+
+
+class PackageRestriction_mixin(restriction.base):
+ """Package data restriction."""
+
+ __slots__ = ()
+
+ # Careful: some methods (__eq__, __hash__, intersect) try to work
+ # for subclasses too. They will not behave as intended if a
+ # subclass adds attributes. So if you do that, override the
+ # methods.
+
+ type = restriction.package_type
+ subtype = restriction.value_type
+
+ def _handle_exception(self, pkg, exc):
+ if isinstance(exc, AttributeError):
+ if not self.ignore_missing:
+ logger.exception("failed getting attribute %s from %s, "
+ "exception %s" % (self.attr, str(pkg), str(exc)))
+ s = self.attr.split('.')
+ eargs = [x for x in exc.args if isinstance(x, basestring)]
+ if any(x in s for x in eargs):
+ return False
+ elif any("'%s'" % x in y for x in s for y in eargs):
+ # this is fairly horrible; probably specific to cpython also.
+ # either way, does a lookup specifically for attr components
+                # in the exception's message string, looking for 'attr' in the
+ # text.
+ # if it doesn't match, exception is thrown.
+ return False
+ logger.exception("caught unexpected exception accessing %s from %s, "
+ "exception %s" % (self.attr, str(pkg), str(exc)))
+ return True
+
+ def match(self, pkg):
+ try:
+ return self.restriction.match(self._pull_attr(pkg)) != self.negate
+ except (KeyboardInterrupt, RuntimeError, SystemExit):
+ raise
+ except Exception, e:
+ if self._handle_exception(pkg, e):
+ raise
+ return self.negate
+
+ def force_False(self, pkg):
+ try:
+ if self.negate:
+ return self.restriction.force_True(pkg, self.attr,
+ self._pull_attr(pkg))
+ else:
+ return self.restriction.force_False(pkg, self.attr,
+ self._pull_attr(pkg))
+ except (KeyboardInterrupt, RuntimeError, SystemExit):
+ raise
+ except Exception, e:
+ if self._handle_exception(pkg, e):
+ raise
+ return not self.negate
+
+ def force_True(self, pkg):
+ try:
+ if self.negate:
+ return self.restriction.force_False(pkg, self.attr,
+ self._pull_attr(pkg))
+ else:
+ return self.restriction.force_True(pkg, self.attr,
+ self._pull_attr(pkg))
+ except (KeyboardInterrupt, RuntimeError, SystemExit):
+ raise
+ except Exception, e:
+ if self._handle_exception(pkg, e):
+ raise
+ return self.negate
+
+ def __len__(self):
+ if not isinstance(self.restriction, boolean.base):
+ return 1
+ return len(self.restriction) + 1
+
+ def intersect(self, other):
+ """Build a restriction that matches anything matched by this and other.
+
+ If an optimized intersection cannot be determined this returns C{None}.
+ """
+ if (self.negate != other.negate or
+ self.attr != other.attr or
+ self.__class__ is not other.__class__):
+ return None
+ # Make the most subclassed instance do the intersecting
+ if isinstance(self.restriction, other.restriction.__class__):
+ s = self.restriction.intersect(other.restriction)
+ elif isinstance(other.restriction, self.restriction.__class__):
+ s = other.restriction.intersect(self.restriction)
+ else:
+ # Child restrictions are not related, give up.
+ return None
+ if s is None:
+ return None
+
+ # optimization: do not build a new wrapper if we already have one.
+ if s == self.restriction:
+ return self
+ elif s == other.restriction:
+ return other
+
+ # This breaks completely if we are a subclass with different
+ # __init__ args, so such a subclass had better override this
+ # method...
+ return self.__class__(self.attr, s, negate=self.negate)
+
+ def __hash__(self):
+ return hash((self.negate, self.attr, self.restriction))
+
+ def __str__(self):
+ s = self.attr+" "
+ if self.negate:
+ s += "not "
+ return s + str(self.restriction)
+
+ def __repr__(self):
+ if self.negate:
+ string = '<%s attr=%r restriction=%r negated @%#8x>'
+ else:
+ string = '<%s attr=%r restriction=%r @%#8x>'
+ return string % (
+ self.__class__.__name__, self.attr, self.restriction, id(self))
+
+
+try:
+ from pkgcore.restrictions._restrictions import PackageRestriction as \
+ PackageRestriction_base
+except ImportError:
+ PackageRestriction_base = native_PackageRestriction
+
+class PackageRestriction(PackageRestriction_mixin, PackageRestriction_base):
+ __slots__ = ()
+ __inst_caching__ = True
+
+class Conditional(PackageRestriction):
+
+ """
+ base object representing a conditional package restriction
+
+    used to control whether a payload of restrictions is accessible or not
+ """
+
+ __slots__ = ('payload',)
+
+ __attr_comparison__ = ("__class__", "negate", "attr", "restriction",
+ "payload")
+ __metaclass__ = generic_equality
+ # note that instance caching is turned off.
+    # it rarely pays off for conditionals from a speed/mem standpoint
+
+ def __init__(self, attr, childrestriction, payload, **kwds):
+ """
+ @param attr: attr to match against
+ @param childrestriction: restriction to control whether or not the
+ payload is accessible
+ @param payload: payload data, whatever it may be.
+ @param kwds: additional args to pass to L{PackageRestriction}
+ """
+ PackageRestriction.__init__(self, attr, childrestriction, **kwds)
+ object.__setattr__(self, "payload", tuple(payload))
+
+ def intersect(self, other):
+ # PackageRestriction defines this but its implementation won't
+ # work for us, so fail explicitly.
+ raise NotImplementedError(self)
+
+ def __str__(self):
+ return "( Conditional: %s payload: [ %s ] )" % (
+ PackageRestriction.__str__(self),
+ ", ".join(map(str, self.payload)))
+
+ def __repr__(self):
+ if self.negate:
+ string = '<%s attr=%r restriction=%r payload=%r negated @%#8x>'
+ else:
+ string = '<%s attr=%r restriction=%r payload=%r @%#8x>'
+ return string % (
+ self.__class__.__name__, self.attr, self.restriction, self.payload,
+ id(self))
+
+ def __iter__(self):
+ return iter(self.payload)
+
+ def __hash__(self):
+ return hash((self.attr, self.negate, self.restriction, self.payload))
+
+
+# "Invalid name" (pylint uses the module const regexp, not the class regexp)
+# pylint: disable-msg=C0103
+
+AndRestriction = restriction.curry_node_type(boolean.AndRestriction,
+ restriction.package_type)
+OrRestriction = restriction.curry_node_type(boolean.OrRestriction,
+ restriction.package_type)
+
+AlwaysBool = restriction.curry_node_type(restriction.AlwaysBool,
+ restriction.package_type)
+
+AlwaysTrue = AlwaysBool(negate=True)
+AlwaysFalse = AlwaysBool(negate=False)
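
An illustrative sketch of how the pieces above combine: a PackageRestriction wraps a value restriction and pulls the named attribute off a package before matching. FakePkg here is a hypothetical stand-in for any object exposing a category attribute.

from pkgcore.restrictions import packages, values

class FakePkg(object):
    def __init__(self, category):
        self.category = category

# match packages whose category attribute is exactly "dev-util"
restrict = packages.PackageRestriction(
    "category", values.StrExactMatch("dev-util"))

print restrict.match(FakePkg("dev-util"))   # True
print restrict.match(FakePkg("sys-apps"))   # False
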
diff --git a/pkgcore/restrictions/restriction.py b/pkgcore/restrictions/restriction.py
new file mode 100644
index 0000000..c9f2228
--- /dev/null
+++ b/pkgcore/restrictions/restriction.py
@@ -0,0 +1,200 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# Copyright: 2006 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+"""
+base restriction class
+"""
+
+from snakeoil import caching
+from snakeoil.currying import partial, pretty_docs
+
+class base(object):
+
+ """
+ base restriction matching object.
+
+    all derivatives *should* be __slots__ based (a lot of instances may
+    wind up in memory).
+ """
+
+ __metaclass__ = caching.WeakInstMeta
+ __inst_caching__ = True
+
+    # __weakref__ here is implicit via the metaclass
+ __slots__ = ()
+ package_matching = False
+
+ def __setattr__(self, attr, val):
+ raise TypeError(self, "is immutable")
+
+ def __delattr__(self, attr):
+ raise TypeError(self, "is immutable")
+
+ def match(self, *arg, **kwargs):
+ raise NotImplementedError
+
+ def force_False(self, *arg, **kwargs):
+ return not self.match(*arg, **kwargs)
+
+ def force_True(self, *arg, **kwargs):
+ return self.match(*arg, **kwargs)
+
+ def intersect(self, other):
+ return None
+
+ def __len__(self):
+ return 1
+
+
+class AlwaysBool(base):
+ """
+ restriction that always yields a specific boolean
+ """
+ __slots__ = ("type", "negate")
+
+ __inst_caching__ = True
+
+ def __init__(self, node_type=None, negate=False):
+ """
+ @param node_type: the restriction type the instance should be,
+ typically L{pkgcore.restrictions.packages.package_type} or
+ L{pkgcore.restrictions.values.value_type}
+ @param negate: boolean to return for the match
+ """
+ object.__setattr__(self, "negate", negate)
+ object.__setattr__(self, "type", node_type)
+
+ def match(self, *a, **kw):
+ return self.negate
+
+ def force_True(self, *a, **kw):
+ return self.negate
+
+ def force_False(self, *a, **kw):
+ return not self.negate
+
+ def __iter__(self):
+ return iter(())
+
+ def __str__(self):
+ return "always '%s'" % self.negate
+
+ def __repr__(self):
+ return '<%s always %r @%#8x>' % (
+ self.__class__.__name__, self.negate, id(self))
+
+
+class Negate(base):
+
+ """
+ wrap and negate a restriction instance
+ """
+
+ __slots__ = ("type", "_restrict")
+ __inst_caching__ = False
+
+ def __init__(self, restrict):
+ """
+ @param restrict: L{pkgcore.restrictions.restriction.base} instance
+ to negate
+ """
+ sf = object.__setattr__
+ sf(self, "type", restrict.type)
+ sf(self, "_restrict", restrict)
+
+ def match(self, *a, **kw):
+ return not self._restrict.match(*a, **kw)
+
+ def __str__(self):
+ return "not (%s)" % self._restrict
+
+
+class FakeType(base):
+
+ """
+ wrapper to wrap and fake a node_type
+ """
+
+ __slots__ = ("type", "_restrict")
+ __inst_caching__ = False
+
+ def __init__(self, restrict, new_type):
+ """
+ @param restrict: L{pkgcore.restrictions.restriction.base} instance
+ to wrap
+ @param new_type: new node_type
+ """
+ sf = object.__setattr__
+ sf(self, "type", new_type)
+ sf(self, "_restrict", restrict)
+
+ def match(self, *a, **kw):
+ return self._restrict.match(*a, **kw)
+
+ def __str__(self):
+ return "Faked type(%s): %s" % (self.type, self._restrict)
+
+
+class AnyMatch(base):
+
+ """Apply a nested restriction to every item in a sequence."""
+
+ __slots__ = ('restriction', 'type', 'negate')
+
+ def __init__(self, childrestriction, node_type, negate=False):
+ """Initialize.
+
+ @type childrestriction: restriction
+ @param childrestriction: child restriction applied to every value.
+ @type node_type: string
+ @param node_type: type of this restriction.
+ """
+ sf = object.__setattr__
+ sf(self, "negate", negate)
+ sf(self, "restriction", childrestriction)
+ sf(self, "type", node_type)
+
+ def match(self, val):
+ for x in val:
+ if self.restriction.match(x):
+ return not self.negate
+ return self.negate
+
+ def __str__(self):
+ return "any: %s match" % (self.restriction,)
+
+ def __repr__(self):
+ return '<%s restriction=%r @%#8x>' % (
+ self.__class__.__name__, self.restriction, id(self))
+
+
+def curry_node_type(klass, node_type, extradoc=None):
+ """Helper function for creating restrictions of a certain type.
+
+ This uses L{partial} to pass a node_type to the wrapped class,
+ and extends the docstring.
+
+ @param klass: callable (usually a class) that is wrapped.
+ @param node_type: value passed as node_type.
+ @param extradoc: addition to the docstring. Defaults to
+ "Automatically set to %s type." % node_type
+
+ @return: a wrapped callable.
+ """
+ if extradoc is None:
+ extradoc = "Automatically set to %s type." % (node_type,)
+ doc = klass.__doc__
+ result = partial(klass, node_type=node_type)
+ if doc is None:
+ doc = ''
+ else:
+ # do this so indentation on pydoc __doc__ is sane
+ doc = "\n".join(line.lstrip() for line in doc.split("\n")) + "\n"
+ doc += extradoc
+ return pretty_docs(result, doc)
+
+
+value_type = "values"
+package_type = "package"
+valid_types = (value_type, package_type)
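
A rough sketch of what the curried aliases built from these classes amount to; only AlwaysBool and curry_node_type from above are used, and the alias name PkgAlwaysBool is made up for the example.

from pkgcore.restrictions import restriction

# curry_node_type binds node_type, so the resulting callable behaves
# like a package-level AlwaysBool.
PkgAlwaysBool = restriction.curry_node_type(
    restriction.AlwaysBool, restriction.package_type)

always_true = PkgAlwaysBool(negate=True)    # matches anything
always_false = PkgAlwaysBool(negate=False)  # matches nothing

print always_true.type               # 'package'
print always_true.match(object())    # True
print always_false.match(object())   # False
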
diff --git a/pkgcore/restrictions/util.py b/pkgcore/restrictions/util.py
new file mode 100644
index 0000000..e926000
--- /dev/null
+++ b/pkgcore/restrictions/util.py
@@ -0,0 +1,37 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+restriction related utilities
+"""
+
+from pkgcore.restrictions import packages, boolean, restriction
+from snakeoil.lists import iflatten_func
+
+def _is_package_instance(inst):
+ return (getattr(inst, "type", None) == packages.package_type
+ and not isinstance(inst, boolean.base))
+
+def collect_package_restrictions(restrict, attrs=None):
+ """Collect PackageRestriction instances inside a restriction.
+
+ @param restrict: package instance to scan
+ @param attrs: None (return all package restrictions), or a sequence of
+ specific attrs the package restriction must work against.
+ """
+ if not isinstance(restrict, (list, tuple)):
+ restrict = [restrict]
+ for r in restrict:
+ if not isinstance(r, restriction.base):
+ raise TypeError(
+ "restrict must be of a restriction.base, not %s: %r" % (
+ r.__class__.__name__, r))
+ if attrs is None:
+ for r in iflatten_func(restrict, _is_package_instance):
+ yield r
+ else:
+ if isinstance(attrs, (list, tuple)):
+ attrs = frozenset(attrs)
+ for r in iflatten_func(restrict, _is_package_instance):
+ if getattr(r, "attr", None) in attrs:
+ yield r
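
A short usage sketch for collect_package_restrictions; it assumes the boolean.AndRestriction constructor (defined in pkgcore.restrictions.boolean, not part of this hunk) accepts its child restrictions positionally.

from pkgcore.restrictions import packages, values, util

restrict = packages.AndRestriction(
    packages.PackageRestriction("category", values.StrExactMatch("dev-util")),
    packages.PackageRestriction("package", values.StrGlobMatch("diff")))

# walk the boolean tree, yielding the attribute-level restrictions
for r in util.collect_package_restrictions(restrict):
    print r.attr, r

# or limit the walk to restrictions against a specific attribute
for r in util.collect_package_restrictions(restrict, ["category"]):
    print r
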
diff --git a/pkgcore/restrictions/values.py b/pkgcore/restrictions/values.py
new file mode 100644
index 0000000..8e7b7fd
--- /dev/null
+++ b/pkgcore/restrictions/values.py
@@ -0,0 +1,685 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# Copyright: 2006 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+"""
+value restrictions
+
+works hand in hand with L{pkgcore.restrictions.packages}, these
+classes match against a value handed in, package restrictions pull the
+attr from a package instance and hand it to their wrapped restriction
+(which is a value restriction).
+"""
+
+from pkgcore.restrictions import restriction, boolean, packages
+from snakeoil.klass import generic_equality
+from snakeoil import demandload
+demandload.demandload(globals(), 're', 'snakeoil:lists')
+
+# Backwards compatibility.
+value_type = restriction.value_type
+
+try:
+ from pkgcore.restrictions import _restrictions as extension
+except ImportError:
+ extension = None
+
+class base(restriction.base):
+ """Base restriction matching object for values.
+
+ Beware: do not check for instances of this to detect value
+ restrictions! Use the C{type} attribute instead.
+ """
+
+ __slots__ = ()
+
+ type = restriction.value_type
+
+ def force_True(self, pkg, attr, val):
+ return self.match(val)
+
+ def force_False(self, pkg, attr, val):
+ return not self.match(val)
+
+
+def reflective_hash(self):
+ return self._hash
+
+class hashed_base(base):
+
+ __slots__ = ("_hash")
+ __hash__ = reflective_hash
+
+
+class GetAttrRestriction(packages.PackageRestriction):
+
+ """Restriction pulling an attribute and applying a child restriction."""
+
+ __slots__ = ()
+ type = restriction.value_type
+
+ # XXX this needs further thought.
+ #
+ # The api for force_{True,False} is a ValueRestriction gets called
+ # with a package instance, the attribute name (string), and the
+ # current attribute value. We cannot really provide a child
+ # restriction with a sensible pkg and a sensible attribute name,
+ # so we just punt and return True/False depending on the current
+ # state without "forcing" anything (default implementation in
+ # "base").
+
+ def force_True(self, pkg, attr, val):
+ return self.match(val)
+
+ def force_False(self, pkg, attr, val):
+ return not self.match(val)
+
+
+class VersionRestriction(base):
+ """use this as base for version restrictions.
+
+ Gives a clue to what the restriction does.
+ """
+ __slots__ = ()
+
+
+class StrRegex(hashed_base):
+
+ """
+ regex based matching
+ """
+
+ __slots__ = ('flags', 'regex', '_matchfunc', 'ismatch', 'negate')
+ __metaclass__ = generic_equality
+ __attr_comparison__ = ('_hash',) + __slots__
+ __inst_caching__ = True
+
+ def __init__(self, regex, case_sensitive=True, match=False, negate=False):
+
+ """
+ @param regex: regex pattern to match
+ @param case_sensitive: should the match be case sensitive?
+ @param match: should C{re.match} be used instead of C{re.search}?
+ @param negate: should the match results be negated?
+ """
+
+ sf = object.__setattr__
+ sf(self, "regex", regex)
+ sf(self, "ismatch", match)
+ sf(self, "negate", negate)
+ flags = 0
+ if not case_sensitive:
+ flags = re.I
+ sf(self, "flags", flags)
+ compiled_re = re.compile(regex, flags)
+ if match:
+ sf(self, "_matchfunc", compiled_re.match)
+ else:
+ sf(self, "_matchfunc", compiled_re.search)
+ sf(self, "_hash", hash((self.regex, self.negate, self.flags, self.ismatch)))
+
+ def match(self, value):
+ if not isinstance(value, basestring):
+ # Be too clever for our own good --marienz
+ if value is None:
+ value = ''
+ else:
+ value = str(value)
+ return (self._matchfunc(value) is not None) != self.negate
+
+ def intersect(self, other):
+ if self == other:
+ return self
+ return None
+
+ def __repr__(self):
+ result = [self.__class__.__name__, repr(self.regex)]
+ if self.negate:
+ result.append('negated')
+ if self.ismatch:
+ result.append('match')
+ else:
+ result.append('search')
+ result.append('@%#8x' % (id(self),))
+ return '<%s>' % (' '.join(result),)
+
+ def __str__(self):
+ if self.ismatch:
+ result = 'match '
+ else:
+ result = 'search '
+ result += self.regex
+ if self.negate:
+ return 'not ' + result
+ return result
+
+
+class native_StrExactMatch(object):
+
+ """
+ exact string comparison match
+ """
+
+ __slots__ = ('_hash', 'exact', 'case_sensitive', 'negate')
+ __metaclass__ = generic_equality
+ __attr_comparison__ = __slots__
+
+ def __init__(self, exact, case_sensitive=True, negate=False):
+
+ """
+ @param exact: exact string to match
+ @param case_sensitive: should the match be case sensitive?
+ @param negate: should the match results be negated?
+ """
+
+ sf = object.__setattr__
+ sf(self, "negate", negate)
+ sf(self, "case_sensitive", case_sensitive)
+ if not case_sensitive:
+ sf(self, "exact", str(exact).lower())
+ else:
+ sf(self, "exact", str(exact))
+ sf(self, "_hash", hash((self.exact, self.negate, self.case_sensitive)))
+
+ def match(self, value):
+ if self.case_sensitive:
+ return (self.exact == value) != self.negate
+ else:
+ return (self.exact == value.lower()) != self.negate
+
+ __hash__ = reflective_hash
+
+if extension is None:
+ base_StrExactMatch = native_StrExactMatch
+else:
+ base_StrExactMatch = extension.StrExactMatch
+
+# these are broken out so that it is easier to
+# generate native/cpy versions of the class for
+# testing each.
+def _StrExact_intersect(self, other):
+ s1, s2 = self.exact, other.exact
+ if other.case_sensitive and not self.case_sensitive:
+ s1 = s1.lower()
+ elif self.case_sensitive and not other.case_sensitive:
+ s2 = s2.lower()
+ if s1 == s2 and self.negate == other.negate:
+ if other.case_sensitive:
+ return other
+ return self
+ return None
+
+def _StrExact__repr__(self):
+ if self.negate:
+ string = '<%s %r negated @%#8x>'
+ else:
+ string = '<%s %r @%#8x>'
+ return string % (self.__class__.__name__, self.exact, id(self))
+
+def _StrExact__str__(self):
+ if self.negate:
+ return "!= "+self.exact
+ return "== "+self.exact
+
+class StrExactMatch(base_StrExactMatch, base):
+
+ __slots__ = ()
+ __inst_caching__ = True
+
+ intersect = _StrExact_intersect
+ __repr__ = _StrExact__repr__
+ __str__ = _StrExact__str__
+
+
+class StrGlobMatch(hashed_base):
+
+ """
+ globbing matches; essentially startswith and endswith matches
+ """
+
+ __slots__ = ('glob', 'prefix', 'negate', 'flags')
+ __attr_comparison__ = ('_hash',) + __slots__
+ __metaclass__ = generic_equality
+ __inst_caching__ = True
+
+ def __init__(self, glob, case_sensitive=True, prefix=True, negate=False):
+
+ """
+ @param glob: string chunk that must be matched
+ @param case_sensitive: should the match be case sensitive?
+ @param prefix: should the glob be a prefix check for matching,
+ or postfix matching
+ @param negate: should the match results be negated?
+ """
+
+ sf = object.__setattr__
+ sf(self, "negate", negate)
+ if not case_sensitive:
+ sf(self, "flags", re.I)
+ sf(self, "glob", str(glob).lower())
+ else:
+ sf(self, "flags", 0)
+ sf(self, "glob", str(glob))
+ sf(self, "prefix", prefix)
+ sf(self, "_hash", hash((self.glob, self.negate, self.flags, self.prefix)))
+
+ def match(self, value):
+ value = str(value)
+ if self.flags == re.I:
+ value = value.lower()
+ if self.prefix:
+ f = value.startswith
+ else:
+ f = value.endswith
+ return f(self.glob) ^ self.negate
+
+ def intersect(self, other):
+ if self.match(other.glob):
+ if self.negate == other.negate:
+ return other
+ elif other.match(self.glob):
+ if self.negate == other.negate:
+ return self
+ return None
+
+ def __repr__(self):
+ if self.negate:
+ string = '<%s %r case_sensitive=%r negated @%#8x>'
+ else:
+ string = '<%s %r case_sensitive=%r @%#8x>'
+ if self.prefix:
+ g = self.glob + ".*"
+ else:
+ g = ".*" + self.glob
+ return string % (self.__class__.__name__, g,
+ self.flags == re.I and True or False,
+ id(self))
+
+ def __str__(self):
+ s = ''
+ if self.negate:
+ s = 'not '
+ if self.prefix:
+ return "%s%s*" % (s, self.glob)
+ return "%s*%s" % (s, self.glob)
+
+
+def EqualityMatch(val, negate=False):
+ """
+ equality test wrapping L{ComparisonMatch}
+ """
+ return ComparisonMatch(cmp, val, [0], negate=negate)
+
+def _mangle_cmp_val(val):
+ if val < 0:
+ return -1
+ elif val > 0:
+ return 1
+ return 0
+
+
+class ComparisonMatch(hashed_base):
+ """Match if the comparison funcs return value is what's required."""
+
+ _op_converter = {"=": (0,)}
+ _rev_op_converter = {(0,): "="}
+
+ for k, v in (("<", (-1,)), (">", (1,))):
+ _op_converter[k] = v
+ _op_converter[k+"="] = tuple(sorted(v + (0,)))
+ _rev_op_converter[v] = k
+ _rev_op_converter[tuple(sorted(v+(0,)))] = k+"="
+ _op_converter["!="] = _op_converter["<>"] = (-1, 1)
+ _rev_op_converter[(-1, 1)] = "!="
+ del k, v
+
+ __slots__ = ('cmp_func', 'data', 'matching_vals')
+ __metaclass__ = generic_equality
+ __attr_comparison__ = __slots__
+
+ @classmethod
+ def convert_str_op(cls, op_str):
+ return cls._op_converter[op_str]
+
+ @classmethod
+ def convert_op_str(cls, op):
+ return cls._rev_op_converter[tuple(sorted(op))]
+
+ def __init__(self, cmp_func, data, matching_vals, negate=False):
+
+ """
+ @param cmp_func: comparison function that compares data against what
+ is passed in during match
+ @param data: data to base comparison against
+ @param matching_vals: sequence, composed of
+            [-1 (less than), 0 (equal), and 1 (greater than)].
+            If you specify [-1,0], you're saying
+            "result must be less than or equal to".
+ @param negate: should the results be negated?
+ """
+
+ sf = object.__setattr__
+ sf(self, "cmp_func", cmp_func)
+
+ if not isinstance(matching_vals, (tuple, list)):
+ if isinstance(matching_vals, basestring):
+ matching_vals = self.convert_str_op(matching_vals)
+ elif isinstance(matching_vals, int):
+ matching_vals = [matching_vals]
+ else:
+ raise TypeError("matching_vals must be a list/tuple")
+
+ sf(self, "data", data)
+ if negate:
+ sf(self, "matching_vals",
+ tuple(set([-1, 0, 1]).difference(_mangle_cmp_val(x)
+ for x in matching_vals)))
+ else:
+ sf(self, "matching_vals",
+ tuple(_mangle_cmp_val(x) for x in matching_vals))
+
+ def __hash__(self):
+ return hash((self.cmp_func, self.matching_vals, self.data))
+
+ def match(self, actual_val):
+ return _mangle_cmp_val(
+ self.cmp_func(actual_val, self.data)) in self.matching_vals
+
+ def __repr__(self):
+ return '<%s %s %r @%#8x>' % (
+ self.__class__.__name__, self.convert_op_str(self.matching_vals),
+ self.data, id(self))
+
+ def __str__(self):
+ return "%s %s" % (self.convert_op_str(self.matching_vals), self.data)
+
+
+class ContainmentMatch(hashed_base):
+
+ """used for an 'in' style operation, 'x86' in ['x86','~x86'] for example
+ note that negation of this *does* not result in a true NAND when all is on.
+ """
+
+ __slots__ = ('vals', 'all', 'negate')
+ __metaclass__ = generic_equality
+ __attr_comparison__ = ('_hash',) + __slots__
+ __inst_caching__ = True
+
+ def __init__(self, *vals, **kwds):
+
+ """
+ @param vals: what values to look for during match
+ @keyword all: must all vals be present, or just one for a match
+ to succeed?
+ @keyword negate: should the match results be negated?
+ """
+
+ sf = object.__setattr__
+ sf(self, "all", bool(kwds.pop("all", False)))
+
+ # note that we're discarding any specialized __getitem__ on vals here.
+ # this isn't optimal, and should be special cased for known
+ # types (lists/tuples fex)
+ sf(self, "vals", frozenset(vals))
+ sf(self, "negate", kwds.get("negate", False))
+ sf(self, "_hash", hash((self.all, self.negate, self.vals)))
+
+ def match(self, val):
+ if isinstance(val, basestring):
+ for fval in self.vals:
+ if fval in val:
+ return not self.negate
+ return self.negate
+
+        # this can, and should, be optimized to do len checks- iterate
+        # over the smaller of the two.  see above about special casing
+        # bits; we need the same protection here, on the offchance (as
+        # contents sets do) that the __getitem__ is non standard.
+ try:
+ if self.all:
+ i = iter(val)
+ return bool(self.vals.difference(i)) == self.negate
+ for x in self.vals:
+ if x in val:
+ return not self.negate
+ return self.negate
+ except TypeError:
+ # other way around. rely on contains.
+ if self.all:
+ for k in self.vals:
+ if k not in val:
+ return self.negate
+ return not self.negate
+            for k in self.vals:
+                if k in val:
+                    return not self.negate
+            return self.negate
+
+
+ def force_False(self, pkg, attr, val):
+
+ # "More than one statement on a single line"
+ # pylint: disable-msg=C0321
+
+ # XXX pretty much positive this isn't working.
+ if isinstance(val, basestring) or not getattr(pkg, 'configurable',
+ False):
+            # unchangeable
+ return not self.match(val)
+
+ if self.negate:
+ if self.all:
+ def filter(truths):
+ return False in truths
+ def true(r, pvals):
+ return pkg.request_enable(attr, r)
+ def false(r, pvals):
+ return pkg.request_disable(attr, r)
+
+ truths = [x in val for x in self.vals]
+
+ for x in boolean.iterative_quad_toggling(
+ pkg, None, list(self.vals), 0, len(self.vals), truths,
+ filter, desired_false=false, desired_true=true):
+ return True
+ else:
+ if pkg.request_disable(attr, *self.vals):
+ return True
+ return False
+
+ if not self.all:
+ if pkg.request_disable(attr, *self.vals):
+ return True
+ else:
+ l = len(self.vals)
+ def filter(truths): return truths.count(True) < l
+ def true(r, pvals): return pkg.request_enable(attr, r)
+ def false(r, pvals): return pkg.request_disable(attr, r)
+ truths = [x in val for x in self.vals]
+ for x in boolean.iterative_quad_toggling(
+ pkg, None, list(self.vals), 0, l, truths, filter,
+ desired_false=false, desired_true=true):
+ return True
+ return False
+
+
+ def force_True(self, pkg, attr, val):
+
+ # "More than one statement on a single line"
+ # pylint: disable-msg=C0321
+
+ # XXX pretty much positive this isn't working.
+
+ if isinstance(val, basestring) or not getattr(pkg, 'configurable',
+ False):
+            # unchangeable
+ return self.match(val)
+
+ if not self.negate:
+ if not self.all:
+ def filter(truths):
+ return True in truths
+ def true(r, pvals):
+ return pkg.request_enable(attr, r)
+ def false(r, pvals):
+ return pkg.request_disable(attr, r)
+
+ truths = [x in val for x in self.vals]
+
+ for x in boolean.iterative_quad_toggling(
+ pkg, None, list(self.vals), 0, len(self.vals), truths,
+ filter, desired_false=false, desired_true=true):
+ return True
+ else:
+ if pkg.request_enable(attr, *self.vals):
+ return True
+ return False
+
+ # negation
+ if not self.all:
+ if pkg.request_disable(attr, *self.vals):
+ return True
+ else:
+ def filter(truths): return True not in truths
+ def true(r, pvals): return pkg.request_enable(attr, r)
+ def false(r, pvals): return pkg.request_disable(attr, r)
+ truths = [x in val for x in self.vals]
+ for x in boolean.iterative_quad_toggling(
+ pkg, None, list(self.vals), 0, len(self.vals), truths, filter,
+ desired_false=false, desired_true=true):
+ return True
+ return False
+
+ def __repr__(self):
+ if self.negate:
+ string = '<%s %r all=%s negated @%#8x>'
+ else:
+ string = '<%s %r all=%s @%#8x>'
+ return string % (
+ self.__class__.__name__, tuple(self.vals), self.all, id(self))
+
+ def __str__(self):
+ if self.negate:
+ s = "not contains [%s]"
+ else:
+ s = "contains [%s]"
+ return s % ', '.join(map(str, self.vals))
+
+
+class FlatteningRestriction(hashed_base):
+
+ """Flatten the values passed in and apply the nested restriction."""
+
+ __slots__ = ('dont_iter', 'restriction', 'negate')
+ __hash__ = object.__hash__
+
+ def __init__(self, dont_iter, childrestriction, negate=False):
+ """Initialize.
+
+ @type dont_iter: type or tuple of types
+ @param dont_iter: type(s) not to flatten.
+ Passed to L{snakeoil.lists.iflatten_instance}.
+ @type childrestriction: restriction
+ @param childrestriction: restriction applied to the flattened list.
+ """
+ object.__setattr__(self, "negate", negate)
+ object.__setattr__(self, "dont_iter", dont_iter)
+ object.__setattr__(self, "restriction", childrestriction)
+
+ def match(self, val):
+ return self.restriction.match(
+ lists.iflatten_instance(val, self.dont_iter)) != self.negate
+
+ def __str__(self):
+ return 'flattening_restriction: dont_iter = %s, restriction = %s' % (
+ self.dont_iter, self.restriction)
+
+ def __repr__(self):
+ return '<%s restriction=%r dont_iter=%r negate=%r @%#8x>' % (
+ self.__class__.__name__,
+ self.restriction, self.dont_iter, self.negate,
+ id(self))
+
+
+class FunctionRestriction(hashed_base):
+
+ """Convenience class for creating special restrictions."""
+
+ __slots__ = ('func', 'negate')
+
+ __hash__ = object.__hash__
+
+ def __init__(self, func, negate=False):
+ """Initialize.
+
+ C{func} is used as match function.
+
+ It will usually be impossible for the backend to optimize this
+ restriction. So even though you can implement an arbitrary
+ restriction using this class you should only use it if it is
+ very unlikely backend-specific optimizations will be possible.
+ """
+ object.__setattr__(self, 'negate', negate)
+ object.__setattr__(self, 'func', func)
+
+ def match(self, val):
+ return self.func(val) != self.negate
+
+ def __repr__(self):
+ return '<%s func=%r negate=%r @%#8x>' % (
+ self.__class__.__name__, self.func, self.negate, id(self))
+
+
+class StrConversion(hashed_base):
+
+ """convert passed in data to a str object"""
+
+ __hash__ = object.__hash__
+ __slots__ = ('restrict',)
+
+ def __init__(self, restrict):
+ object.__setattr__(self, "restrict", restrict)
+
+ def match(self, val):
+ return self.restrict.match(str(val))
+
+
+class UnicodeConversion(StrConversion):
+
+ """convert passed in data to a unicode obj"""
+
+ def match(self, val):
+ return self.restrict.match(unicode(val))
+
+
+class AnyMatch(restriction.AnyMatch):
+
+ __slots__ = ()
+
+ __hash__ = object.__hash__
+
+ def __init__(self, childrestriction, negate=False):
+ # Hack: skip calling base.__init__. Doing this would make
+ # restriction.base.__init__ run twice.
+ restriction.AnyMatch.__init__(
+ self, childrestriction, restriction.value_type, negate=negate)
+
+ def force_True(self, pkg, attr, val):
+ return self.match(val)
+
+ def force_False(self, pkg, attr, val):
+ return not self.match(val)
+
+
+# "Invalid name" (pylint uses the module const regexp, not the class regexp)
+# pylint: disable-msg=C0103
+
+AndRestriction = restriction.curry_node_type(boolean.AndRestriction,
+ restriction.value_type)
+OrRestriction = restriction.curry_node_type(boolean.OrRestriction,
+ restriction.value_type)
+
+AlwaysBool = restriction.curry_node_type(restriction.AlwaysBool,
+ restriction.value_type)
+
+AlwaysTrue = AlwaysBool(negate=True)
+AlwaysFalse = AlwaysBool(negate=False)
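
A brief sketch of these value restrictions used directly and wrapped by a PackageRestriction; the sample strings and the keywords tuple are invented example data.

from pkgcore.restrictions import packages, values

# plain value-level matching
print values.StrExactMatch("sys-apps").match("sys-apps")          # True
print values.StrGlobMatch("dev-", prefix=True).match("dev-util")  # True
print values.StrRegex("^dev-(util|python)$", match=True).match("dev-python")  # True
print values.ContainmentMatch("x86").match(("x86", "~amd64"))     # True

# wrapped so it operates on a package attribute:
# "the keyword 'x86' appears in pkg.keywords"
kw_restrict = packages.PackageRestriction(
    "keywords", values.ContainmentMatch("x86"))
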
diff --git a/pkgcore/scripts/__init__.py b/pkgcore/scripts/__init__.py
new file mode 100644
index 0000000..089357c
--- /dev/null
+++ b/pkgcore/scripts/__init__.py
@@ -0,0 +1,16 @@
+# Copyright: 2006 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+"""Commandline scripts.
+
+Modules in here are accessible through the pwrapper script. They
+should have an C{OptionParser} attribute that is a
+L{snakeoil.commandline.OptionParser} subclass and a C{main}
+attribute that is a function usable with
+L{snakeoil.commandline.main}.
+
+The goal of this is avoiding boilerplate and making sure the scripts
+have a similar look and feel. If your script needs to do something
+L{snakeoil.commandline} does not support please improve it instead
+of bypassing it.
+"""
diff --git a/pkgcore/scripts/filter_env.py b/pkgcore/scripts/filter_env.py
new file mode 100644
index 0000000..d83b333
--- /dev/null
+++ b/pkgcore/scripts/filter_env.py
@@ -0,0 +1,95 @@
+# Copyright: 2006 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+"""Commandline interface to L{pkgcore.ebuild.filter_env}."""
+
+
+import sys
+
+from pkgcore.util import commandline
+# ordering here matters; commandline uses a copying trick to avoid the heavy inspect load.
+import optparse
+from pkgcore.ebuild import filter_env
+from pkgcore.log import logger
+
+
+def input_callback(option, opt_str, value, parser):
+ if parser.values.input is not None:
+ raise optparse.OptionValueError('-i cannot be specified twice')
+ try:
+ parser.values.input = open(value, 'r')
+ except (IOError, OSError), e:
+ raise optparse.OptionValueError('error opening %r (%s)' % (value, e))
+
+
+def append_comma_separated(option, opt_str, value, parser):
+ parser.values.ensure_value(option.dest, []).extend(
+ v for v in value.split(',') if v)
+
+
+class OptionParser(commandline.OptionParser):
+
+ def __init__(self, **kwargs):
+ commandline.OptionParser.__init__(self, **kwargs)
+ self.add_option(
+ '-V', '--var-match', action='store_false', default=True)
+ self.add_option(
+ '-F', '--func-match', action='store_false', default=True)
+ self.add_option(
+ '--input', '-i', action='callback', type='string',
+ callback=input_callback,
+ help='Filename to read the env from (uses stdin if omitted).')
+ self.add_option(
+ '--funcs', '-f', action='callback', type='string',
+ callback=append_comma_separated)
+ self.add_option(
+ '--vars', '-v', action='callback', type='string',
+ callback=append_comma_separated)
+
+ def check_values(self, values, args):
+ values, args = commandline.OptionParser.check_values(
+ self, values, args)
+
+ if values.input is None:
+ # Hack: use stdin if it is not a tty. No util.commandline
+ # support for this kind of thing, so mess around with sys
+ # directly.
+ if sys.stdin.isatty():
+ self.error('No input file supplied (and stdin is a tty).')
+ values.input = sys.stdin
+
+ return values, args
+
+
+def main(options, out, err):
+ if options.debug:
+ if options.funcs is None:
+ logger.debug('=== Funcs: None')
+ else:
+ logger.debug('=== Funcs:')
+ for thing in options.funcs:
+ logger.debug(repr(thing))
+ if options.vars is None:
+ logger.debug('=== Vars: None')
+ else:
+ logger.debug('=== Vars:')
+ for thing in options.vars:
+ logger.debug(repr(thing))
+ logger.debug('var_match: %r, func_match: %r',
+ options.var_match, options.func_match)
+
+ if options.funcs:
+ funcs = filter_env.build_regex_string(options.funcs)
+ else:
+ funcs = None
+
+ if options.vars:
+ vars = filter_env.build_regex_string(options.vars)
+ else:
+ vars = None
+
+ file_buff = options.input.read() + '\0'
+
+ # Hack: write to the stream directly.
+ filter_env.run(out.stream, file_buff, vars, funcs,
+ options.var_match, options.func_match)
diff --git a/pkgcore/scripts/pclone_cache.py b/pkgcore/scripts/pclone_cache.py
new file mode 100644
index 0000000..b55269e
--- /dev/null
+++ b/pkgcore/scripts/pclone_cache.py
@@ -0,0 +1,73 @@
+# Copyright: 2005-2006 Brian Harring <ferringb@gmail.com>
+# Copyright: 2006 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+
+"""Clone a repository cache."""
+
+
+import time
+
+from pkgcore.util import commandline
+
+
+class OptionParser(commandline.OptionParser):
+
+ def __init__(self, **kwargs):
+ commandline.OptionParser.__init__(
+ self, description=__doc__, usage='%prog [options] source target',
+ **kwargs)
+ self.add_option('--verbose', '-v', action='store_true',
+ help='print keys as they are processed')
+
+ def check_values(self, values, args):
+ values, args = commandline.OptionParser.check_values(
+ self, values, args)
+ if len(args) != 2:
+ self.error(
+ 'Need two arguments: cache label to read from and '
+ 'cache label to write to.')
+
+ config = values.config
+ try:
+ values.source = config.cache[args[0]]
+ except KeyError:
+ self.error("read cache label '%s' isn't defined." % (args[0],))
+ try:
+ values.target = config.cache[args[1]]
+ except KeyError:
+ self.error("write cache label '%s' isn't defined." % (args[1],))
+
+ if values.target.readonly:
+ self.error("can't update cache label '%s', it's marked readonly." %
+ (args[1],))
+
+ return values, ()
+
+
+def main(options, out, err):
+ source, target = options.source, options.target
+ if not target.autocommits:
+ target.sync_rate = 1000
+ if options.verbose:
+ out.write("grabbing target's existing keys")
+ valid = set()
+ start = time.time()
+ if options.verbose:
+ for k, v in source.iteritems():
+ out.write("updating %s" % (k,))
+ target[k] = v
+ valid.add(k)
+ else:
+ for k, v in source.iteritems():
+ target[k] = v
+ valid.add(k)
+
+ for x in target.iterkeys():
+ if not x in valid:
+ if options.verbose:
+ out.write("deleting %s" % (x,))
+ del target[x]
+
+ if options.verbose:
+ out.write("took %i seconds" % int(time.time() - start))
diff --git a/pkgcore/scripts/pconfig.py b/pkgcore/scripts/pconfig.py
new file mode 100644
index 0000000..06c06f5
--- /dev/null
+++ b/pkgcore/scripts/pconfig.py
@@ -0,0 +1,317 @@
+# Copyright: 2006 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+"""Configuration querying utility."""
+
+
+import traceback
+
+from pkgcore.config import errors, basics
+from pkgcore.plugin import get_plugins
+from pkgcore.util import commandline
+from snakeoil import modules
+
+class DescribeClassParser(commandline.OptionParser):
+
+ """Our option parser."""
+
+ def check_values(self, values, args):
+ values, args = commandline.OptionParser.check_values(
+ self, values, args)
+ if len(args) != 1:
+ self.error('need exactly one argument: class to describe.')
+ try:
+ values.describe_class = modules.load_attribute(args[0])
+ except modules.FailedImport, e:
+ self.error(str(e))
+ return values, ()
+
+
+def dump_section(config, out):
+ out.first_prefix.append(' ')
+ out.write('# typename of this section: %s' % (config.type.name,))
+ out.write('class %s.%s;' % (config.type.callable.__module__,
+ config.type.callable.__name__))
+ if config.default:
+ out.write('default true;')
+ for key, val in sorted(config.config.iteritems()):
+ typename = config.type.types.get(key)
+ if typename is None:
+ if config.type.allow_unknowns:
+ typename = 'str'
+ else:
+ raise ValueError('no type set for %s (%r)' % (key, val))
+ out.write('# type: %s' % (typename,))
+ if typename.startswith('lazy_refs'):
+ typename = typename[5:]
+ val = list(ref.collapse() for ref in val)
+ elif typename.startswith('lazy_ref'):
+ typename = typename[5:]
+ val = val.collapse()
+ if typename == 'str':
+ out.write('%s %r;' % (key, val))
+ elif typename == 'bool':
+ out.write('%s %s;' % (key, bool(val)))
+ elif typename == 'list':
+ out.write('%s %s;' % (
+ key, ' '.join(repr(string) for string in val)))
+ elif typename == 'callable':
+ out.write('%s %s.%s;' % (key, val.__module__, val.__name__))
+ elif typename.startswith('ref:'):
+ if val.name is None:
+ out.write('%s {' % (key,))
+ dump_section(val, out)
+ out.write('};')
+ else:
+ out.write('%s %r;' % (key, val.name))
+ elif typename.startswith('refs:'):
+ out.autoline = False
+ out.write('%s' % (key,))
+ for i, subconf in enumerate(val):
+ if subconf.name is None:
+ out.autoline = True
+ out.write(' {')
+ dump_section(subconf, out)
+ out.autoline = False
+ out.write('}')
+ else:
+ out.write(' %r' % (subconf.name,))
+ out.autoline = True
+ out.write(';')
+ else:
+ out.write('# %s = %r of unknown type %s' % (key, val, typename))
+ out.first_prefix.pop()
+
+
+def get_classes(configs):
+ # Not particularly efficient (doesn't memoize already visited configs)
+ classes = set()
+ for config in configs:
+ classes.add('%s.%s' % (config.type.callable.__module__,
+ config.type.callable.__name__))
+ for key, val in config.config.iteritems():
+ typename = config.type.types.get(key)
+ if typename is None:
+ continue
+ if typename.startswith('ref:'):
+ classes.update(get_classes((val,)))
+ elif typename.startswith('refs:'):
+ classes.update(get_classes(val))
+ elif typename.startswith('lazy_refs'):
+ classes.update(get_classes(c.collapse() for c in val))
+ elif typename.startswith('lazy_ref'):
+ classes.update(get_classes((val.collapse(),)))
+ return classes
+
+def classes_main(options, out, err):
+ """List all classes referenced by the config."""
+ configmanager = options.config
+ sections = []
+ for name in configmanager.sections():
+ try:
+ sections.append(configmanager.collapse_named_section(name))
+ except errors.CollapseInheritOnly:
+ pass
+ for classname in sorted(get_classes(sections)):
+ out.write(classname)
+
+
+def write_type(out, type_obj):
+ out.write('typename is %s' % (type_obj.name,))
+ if type_obj.doc:
+ for line in type_obj.doc.split('\n'):
+ out.write(line.strip(), wrap=True)
+ if type_obj.allow_unknowns:
+ out.write('values not listed are handled as strings')
+ out.write()
+ for name, typename in sorted(type_obj.types.iteritems()):
+ out.write('%s: %s' % (name, typename), autoline=False)
+ if name in type_obj.required:
+ out.write(' (required)', autoline=False)
+ out.write()
+
+
+def describe_class_main(options, out, err):
+ """Describe the arguments a class needs."""
+ try:
+ type_obj = basics.ConfigType(options.describe_class)
+ except errors.TypeDefinitionError:
+ err.write('Not a valid type!')
+ return 1
+ write_type(out, type_obj)
+
+
+def uncollapsable_main(options, out, err):
+ """Show things that could not be collapsed."""
+ config = options.config
+ for name in config.sections():
+ try:
+ config.collapse_named_section(name)
+ except errors.CollapseInheritOnly:
+ pass
+ except errors.ConfigurationError, e:
+ if options.debug:
+ traceback.print_exc()
+ else:
+ out.write(str(e))
+ out.write()
+
+
+class _TypeNameParser(commandline.OptionParser):
+
+ """Base for subcommands that take an optional type name."""
+
+ def check_values(self, values, args):
+ values, args = commandline.OptionParser.check_values(self, values,
+ args)
+ if len(args) > 1:
+ self.error('pass at most one typename')
+ if args:
+ values.typename = args[0]
+ else:
+ values.typename = None
+ return values, ()
+
+
+class DumpParser(_TypeNameParser):
+
+ def __init__(self, **kwargs):
+ # Make sure we do not pass two description kwargs if kwargs has one.
+ kwargs['description'] = (
+ 'Dump the entire configuration. '
+ 'The format used is similar to the ini-like default '
+ 'format, but do not rely on this to always write a '
+ 'loadable config. There may be quoting issues. '
+ 'With a typename argument only that type is dumped.')
+ kwargs['usage'] = '%prog [options] [typename]'
+ _TypeNameParser.__init__(self, **kwargs)
+
+
+def dump_main(options, out, err):
+ """Dump the entire configuration."""
+ config = options.config
+ if options.typename is None:
+ names = config.sections()
+ else:
+ names = getattr(config, options.typename).iterkeys()
+ for name in sorted(names):
+ try:
+ section = config.collapse_named_section(name)
+ except errors.CollapseInheritOnly:
+ continue
+ out.write('%r {' % (name,))
+ dump_section(section, out)
+ out.write('}')
+ out.write()
+
+
+class ConfigurablesParser(_TypeNameParser):
+
+ def __init__(self, **kwargs):
+ # Make sure we do not pass two description kwargs if kwargs has one.
+ kwargs['description'] = (
+ 'List registered configurables (may not be complete). '
+ 'With a typename argument only configurables of that type are '
+ 'listed.')
+ kwargs['usage'] = '%prog [options] [typename]'
+ _TypeNameParser.__init__(self, **kwargs)
+
+
+def configurables_main(options, out, err):
+ """List registered configurables."""
+ for configurable in get_plugins('configurable'):
+ type_obj = basics.ConfigType(configurable)
+ if options.typename is not None and type_obj.name != options.typename:
+ continue
+ out.write(out.bold, '%s.%s' % (
+ configurable.__module__, configurable.__name__))
+ write_type(out, type_obj)
+ out.write()
+ out.write()
+
+
+def _dump_uncollapsed_section(config, out, err, section):
+ """Write a single section."""
+ if isinstance(section, basestring):
+ out.write('named section %r' % (section,))
+ return
+ for key in sorted(section.keys()):
+ kind, value = section.get_value(config, key, 'repr')
+ out.write('# type: %s' % (kind,))
+ if kind == 'list':
+ for name, val in zip((
+ key + '.prepend', key, key + '.append'), value):
+ if val:
+ out.write(
+ repr(name), ' = ', ' '.join(repr(v) for v in val))
+ continue
+ if kind in ('refs', 'str'):
+ for name, val in zip((
+ key + '.prepend', key, key + '.append'), value):
+ if not val:
+ continue
+ out.write(repr(name), ' = ', autoline=False)
+ if kind == 'str':
+ out.write(repr(val))
+ else:
+ out.write()
+ out.first_prefix.append(' ')
+ try:
+ for subnr, subsection in enumerate(val):
+ subname = 'nested section %s' % (subnr + 1,)
+ out.write(subname)
+ out.write('=' * len(subname))
+ _dump_uncollapsed_section(config, out, err, subsection)
+ out.write()
+ finally:
+ out.first_prefix.pop()
+ continue
+ out.write('%r = ' % (key,), autoline=False)
+ if kind == 'callable':
+ out.write(value.__module__, value.__name__)
+ elif kind == 'bool':
+ out.write(str(value))
+ elif kind == 'ref':
+ out.first_prefix.append(' ')
+ try:
+ out.write()
+ _dump_uncollapsed_section(config, out, err, value)
+ finally:
+ out.first_prefix.pop()
+ else:
+ err.error('unsupported type %r' % (kind,))
+
+
+def dump_uncollapsed_main(options, out, err):
+ """dump the configuration in a raw, uncollapsed form.
+ Not directly usable as a configuration file, mainly used for inspection
+ """
+ out.write('''# Warning:
+# Do not copy this output to a configuration file directly,
+# because the types you see here are only guesses.
+# A value used as "list" in the collapsed config will often
+# show up as "string" here and may need to be converted
+# (for example from space-separated to comma-separated)
+# to work in a config file with a different format.
+''')
+ for i, source in enumerate(options.config.configs):
+ s = 'Source %s' % (i + 1,)
+ out.write(out.bold, '*' * len(s))
+ out.write(out.bold, s)
+ out.write(out.bold, '*' * len(s))
+ out.write()
+ for name, section in sorted(source.iteritems()):
+ out.write('%s' % (name,))
+ out.write('=' * len(name))
+ _dump_uncollapsed_section(options.config, out, err, section)
+ out.write()
+
+
+commandline_commands = {
+ 'dump': (DumpParser, dump_main),
+ 'classes': (commandline.OptionParser, classes_main),
+ 'uncollapsable': (commandline.OptionParser, uncollapsable_main),
+ 'describe_class': (DescribeClassParser, describe_class_main),
+ 'configurables': (ConfigurablesParser, configurables_main),
+ 'dump-uncollapsed': (commandline.OptionParser, dump_uncollapsed_main),
+ }
diff --git a/pkgcore/scripts/pebuild.py b/pkgcore/scripts/pebuild.py
new file mode 100644
index 0000000..4f40078
--- /dev/null
+++ b/pkgcore/scripts/pebuild.py
@@ -0,0 +1,57 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# Copyright: 2006 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+
+"""Low-level ebuild operations."""
+
+
+from pkgcore.util import commandline
+from pkgcore.ebuild import atom, errors
+
+
+class OptionParser(commandline.OptionParser):
+
+ def __init__(self, **kwargs):
+ commandline.OptionParser.__init__(
+ self, description=__doc__, usage='%prog [options] atom phases',
+ **kwargs)
+ self.add_option("--no-auto", action='store_true', default=False,
+ help="run just the specified phases. may explode.")
+
+ def check_values(self, values, args):
+ values, args = commandline.OptionParser.check_values(
+ self, values, args)
+ if len(args) < 2:
+ self.error('Specify an atom and at least one phase.')
+ try:
+ values.atom = atom.atom(args[0])
+ except errors.MalformedAtom, e:
+ self.error(str(e))
+ values.phases = args[1:]
+ return values, ()
+
+def main(options, out, err):
+ pkgs = options.config.get_default('domain').all_repos.match(options.atom)
+ if not pkgs:
+ err.write('got no matches for %s\n' % (options.atom,))
+ return 1
+ if len(pkgs) > 1:
+ err.write('got multiple matches for %s: %s\n' % (options.atom, pkgs))
+ return 1
+ # pull clean out.
+ l = list(x for x in options.phases if x != "clean")
+ clean = len(l) != len(options.phases)
+ if clean:
+ options.phases = l
+ kwds = {}
+ if options.no_auto:
+ kwds["ignore_deps"] = True
+ if "setup" in l:
+ options.phases.insert(0, "fetch")
+ build = pkgs[0].build(clean=clean)
+ phase_funcs = list(getattr(build, x) for x in options.phases)
+ for phase, f in zip(options.phases, phase_funcs):
+ out.write()
+ out.write('executing phase %s' % (phase,))
+ f(**kwds)
diff --git a/pkgcore/scripts/pmaint.py b/pkgcore/scripts/pmaint.py
new file mode 100644
index 0000000..d61bc43
--- /dev/null
+++ b/pkgcore/scripts/pmaint.py
@@ -0,0 +1,393 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# Copyright: 2006 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+"""
+repository maintenance
+"""
+
+__all__ = ('CopyParser', 'DigestParser', 'RegenParser', 'SyncParser')
+
+from pkgcore.util.commandline import convert_to_restrict, OptionParser
+from snakeoil.demandload import demandload
+demandload(globals(),
+ 'errno',
+ 'threading:Event',
+ 'threading:Thread',
+ 'Queue:Queue,Empty',
+ 'time:time,sleep',
+ 'snakeoil.osutils:pjoin',
+ 'pkgcore.repository:multiplex',
+ 'pkgcore.package:mutated',
+ 'pkgcore.fs:contents,livefs',
+ 'pkgcore.ebuild:atom,errors,digest',
+ 'pkgcore.restrictions.boolean:OrRestriction',
+)
+
+commandline_commands = {}
+
+def format_seq(seq, formatter=repr):
+ if not seq:
+ seq = None
+ elif len(seq) == 1:
+ seq = seq[0]
+ else:
+ seq = tuple(sorted(str(x) for x in seq))
+ return formatter(seq)
+
+
+class SyncParser(OptionParser):
+
+ def __init__(self, **kwargs):
+ OptionParser.__init__(self, description=
+ "update a local repository to match its parent", **kwargs)
+ self.add_option("--force", action='store_true', default=False,
+ help="force an action")
+
+ def check_values(self, values, args):
+ values, args = OptionParser.check_values(
+ self, values, args)
+
+ if not args:
+ values.repos = values.config.repo.keys()
+ else:
+ for x in args:
+ if x not in values.config.repo:
+ self.error("repo %r doesn't exist:\nvalid repos %r" %
+ (x, values.config.repo.keys()))
+ values.repos = args
+ return values, []
+
+def sync_main(options, out, err):
+ """Update a local repositories to match their remote parent"""
+ config = options.config
+ succeeded, failed = [], []
+ seen = set()
+ for x in options.repos:
+ r = config.repo[x]
+ if r in seen:
+ continue
+ seen.add(r)
+ if not r.syncable:
+ continue
+ out.write("*** syncing %r..." % x)
+ if not r.sync(force=options.force):
+ out.write("*** failed syncing %r" % x)
+ failed.append(x)
+ else:
+ succeeded.append(x)
+ out.write("*** synced %r" % x)
+ if len(succeeded) + len(failed) > 1:
+ out.write("*** synced %s" % format_seq(sorted(succeeded)))
+ if failed:
+ err.write("!!! failed sync'ing %s" % format_seq(sorted(failed)))
+ if failed:
+ return 1
+ return 0
+
+commandline_commands['sync'] = (SyncParser, sync_main)
+
+
+class CopyParser(OptionParser):
+
+ def __init__(self, **kwargs):
+ OptionParser.__init__(self, description=
+ "copy built pkg(s) into a repository", **kwargs)
+ self.add_option("-s", "--source-repo",
+ help="copy from just the specified repository; else defaults "
+ "to finding any match")
+ self.add_option("-t", "--target-repo", default=None,
+ help="repository to copy packages into; if specified, "
+ "you don't need to specify the target repo as the last arg. "
+ "Mainly useful for xargs invocations")
+ self.add_option("--ignore-existing", "-i", default=False,
+ action='store_true',
+ help="skip existing pkgs, instead of treating it as an overwrite "
+ "error")
+ self.add_option("--copy-missing", action="store_true", default=False,
+ help="Copy packages missing in target repo from source repo")
+ self.add_option("--force", action='store_true', default=False,
+ help="try and force the copy if the target repository is marked as "
+ "immutable")
+
+ def check_values(self, values, args):
+ l = len(args)
+ if not values.target_repo and l < 2:
+ self.error("target_report wasn't specified- specify it either as "
+ "the last arguement, or via --target-repo")
+
+ if values.target_repo is not None:
+ target_repo = values.target_repo
+ else:
+ target_repo = args.pop(-1)
+
+ try:
+ values.target_repo = values.config.repo[target_repo]
+ except KeyError:
+ self.error("target repo %r was not found, known repos-\n%s" %
+ (target_repo, format_seq(values.config.repo.keys())))
+
+ if values.target_repo.frozen and not values.force:
+ self.error("target repo %r is frozen; --force is required to "
+ "override this" % target_repo)
+
+ if values.source_repo:
+ try:
+ values.source_repo = values.config.repo[values.source_repo]
+ except KeyError:
+ self.error("source repo %r was not found, known repos-\n%s" %
+ (values.source_repo, format_seq(values.config.repo.keys())))
+ else:
+ values.source_repo = multiplex.tree(*values.config.repos.values())
+
+ values.candidates = []
+ if values.copy_missing:
+ restrict = OrRestriction(*convert_to_restrict(args))
+ for package in values.source_repo.itermatch(restrict):
+ if not values.target_repo.match(package.versioned_atom):
+ values.candidates.append(package.versioned_atom)
+ else:
+ values.candidates = convert_to_restrict(args)
+
+ return values, []
+
+
+def copy_main(options, out, err):
+ """Copy pkgs between repositories."""
+
+ trg_repo = options.target_repo
+ src_repo = options.source_repo
+
+ failures = False
+ kwds = {'force': options.force}
+
+ for candidate in options.candidates:
+ matches = src_repo.match(candidate)
+ if not matches:
+ err.write("didn't find any matching pkgs for %r" % candidate)
+ failures = True
+ continue
+
+ for src in matches:
+ existing = trg_repo.match(src.versioned_atom)
+ args = []
+ pkg = src
+ if len(existing) > 1:
+ err.write(
+ "skipping %r; tried to replace more then one pkg %r..." %
+ (src, format_seq(existing)))
+ failures = True
+ continue
+ elif len(existing) == 1:
+ if options.ignore_existing:
+ out.write("skipping %s, since %s exists already" %
+ (src, existing[0]))
+ continue
+ out.write("replacing %s with %s... " % (src, existing[0]))
+ op = trg_repo.replace
+ args = existing
+ else:
+ out.write("copying %s... " % src)
+ op = trg_repo.install
+
+ if src.repo.livefs:
+ out.write("forcing regen of contents due to src being livefs..")
+ new_contents = contents.contentsSet(mutable=True)
+ for fsobj in src.contents:
+ try:
+ new_contents.add(livefs.gen_obj(fsobj.location))
+ except OSError, oe:
+ if oe.errno != errno.ENOENT:
+ err.write("failed accessing fs obj %r; %r\n"
+ "aborting this copy" %
+ (fsobj, oe))
+ failures = True
+ new_contents = None
+ break
+ err.write("warning: dropping fs obj %r since it "
+ "doesn't exist" % fsobj)
+ if new_contents is None:
+ continue
+ pkg = mutated.MutatedPkg(src, {'contents':new_contents})
+
+ op = op(*(args + [pkg]), **kwds)
+ op.finish()
+
+ out.write("completed\n")
+ if failures:
+ return 1
+ return 0
+
+commandline_commands['copy'] = (CopyParser, copy_main)
+
+
+class RegenParser(OptionParser):
+
+ def __init__(self, **kwargs):
+ OptionParser.__init__(
+ self, description=__doc__, usage='%prog [options] repo [threads]',
+ **kwargs)
+
+ def check_values(self, values, args):
+ values, args = OptionParser.check_values(
+ self, values, args)
+ if not args:
+ self.error('Need a repository name.')
+ if len(args) > 2:
+ self.error('I do not know what to do with more than 2 arguments')
+
+ if len(args) == 2:
+ try:
+ values.thread_count = int(args[1])
+ except ValueError:
+ self.error('%r should be an integer' % (args[1],))
+ if values.thread_count <= 0:
+ self.error('thread count needs to be at least 1')
+ else:
+ values.thread_count = 1
+
+ try:
+ values.repo = values.config.repo[args[0]]
+ except KeyError:
+ self.error('repo %r was not found! known repos: %s' % (
+ args[0], ', '.join(str(x) for x in values.config.repo)))
+
+ return values, ()
+
+
+def regen_iter(iterable, err):
+ for x in iterable:
+ try:
+ x.keywords
+ except RuntimeError:
+ raise
+ except Exception, e:
+ err.write("caught exception %s for %s" % (e, x))
+
+def reclaim_threads(threads, err):
+ for x in threads:
+ try:
+ x.join()
+ except RuntimeError:
+ raise
+ except Exception, e:
+ err.write("caught exception %s reclaiming thread" % (e,))
+
+def regen_main(options, out, err):
+ """Regenerate a repository cache."""
+ start_time = time()
+ # HACK: store this here so we can assign to it from inside def passthru.
+ options.count = 0
+ if options.thread_count == 1:
+ def passthru(iterable):
+ for x in iterable:
+ options.count += 1
+ yield x
+ regen_iter(passthru(options.repo), err)
+ else:
+ queue = Queue(options.thread_count * 2)
+ kill = Event()
+ kill.clear()
+ def iter_queue(kill, qlist, timeout=0.25):
+ while not kill.isSet():
+ try:
+ yield qlist.get(timeout=timeout)
+ except Empty:
+ continue
+ regen_threads = [
+ Thread(
+ target=regen_iter, args=(iter_queue(kill, queue), err))
+ for x in xrange(options.thread_count)]
+ out.write('starting %d threads' % (options.thread_count,))
+ try:
+ for x in regen_threads:
+ x.start()
+ out.write('started')
+ # now we feed the queue.
+ for pkg in options.repo:
+ options.count += 1
+ queue.put(pkg)
+ except Exception:
+ kill.set()
+ reclaim_threads(regen_threads, err)
+ raise
+
+ # by now, queue is fed. reliable for our uses since the queue
+ # is only subtracted from.
+ while not queue.empty():
+ sleep(.5)
+ kill.set()
+ reclaim_threads(regen_threads, err)
+ assert queue.empty()
+ out.write("finished %d nodes in in %.2f seconds" % (options.count,
+ time() - start_time))
+ return 0
+
+commandline_commands['regen'] = (RegenParser, regen_main)
+
+
+class DigestParser(OptionParser):
+
+ def __init__(self, **kwargs):
+ OptionParser.__init__(
+ self, description="generate digests for given atoms", **kwargs)
+ self.add_option('-t', '--type', type='choice',
+ choices=("manifest1", "manifest2", "both"), default="both",
+ help="type of manifest to generate (defaults to both). "
+ "valid values are: 'manifest1', 'manifest2', 'both'")
+
+ def check_values(self, values, args):
+ values, args = OptionParser.check_values(
+ self, values, args)
+
+ if not args:
+ self.error('Specify a repo')
+ repo = args.pop(0)
+ try:
+ values.repo = values.config.repo[repo]
+ except KeyError:
+ self.error("repo %r was not found, known repos-\n%s" %
+ (repo, format_seq(values.config.repo.keys())))
+
+ if values.type == "both":
+ values.type = ("manifest1", "manifest2")
+ else:
+ values.type = (values.type,)
+
+ if not args:
+ self.error('Specify an atom')
+ values.atoms = []
+ for arg in args:
+ try:
+ values.atoms.append(atom.atom(arg))
+ except errors.MalformedAtom, e:
+ self.error(str(e))
+
+ return values, ()
+
+
+def digest_main(options, out, err):
+ """Write Manifests and digests"""
+
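+ # manifest1 writes files/digest-<pkg>-<fullver> per version; manifest2
+ # writes a single Manifest in the package directory.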
+ for atom in options.atoms:
+ pkgs = options.repo.match(atom)
+ if not pkgs:
+ err.write('No matches for %s\n' % (atom,))
+ return 1
+ for pkg in pkgs:
+ if "manifest1" in options.type:
+ if options.debug:
+ out.write('Writing digest for %s:' % pkg.cpvstr)
+ location = pjoin(pkg.repo.location, pkg.key, "files",
+ "digest-%s-%s" % (pkg.versioned_atom.package,
+ pkg.versioned_atom.fullver))
+ digest.serialize_digest(open(location, 'w'), pkg.fetchables)
+ if "manifest2" in options.type:
+ if options.debug:
+ out.write('Writing Manifest for %s:' % pkg.cpvstr)
+ digest.serialize_manifest("%s/%s" %(pkg.repo.location, pkg.key),
+ pkg.fetchables)
+
+# XXX: harring disabled this for 0.3.
+# re-enable it when the bits update manifest.
+#commandline_commands['digest'] = (DigestParser, digest_main)
diff --git a/pkgcore/scripts/pmerge.py b/pkgcore/scripts/pmerge.py
new file mode 100644
index 0000000..8885f1f
--- /dev/null
+++ b/pkgcore/scripts/pmerge.py
@@ -0,0 +1,561 @@
+# Copyright: 2006-2007 Brian Harring <ferringb@gmail.com>
+# Copyright: 2006 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+
+"""Mess with the resolver and vdb."""
+
+from time import time
+
+from pkgcore.util import commandline, parserestrict, repo_utils
+from pkgcore.ebuild import resolver
+from pkgcore.repository import multiplex
+from pkgcore.interfaces import observer, format
+from pkgcore.pkgsets.glsa import KeyedAndRestriction
+from pkgcore.ebuild.atom import atom
+
+from snakeoil import lists
+from snakeoil.formatters import ObserverFormatter
+from snakeoil.compatibility import any
+from pkgcore.resolver.util import reduce_to_failures
+
+class OptionParser(commandline.OptionParser):
+
+ def __init__(self, **kwargs):
+ commandline.OptionParser.__init__(self, description=__doc__, **kwargs)
+ self.add_option('--deep', '-D', action='store_true',
+ help='force the resolver to verify already installed dependencies')
+ self.add_option('--unmerge', '-C', action='store_true',
+ help='unmerge a package')
+ self.add_option('--clean', action='store_true',
+ help='remove installed packages that are not referenced by any '
+ 'target packages/sets; defaults to -s world -s system if no targets'
+ ' are specified. Use with *caution*; used incorrectly, this option '
+ 'can render your system unusable. Implies --deep')
+ self.add_option('--upgrade', '-u', action='store_true',
+ help='try to upgrade already installed packages/dependencies')
+ self.add_option('--set', '-s', action='append',
+ help='specify a pkgset to use')
+ self.add_option('--ignore-failures', action='store_true',
+ help='ignore resolution failures')
+ self.add_option('--preload-vdb-state', action='store_true',
+ help=\
+"""enable preloading of the installed packages database
+This causes the resolver to work with a complete graph, thus disallowing
+actions that conflict with installed packages. If disabled, it's possible
+for the requested action to conflict with already installed dependencies
+that aren't involved in the graph of the requested operation""")
+
+ self.add_option('--pretend', '-p', action='store_true',
+ help="do the resolution, but don't merge/fetch anything")
+ self.add_option('--ask', '-a', action='store_true',
+ help="do the resolution, but ask to merge/fetch anything")
+ self.add_option('--fetchonly', '-f', action='store_true',
+ help="do only the fetch steps of the resolved plan")
+ self.add_option('--ignore-cycles', '-i', action='store_true',
+ help=\
+"""ignore cycles if they're found to be unbreakable;
+for example, a depends on b and b depends on a, with neither yet built""")
+
+ self.add_option('-B', '--with-built-depends', action='store_true',
+ default=False,
+ help="whether or not to process build depends for pkgs that "
+ "are already built; defaults to ignoring them")
+ self.add_option('--nodeps', action='store_true',
+ help='disable dependency resolution')
+ self.add_option('--noreplace', action='store_false',
+ dest='replace', default=True,
+ help="don't reinstall target atoms if they're already installed")
+ self.add_option('--usepkg', '-k', action='store_true',
+ help="prefer to use binpkgs")
+ self.add_option('--usepkgonly', '-K', action='store_true',
+ help="use only built packages")
+ self.add_option('--empty', '-e', action='store_true',
+ help="force rebuilding of all involved packages, using installed "
+ "packages only to satisfy building the replacements")
+ self.add_option('--force', action='store_true',
+ dest='force',
+ help="force merging to a repo, regardless of whether it's frozen")
+ self.add_option('--oneshot', '-o', '-1', action='store_true',
+ default=False,
+ help="do not record changes in the world file; if a set is "
+ "involved, defaults to forcing oneshot")
+ self.add_option(
+ '--formatter', '-F', action='callback', type='string',
+ callback=commandline.config_callback,
+ callback_args=('pmerge_formatter',),
+ help='which formatter to output --pretend or --ask output through.')
+ self.add_option('--domain', action='callback', type='string',
+ callback=commandline.config_callback, callback_args=('domain',),
+ help='specify which domain to use; else uses the "default" domain')
+
+ def check_values(self, options, args):
+ options, args = commandline.OptionParser.check_values(
+ self, options, args)
+ options.targets = args
+
+ # TODO this is rather boilerplate-ish, the commandline module
+ # should somehow do this for us.
+ if options.formatter is None:
+ options.formatter = options.config.get_default('pmerge_formatter')
+ if options.formatter is None:
+ self.error(
+ 'No default formatter found, fix your configuration '
+ 'or pass --formatter (Valid formatters: %s)' % (
+ ', '.join(options.config.pmerge_formatter),))
+
+ if options.domain is None:
+ options.domain = options.config.get_default('domain')
+ if options.domain is None:
+ self.error(
+ 'No default domain found, fix your configuration or pass '
+ '--domain (valid domains: %s)' %
+ (', '.join(options.config.domain),))
+
+ if options.unmerge:
+ if options.set:
+ self.error("Using sets with -C probably isn't wise, aborting")
+ if options.upgrade:
+ self.error("Cannot upgrade and unmerge simultaneously")
+ if not options.targets:
+ self.error("You must provide at least one atom")
+ if options.clean:
+ self.error("Cannot use -C with --clean")
+ if options.clean:
+ if options.set or options.targets:
+ self.error("--clean currently has set/targets disabled; in "
+ "other words, accepts no args")
+ options.set = ['world', 'system']
+ options.deep = True
+ if options.usepkgonly or options.usepkg:
+ self.error(
+ '--usepkg and --usepkgonly cannot be used with --clean')
+ elif options.usepkgonly and options.usepkg:
+ self.error('--usepkg is redundant when --usepkgonly is used')
+ if options.set:
+ options.replace = False
+ if not options.targets and not options.set:
+ self.error('Need at least one atom/set')
+ return options, ()
+
+class AmbiguousQuery(parserestrict.ParseError):
+ def __init__(self, token, keys):
+ parserestrict.ParseError.__init__(
+ self, '%s: multiple matches (%s)' % (token, ', '.join(keys)))
+ self.token = token
+ self.keys = keys
+
+class NoMatches(parserestrict.ParseError):
+ def __init__(self, token):
+ parserestrict.ParseError.__init__(self, '%s: no matches' % (token,))
+
+def parse_atom(token, repo, return_none=False):
+ """Use L{parserestrict.parse_match} to produce a single atom.
+
+ This matches the restriction against the repo, raises
+ AmbiguousQuery if they belong to multiple cat/pkgs, returns an
+ atom otherwise.
+
+ @param token: string to convert.
+ @param repo: L{pkgcore.repository.prototype.tree} instance to search in.
+ @param return_none: indicates if no matches raises or returns C{None}
+
+ @return: an atom or C{None}.
+ """
+ # XXX this should be in parserestrict in some form, perhaps.
+ restriction = parserestrict.parse_match(token)
+ key_matches = set(x.key for x in repo.itermatch(restriction))
+ if not key_matches:
+ raise NoMatches(token)
+ elif len(key_matches) > 1:
+ raise AmbiguousQuery(token, sorted(key_matches))
+ if isinstance(restriction, atom):
+ # atom is guaranteed to be fine, since it's cat/pkg
+ return restriction
+ return KeyedAndRestriction(restriction, key=key_matches.pop())
+
+
+class Failure(ValueError):
+ """Raised internally to indicate an "expected" failure condition."""
+
+
+def unmerge(out, err, vdb, tokens, options, formatter, world_set=None):
+ """Unmerge tokens. Hackish; should be rolled back into the resolver."""
+ all_matches = set()
+ for token in tokens:
+ # Catch restrictions matching across more than one category.
+ # Multiple matches in the same category are acceptable.
+
+ # The point is that matching across more than one category is
+ # nearly always unintentional ("pmerge -C spork" without
+ # realising there are sporks in more than one category), but
+ # matching more than one cat/pkg is impossible without
+ # explicit wildcards.
+ restriction = parserestrict.parse_match(token)
+ matches = vdb.match(restriction)
+ if not matches:
+ raise Failure('Nothing matches %s' % (token,))
+ categories = set(pkg.category for pkg in matches)
+ if len(categories) > 1:
+ raise parserestrict.ParseError(
+ '%s is in multiple categories (%s)' % (
+ token, ', '.join(set(pkg.key for pkg in matches))))
+ all_matches.update(matches)
+
+ matches = sorted(all_matches)
+ out.write(out.bold, 'The following packages are to be unmerged:')
+ out.prefix = [out.bold, ' * ', out.reset]
+ for match in matches:
+ out.write(match.cpvstr)
+ out.prefix = []
+
+ repo_obs = observer.file_repo_observer(ObserverFormatter(out))
+
+ if options.pretend:
+ return
+
+ if (options.ask and not
+ formatter.ask("Would you like to unmerge these packages?")):
+ return
+ return do_unmerge(options, out, err, vdb, matches, world_set, repo_obs)
+
+def do_unmerge(options, out, err, vdb, matches, world_set, repo_obs):
+ if vdb.frozen:
+ if options.force:
+ out.write(
+ out.fg('red'), out.bold,
+ 'warning: vdb is frozen, overriding')
+ vdb.frozen = False
+ else:
+ raise Failure('vdb is frozen')
+
+ for idx, match in enumerate(matches):
+ out.write("removing %i of %i: %s" % (idx + 1, len(matches), match))
+ out.title("%i/%i: %s" % (idx + 1, len(matches), match))
+ op = vdb.uninstall(match, observer=repo_obs)
+ ret = op.finish()
+ if not ret:
+ if not options.ignore_failures:
+ raise Failure('failed unmerging %s' % (match,))
+ out.write(out.fg('red'), 'failed unmerging ', match)
+ update_worldset(world_set, match, remove=True)
+ out.write("finished; removed %i packages" % len(matches))
+
+
+def get_pkgset(config, err, setname):
+ try:
+ return config.pkgset[setname]
+ except KeyError:
+ err.write('No set called %r!\nknown sets: %r' %
+ (setname, config.pkgset.keys()))
+ return None
+
+def display_failures(out, sequence, first_level=True):
+ sequence = iter(sequence)
+ frame = sequence.next()
+ if first_level:
+ # pops below need to exactly match.
+ out.first_prefix.extend((out.fg("red"), "!!!", out.reset))
+ out.first_prefix.append(" ")
+ out.write("request %s, mode %s" % (frame.atom, frame.mode))
+ for pkg, steps in sequence:
+ out.write("trying %s" % str(pkg.cpvstr))
+ out.first_prefix.append(" ")
+ for step in steps:
+ if isinstance(step, list):
+ display_failures(out, step, False)
+ elif step[0] == 'reduce':
+ continue
+ elif step[0] == 'blocker':
+ out.write("blocker %s failed due to %s existing" % (step[1],
+ ', '.join(str(x) for x in step[2])))
+ elif step[0] == 'cycle':
+ out.write("%s cycle on %s: %s" % (step[2].mode, step[2].atom, step[3]))
+ elif step[0] == 'viable' and not step[1]:
+ out.write("%s: failed %s" % (step[3], step[4]))
+ else:
+ out.write(step)
+ out.first_prefix.pop()
+ out.first_prefix.pop()
+ if first_level:
+ [out.first_prefix.pop() for x in (1,2,3)]
+
+def update_worldset(world_set, pkg, remove=False):
+ if world_set is None:
+ return
+ if remove:
+ try:
+ world_set.remove(pkg)
+ except KeyError:
+ # nothing to remove, thus skip the flush
+ return
+ else:
+ world_set.add(pkg)
+ world_set.flush()
+
+def main(options, out, err):
+ config = options.config
+ if options.debug:
+ resolver.plan.limiters.add(None)
+
+ domain = options.domain
+ vdb = domain.all_vdbs
+
+ formatter = options.formatter(out=out, err=err,
+ use_expand=domain.use_expand,
+ use_expand_hidden=domain.use_expand_hidden)
+
+ # This mode does not care about sets and packages so bypass all that.
+ if options.unmerge:
+ world_set = None
+ if not options.oneshot:
+ world_set = get_pkgset(config, err, "world")
+ if world_set is None:
+ err.write("Disable world updating via --oneshot, or fix your "
+ "configuration")
+ return 1
+ try:
+ unmerge(
+ out, err, vdb, options.targets, options, formatter, world_set)
+ except (parserestrict.ParseError, Failure), e:
+ out.error(str(e))
+ return 1
+ return
+
+ all_repos = domain.all_repos
+ repos = list(all_repos.trees)
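+ # --usepkgonly drops source (ebuild_src) repos entirely; --usepkg keeps them
+ # but lists built-package repos first so the resolver prefers binpkgs.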
+ if options.usepkgonly or options.usepkg:
+ if options.usepkgonly:
+ repos = [
+ repo for repo in all_repos.trees
+ if getattr(repo, 'format_magic', None) != 'ebuild_src']
+ else:
+ repos = [
+ repo for repo in all_repos.trees
+ if getattr(repo, 'format_magic', None) == 'ebuild_built'] + [
+ repo for repo in all_repos.trees
+ if getattr(repo, 'format_magic', None) != 'ebuild_built']
+ all_repos = multiplex.tree(*repos)
+
+ atoms = []
+ for setname in options.set:
+ pkgset = get_pkgset(config, err, setname)
+ if pkgset is None:
+ return 1
+ l = list(pkgset)
+ if not l:
+ out.write("skipping set %s: set is empty, nothing to update" % setname)
+ else:
+ atoms.extend(l)
+
+ for token in options.targets:
+ try:
+ a = parse_atom(token, all_repos, return_none=True)
+ except parserestrict.ParseError, e:
+ out.error(str(e))
+ return 1
+ if a is None:
+ if token in config.pkgset:
+ out.error(
+ 'No package matches %r, but there is a set with '
+ 'that name. Use -s to specify a set.' % (token,))
+ return 2
+ elif options.ignore_failures:
+ out.error('No matches for %r; ignoring it' % token)
+ else:
+ return -1
+ else:
+ atoms.append(a)
+
+ if not atoms:
+ out.error('No targets specified; nothing to do')
+ return 1
+
+ atoms = lists.stable_unique(atoms)
+
+ world_set = None
+ if (not options.set or options.clean) and not options.oneshot:
+ world_set = get_pkgset(config, err, 'world')
+ if world_set is None:
+ err.write("Disable world updating via --oneshot, or fix your "
+ "configuration")
+ return 1
+
+ if options.upgrade:
+ resolver_kls = resolver.upgrade_resolver
+ else:
+ resolver_kls = resolver.min_install_resolver
+
+ extra_kwargs = {}
+ if options.empty:
+ extra_kwargs['resolver_cls'] = resolver.empty_tree_merge_plan
+ if options.debug:
+ extra_kwargs['debug'] = True
+
+ resolver_inst = resolver_kls(
+ vdb, repos, verify_vdb=options.deep, nodeps=options.nodeps,
+ drop_cycles=options.ignore_cycles, force_replacement=options.replace,
+ process_built_depends=options.with_built_depends,
+ **extra_kwargs)
+
+ if options.preload_vdb_state:
+ out.write(out.bold, ' * ', out.reset, 'Preloading vdb... ')
+ vdb_time = time()
+ resolver_inst.load_vdb_state()
+ vdb_time = time() - vdb_time
+ else:
+ vdb_time = 0.0
+
+ failures = []
+ resolve_time = time()
+ out.write(out.bold, ' * ', out.reset, 'Resolving...')
+ out.title('Resolving...')
+ for restrict in atoms:
+ ret = resolver_inst.add_atom(restrict)
+ if ret:
+ out.error('resolution failed')
+ just_failures = reduce_to_failures(ret[1])
+ display_failures(out, just_failures)
+ failures.append(restrict)
+ if not options.ignore_failures:
+ break
+ resolve_time = time() - resolve_time
+ if failures:
+ out.write()
+ out.write('Failures encountered:')
+ for restrict in failures:
+ out.error("failed '%s'" % (restrict,))
+ out.write('potentials:')
+ match_count = 0
+ for r in repo_utils.get_raw_repos(repos):
+ l = r.match(restrict)
+ if l:
+ out.write(
+ "repo %s: [ %s ]" % (r, ", ".join(str(x) for x in l)))
+ match_count += len(l)
+ if not match_count:
+ out.write("No matches found in %s" % (repos,))
+ out.write()
+ if not options.ignore_failures:
+ return 1
+
+ if options.clean:
+ out.write(out.bold, ' * ', out.reset, 'Packages to be removed:')
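+ # Start from every installed package and subtract everything the resolved
+ # graph still references; whatever remains is unreferenced and gets removed.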
+ vset = set(vdb)
+ len_vset = len(vset)
+ vset.difference_update(y.pkg for y in
+ resolver_inst.state.iter_ops(True))
+ wipes = sorted(x for x in vset if x.package_is_real)
+ for x in wipes:
+ out.write("Remove %s" % x)
+ out.write()
+ if wipes:
+ out.write("removing %i packages of %i installed, %0.2f%%." %
+ (len(wipes), len_vset, 100*(len(wipes)/float(len_vset))))
+ else:
+ out.write("no packages to remove")
+ if options.pretend:
+ return 0
+ if options.ask:
+ if not formatter.ask("Do you wish to proceed?", default_answer=False):
+ return 1
+ out.write()
+ repo_obs = observer.file_repo_observer(ObserverFormatter(out))
+ do_unmerge(options, out, err, vdb, wipes, world_set, repo_obs)
+ return 0
+
+ changes = list(x for x in resolver_inst.state.iter_ops()
+ if x.pkg.package_is_real)
+
+ if options.ask or options.pretend:
+ for op in changes:
+ formatter.format(op)
+ formatter.end()
+
+
+ if vdb_time:
+ out.write(out.bold, 'Took %.2f' % (vdb_time,), out.reset,
+ ' seconds to preload vdb state')
+ if options.pretend:
+ return
+
+ if (options.ask and not
+ formatter.ask("Would you like to merge these packages?")):
+ return
+
+ build_obs = observer.file_build_observer(ObserverFormatter(out))
+ repo_obs = observer.file_repo_observer(ObserverFormatter(out))
+
+ change_count = len(changes)
+ for count, op in enumerate(changes):
+ out.write("Processing %i of %i: %s" % (count + 1, change_count,
+ op.pkg.cpvstr))
+ out.title("%i/%i: %s" % (count + 1, change_count, op.pkg.cpvstr))
+ if op.desc != "remove":
+ if not options.fetchonly and options.debug:
+ out.write("Forcing a clean of workdir")
+ buildop = op.pkg.build(observer=build_obs, clean=True)
+ if options.fetchonly:
+ out.write("\n%i files required-" % len(op.pkg.fetchables))
+ try:
+ ret = buildop.fetch()
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except Exception, e:
+ ret = e
+ if ret != True:
+ out.error("got %s for a phase execution for %s" % (ret, op.pkg))
+ if not options.ignore_failures:
+ return 1
+ buildop.cleanup()
+ del buildop, ret
+ continue
+
+ ret = None
+ try:
+ built_pkg = buildop.finalize()
+ if built_pkg is False:
+ ret = built_pkg
+ except format.errors, e:
+ ret = e
+ if ret is not None:
+ out.error("Failed to build %s: %s" % (op.pkg, ret))
+ if not options.ignore_failures:
+ return 1
+ continue
+
+ out.write()
+ if op.desc == "replace":
+ if op.old_pkg == op.pkg:
+ out.write(">>> Reinstalling %s" % (built_pkg.cpvstr))
+ else:
+ out.write(">>> Replacing %s with %s" % (
+ op.old_pkg.cpvstr, built_pkg.cpvstr))
+ i = vdb.replace(op.old_pkg, built_pkg, observer=repo_obs)
+
+ else:
+ out.write(">>> Installing %s" % built_pkg.cpvstr)
+ i = vdb.install(built_pkg, observer=repo_obs)
+
+ # force this explicitly- can hold onto a helluva lot more
+ # than we would like.
+ del built_pkg
+ else:
+ out.write(">>> Removing %s" % op.pkg.cpvstr)
+ i = vdb.uninstall(op.pkg, observer=repo_obs)
+ ret = i.finish()
+ if ret != True:
+ out.error("got %s for a phase execution for %s" % (ret, op.pkg))
+ if not options.ignore_failures:
+ return 1
+ buildop.cleanup()
+ if world_set:
+ if op.desc == "remove":
+ out.write('>>> Removing %s from world file' % op.pkg.cpvstr)
+ update_worldset(world_set, op.pkg, remove=True)
+ elif any(x.match(op.pkg) for x in atoms):
+ if not options.upgrade:
+ out.write('>>> Adding %s to world file' % op.pkg.cpvstr)
+ update_worldset(world_set, op.pkg)
+ out.write("finished")
+ return 0
diff --git a/pkgcore/scripts/pplugincache.py b/pkgcore/scripts/pplugincache.py
new file mode 100644
index 0000000..ce5cdc7
--- /dev/null
+++ b/pkgcore/scripts/pplugincache.py
@@ -0,0 +1,40 @@
+# Copyright: 2006 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+
+"""Update the plugin cache."""
+
+
+from pkgcore.util import commandline
+from pkgcore import plugin
+from snakeoil import modules
+
+class OptionParser(commandline.OptionParser):
+
+ def __init__(self, **kwargs):
+ commandline.OptionParser.__init__(
+ self, description=__doc__, usage='%prog [packages]', **kwargs)
+
+ def check_values(self, values, args):
+ """Sanity check and postprocess after parsing."""
+ values, args = commandline.OptionParser.check_values(
+ self, values, args)
+ if not args:
+ args = ['pkgcore.plugins']
+ values.packages = []
+ for arg in args:
+ try:
+ package = modules.load_module(arg)
+ except modules.FailedImport, e:
+ self.error('Failed to import %s (%s)' % (arg, e))
+ if not getattr(package, '__path__', False):
+ self.error('%s is not a package' % (arg,))
+ values.packages.append(package)
+ return values, ()
+
+
+def main(options, out, err):
+ """Update caches."""
+ for package in options.packages:
+ out.write('Updating cache for %s...' % (package.__name__,))
+ plugin.initialize_cache(package)
diff --git a/pkgcore/scripts/pquery.py b/pkgcore/scripts/pquery.py
new file mode 100644
index 0000000..280f182
--- /dev/null
+++ b/pkgcore/scripts/pquery.py
@@ -0,0 +1,882 @@
+# Copyright: 2006 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+# Based on pquery by Brian Harring <ferringb@gmail.com>
+
+
+"""Extract information from repositories."""
+
+
+
+from pkgcore.restrictions import packages, values, boolean, restriction
+from pkgcore.ebuild import conditionals, atom
+from pkgcore.util import (
+ commandline, repo_utils, parserestrict, packages as pkgutils)
+
+# ordering here matters; pkgcore does a trick to commandline to avoid the
+# heavy inspect loadup hit.
+import optparse
+
+
+# To add a new restriction you have to do the following:
+# - add a parse function for it here.
+# - add the parse function to the PARSE_FUNCS dict.
+# - add an optparse option using the name you used in the dict as
+# both the typename and the long option name.
+
+def parse_revdep(value):
+ """Value should be an atom, packages with deps intersecting that match."""
+ try:
+ targetatom = atom.atom(value)
+ except atom.MalformedAtom, e:
+ raise parserestrict.ParseError(str(e))
+ val_restrict = values.FlatteningRestriction(
+ atom.atom,
+ values.AnyMatch(values.FunctionRestriction(targetatom.intersects)))
+ return packages.OrRestriction(finalize=True, *list(
+ packages.PackageRestriction(dep, val_restrict)
+ for dep in ('depends', 'rdepends', 'post_rdepends')))
+
+def parse_description(value):
+ """Value is used as a regexp matching description or longdescription."""
+ matcher = values.StrRegex(value, case_sensitive=False)
+ return packages.OrRestriction(finalize=True, *list(
+ packages.PackageRestriction(attr, matcher)
+ for attr in ('description', 'longdescription')))
+
+def parse_ownsre(value):
+ """Value is a regexp matched against the string form of an fs object.
+
+ This means the object kind is prepended to the path the regexp has
+ to match.
+ """
+ return packages.PackageRestriction(
+ 'contents', values.AnyMatch(values.GetAttrRestriction(
+ 'location', values.StrRegex(value))))
+
+
+class DataSourceRestriction(values.base):
+
+ """Turn a data_source into a line iterator and apply a restriction."""
+
+ def __init__(self, childrestriction, **kwargs):
+ values.base.__init__(self, **kwargs)
+ self.restriction = childrestriction
+
+ def __str__(self):
+ return 'DataSourceRestriction: %s negate=%s' % (
+ self.restriction, self.negate)
+
+ def __repr__(self):
+ if self.negate:
+ string = '<%s restriction=%r negate @%#8x>'
+ else:
+ string = '<%s restriction=%r @%#8x>'
+ return string % (self.__class__.__name__, self.restriction, id(self))
+
+ def match(self, value):
+ return self.restriction.match(iter(value.get_fileobj())) ^ self.negate
+
+ __hash__ = object.__hash__
+
+
+def parse_envmatch(value):
+ """Apply a regexp to the environment."""
+ return packages.PackageRestriction(
+ 'environment', DataSourceRestriction(values.AnyMatch(
+ values.StrRegex(value))))
+
+
+def parse_maintainer_email(value):
+ """
+ Case-insensitive regex match on the email bit of metadata.xml's
+ maintainer data.
+ """
+ return packages.PackageRestriction(
+ 'maintainers', values.AnyMatch(values.GetAttrRestriction(
+ 'email', values.StrRegex(value.lower(),
+ case_sensitive=False))))
+
+def parse_maintainer_name(value):
+ """
+ Case-insensitive regex match on the name bit of metadata.xml's
+ maintainer data.
+ """
+ return packages.PackageRestriction(
+ 'maintainers', values.AnyMatch(values.GetAttrRestriction(
+ 'name', values.StrRegex(value.lower(),
+ case_sensitive=False))))
+
+def parse_maintainer(value):
+ """
+ Case-insensitive regex match on the combined 'name <email>' bit of
+ metadata.xml's maintainer data.
+ """
+ return packages.PackageRestriction(
+ 'maintainers', values.AnyMatch(
+ values.UnicodeConversion(
+ values.StrRegex(value.lower(),
+ case_sensitive=False))))
+
+
+def parse_expression(string):
+ """Convert a string to a restriction object using pyparsing."""
+ # Two reasons to delay this import: we want to deal if it is
+ # not there and the import is slow (needs to compile a bunch
+ # of regexps).
+ try:
+ import pyparsing as pyp
+ except ImportError:
+ raise parserestrict.ParseError('pyparsing is not installed.')
+
+ grammar = getattr(parse_expression, 'grammar', None)
+ if grammar is None:
+
+ anystring = pyp.quotedString.copy().setParseAction(pyp.removeQuotes)
+ anystring |= pyp.Word(pyp.alphanums + ',')
+
+ def funcall(name, parser):
+ """Create a pyparsing expression from a name and parse func."""
+ # This function cannot be inlined below: we use its scope to
+ # "store" the parser function. If we store the parser function
+ # as default argument to the _parse function pyparsing passes
+ # different arguments (it detects the number of arguments the
+ # function takes).
+ result = (pyp.Suppress('%s(' % (name,)) + anystring +
+ pyp.Suppress(')'))
+ def _parse(tokens):
+ return parser(tokens[0])
+ result.setParseAction(_parse)
+ return result
+
+
+ boolcall = pyp.Forward()
+ expr = boolcall
+ for name, func in PARSE_FUNCS.iteritems():
+ expr |= funcall(name, func)
+
+ andcall = (pyp.Suppress(pyp.CaselessLiteral('and') + '(') +
+ pyp.delimitedList(expr) + pyp.Suppress(')'))
+ def _parse_and(tokens):
+ return packages.AndRestriction(*tokens)
+ andcall.setParseAction(_parse_and)
+
+ orcall = (pyp.Suppress(pyp.CaselessLiteral('or') + '(') +
+ pyp.delimitedList(expr) + pyp.Suppress(')'))
+ def _parse_or(tokens):
+ return packages.OrRestriction(*tokens)
+ orcall.setParseAction(_parse_or)
+
+ notcall = (pyp.Suppress(pyp.CaselessLiteral('not') + '(') + expr +
+ pyp.Suppress(')'))
+ def _parse_not(tokens):
+ return restriction.Negate(tokens[0])
+ notcall.setParseAction(_parse_not)
+
+ # "Statement seems to have no effect"
+ # pylint: disable-msg=W0104
+ boolcall << (notcall | andcall | orcall)
+
+ # This forces a match on the entire thing, without it trailing
+ # unparsed data is ignored.
+ grammar = pyp.stringStart + expr + pyp.stringEnd
+
+ # grammar.validate()
+
+ parse_expression.grammar = grammar
+
+ try:
+ return grammar.parseString(string)[0]
+ except pyp.ParseException, e:
+ raise parserestrict.ParseError(e.msg)
+
+
+PARSE_FUNCS = {
+ 'restrict_revdep': parse_revdep,
+ 'description': parse_description,
+ 'ownsre': parse_ownsre,
+ 'environment': parse_envmatch,
+ 'expr': parse_expression,
+ 'maintainer': parse_maintainer,
+ 'maintainer_name': parse_maintainer_name,
+ 'maintainer_email': parse_maintainer_email,
+ }
+
+# This is not just a blind "update" because we really need a config
+# option for everything in this dict (so parserestrict growing parsers
+# would break us).
+for _name in ['match']:
+ PARSE_FUNCS[_name] = parserestrict.parse_funcs[_name]
+
+for _name, _attr in [
+ ('herd', 'herds'),
+ ('license', 'license'),
+ ('hasuse', 'iuse'),
+ ('owns', 'contents'),
+ ]:
+ PARSE_FUNCS[_name] = parserestrict.comma_separated_containment(_attr)
+
+del _name, _attr
+
+
+def optparse_type(parsefunc):
+ """Wrap a parsefunc shared with the expression-style code for optparse."""
+ def _typecheck(option, opt, value):
+ try:
+ return parsefunc(value)
+ except parserestrict.ParseError, e:
+ raise optparse.OptionValueError('option %s: %s' % (opt, e))
+ return _typecheck
+
+
+def atom_type(option, opt, value):
+ try:
+ return atom.atom(value)
+ except atom.MalformedAtom, e:
+ raise optparse.OptionValueError('option %s: %s' % (opt, e))
+
+
+extras = dict((parser_name, optparse_type(parser_func))
+ for parser_name, parser_func in PARSE_FUNCS.iteritems())
+extras['atom'] = atom_type
+
+class Option(commandline.Option):
+ """C{optparse.Option} subclass supporting our custom types."""
+ TYPES = optparse.Option.TYPES + tuple(extras.keys())
+ # Copy the original dict
+ TYPE_CHECKER = dict(optparse.Option.TYPE_CHECKER)
+ TYPE_CHECKER.update(extras)
+
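+# With these extra types registered, the restriction parsers can be used
+# directly as optparse option types further down, e.g.
+# add_option('--match', '-m', action='append', type='match').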
+
+def append_const_callback(option, opt_str, value, parser, const):
+ """Callback version of python 2.5's append_const action."""
+ parser.values.ensure_value(option.dest, []).append(const)
+
+
+def revdep_callback(option, opt_str, value, parser):
+ try:
+ parser.values.ensure_value('restrict_revdep', []).append(
+ parse_revdep(value))
+ parser.values.ensure_value('print_revdep', []).append(atom.atom(value))
+ except (parserestrict.ParseError, atom.MalformedAtom), e:
+ raise optparse.OptionValueError('option %s: %s' % (opt_str, e))
+
+
+class OptionParser(commandline.OptionParser):
+
+ """Option parser with custom option postprocessing and validation."""
+
+ def __init__(self, **kwargs):
+ commandline.OptionParser.__init__(
+ self, description=__doc__, option_class=Option, **kwargs)
+
+ self.set_default('pkgset', [])
+ self.set_default('restrict', [])
+
+ self.add_option('--domain', action='callback', type='string',
+ callback=commandline.config_callback,
+ callback_args=('domain',),
+ help='domain name to use (default used if omitted).')
+ self.add_option('--repo', action='callback', type='string',
+ callback=commandline.config_callback,
+ callback_args=('repo',),
+ help='repo to use (default from domain if omitted).')
+ self.add_option('--early-out', action='store_true', dest='earlyout',
+ help='stop when first match is found.')
+ self.add_option('--no-version', '-n', action='store_true',
+ dest='noversion',
+ help='collapse multiple matching versions together')
+ self.add_option('--min', action='store_true',
+ help='show only the lowest version for each package.')
+ self.add_option('--max', action='store_true',
+ help='show only the highest version for each package.')
+
+ repo = self.add_option_group('Source repo')
+ repo.add_option('--raw', action='store_true',
+ help='Without this switch your configuration affects '
+ 'what packages are visible (through masking) and what '
+ 'USE flags are applied to depends and fetchables. '
+ "With this switch your configuration values aren't "
+ 'used and you see the "raw" repository data.')
+ repo.add_option(
+ '--virtuals', action='store', choices=('only', 'disable'),
+ help='arg "only" for only matching virtuals, "disable" to not '
+ 'match virtuals at all. Default is to match everything.')
+ repo.add_option('--vdb', action='store_true',
+ help='match only vdb (installed) packages.')
+ repo.add_option('--all-repos', action='store_true',
+ help='search all repos, vdb included')
+
+ restrict = self.add_option_group(
+ 'Package matching',
+ 'Each option specifies a restriction packages must match. '
+ 'Specifying the same option twice means "or" unless stated '
+ 'otherwise. Specifying multiple types of restrictions means "and" '
+ 'unless stated otherwise.')
+ restrict.add_option('--all', action='callback',
+ callback=append_const_callback,
+ callback_args=(packages.AlwaysTrue,),
+ dest='restrict',
+ help='Match all packages (equivalent to -m "*")')
+ restrict.add_option(
+ '--match', '-m', action='append', type='match',
+ help='Glob-like match on category/package-version.')
+ restrict.add_option('--has-use', action='append', type='hasuse',
+ dest='hasuse',
+ help='Exact string match on a USE flag.')
+ restrict.add_option(
+ '--revdep', action='callback', callback=revdep_callback,
+ type='string',
+ help='shorthand for --restrict-revdep atom --print-revdep atom. '
+ '--print-revdep is slow, use just --restrict-revdep if you only '
+ 'need a list.')
+ restrict.add_option(
+ '--restrict-revdep', action='append', type='restrict_revdep',
+ help='Dependency on an atom.')
+ restrict.add_option('--description', '-S', action='append',
+ type='description',
+ help='regexp search on description and longdescription.')
+ restrict.add_option('--herd', action='append', type='herd',
+ help='exact match on a herd.')
+ restrict.add_option('--license', action='append', type='license',
+ help='exact match on a license.')
+ restrict.add_option('--owns', action='append', type='owns',
+ help='exact match on an owned file/dir.')
+ restrict.add_option(
+ '--owns-re', action='append', type='ownsre', dest='ownsre',
+ help='like "owns" but using a regexp for matching.')
+ restrict.add_option('--maintainer', action='append', type='maintainer',
+ help='comma-separated list of regexes to search for '
+ 'maintainers.')
+ restrict.add_option('--maintainer-name', action='append', type='maintainer_name',
+ help='comma-separated list of maintainer name regexes '
+ 'to search for.')
+ restrict.add_option('--maintainer-email', action='append', type='maintainer_email',
+ help='comma-separated list of maintainer email regexes '
+ 'to search for.')
+ restrict.add_option(
+ '--environment', action='append', type='environment',
+ help='regexp search in environment.bz2.')
+ restrict.add_option(
+ '--expr', action='append', type='expr',
+ help='Boolean combinations of other restrictions, like '
+ '\'and(not(herd("python")), match("dev-python/*"))\'. '
+ 'WARNING: currently not completely reliable.',
+ long_help='Boolean combinations of other restrictions, like '
+ '``and(not(herd("python")), match("dev-python/*"))``. '
+ '*WARNING*: currently not completely reliable.'
+ )
+ # XXX fix the negate stuff and remove that warning.
+ restrict.add_option(
+ '--pkgset', action='callback', type='string',
+ callback=commandline.config_append_callback,
+ callback_args=('pkgset',),
+ help='is inside a named set of packages (like "world").')
+
+ printable_attrs = ('rdepends', 'depends', 'post_rdepends', 'provides',
+ 'use', 'iuse', 'description', 'longdescription',
+ 'herds', 'license', 'uris', 'files',
+ 'slot', 'maintainers', 'restrict', 'repo',
+ 'alldepends', 'path', 'environment', 'keywords',
+ 'homepage', 'fetchables')
+
+ output = self.add_option_group('Output formatting')
+ output.add_option(
+ '--cpv', action='store_true',
+ help='Print the category/package-version. This is done '
+ 'by default; this option re-enables it if another '
+ 'output option (like --contents) disabled it.')
+ output.add_option('--atom', '-a', action='store_true',
+ help='print =cat/pkg-3 instead of cat/pkg-3. '
+ 'Implies --cpv, has no effect with --no-version')
+ output.add_option('--attr', action='append', choices=printable_attrs,
+ help="Print this attribute's value (can be specified more than "
+ "once). --attr=help will get you the list of valid attrs.")
+ output.add_option('--one-attr', choices=printable_attrs,
+ help="Print one attribute. Suppresses other output.")
+ output.add_option('--force-attr', action='append', dest='attr',
+ help='Like --attr but accepts any string as '
+ 'attribute name instead of only explicitly '
+ 'supported names.')
+ output.add_option('--force-one-attr',
+ help='Like --one-attr but accepts any string as '
+ 'attribute name instead of only explicitly '
+ 'supported names.')
+ output.add_option(
+ '--contents', action='store_true',
+ help='list files owned by the package. Implies --vdb.')
+ output.add_option('--verbose', '-v', action='store_true',
+ help='human-readable multi-line output per package')
+ output.add_option('--highlight-dep', action='append', type='atom',
+ help='highlight dependencies matching this atom')
+ output.add_option(
+ '--blame', action='store_true',
+ help='shorthand for --attr maintainers --attr herds')
+ output.add_option(
+ '--print-revdep', type='atom', action='append',
+ help='print what condition(s) trigger a dep.')
+
+ def check_values(self, values, args):
+ """Sanity check and postprocess after parsing."""
+ vals, args = commandline.OptionParser.check_values(self, values, args)
+ # Interpret args with parens in them as --expr additions, the
+ # rest as --match additions (since parens are invalid in --match).
+ try:
+ for arg in args:
+ if '(' in arg:
+ vals.expr.append(parse_expression(arg))
+ else:
+ vals.match.append(parserestrict.parse_match(arg))
+ except parserestrict.ParseError, e:
+ self.error(str(e))
+
+ # TODO come up with something better than "match" for this.
+ for highlight in vals.highlight_dep:
+ if not isinstance(highlight, atom.atom):
+ self.error('highlight-dep must be an atom')
+
+ if vals.contents or vals.owns or vals.ownsre:
+ vals.vdb = True
+
+ if vals.atom:
+ vals.cpv = True
+
+ if vals.noversion:
+ if vals.contents:
+ self.error(
+ 'using both --no-version and --contents does not make sense.')
+ if vals.min or vals.max:
+ self.error(
+ '--no-version with --min or --max does not make sense.')
+ if vals.print_revdep:
+ self.error(
+ '--print-revdep with --no-version does not make sense.')
+
+ if vals.blame:
+ vals.attr.extend(['herds', 'maintainers'])
+
+ if 'alldepends' in vals.attr:
+ vals.attr.remove('alldepends')
+ vals.attr.extend(['depends', 'rdepends', 'post_rdepends'])
+
+ if vals.verbose:
+ # slice assignment to an empty range; behaves as an insertion.
+ vals.attr[0:0] = ['repo', 'description', 'homepage']
+
+ if vals.force_one_attr:
+ if vals.one_attr:
+ self.error(
+ '--one-attr and --force-one-attr are mutually exclusive.')
+ vals.one_attr = vals.force_one_attr
+
+ if vals.one_attr and vals.print_revdep:
+ self.error(
+ '--print-revdep with --force-one-attr or --one-attr does not '
+ 'make sense.')
+
+ # Build up a restriction.
+ for attr in PARSE_FUNCS:
+ val = getattr(vals, attr)
+ if len(val) == 1:
+ # Omit the boolean.
+ vals.restrict.append(val[0])
+ elif val:
+ vals.restrict.append(
+ packages.OrRestriction(finalize=True, *val))
+
+ all_atoms = []
+ for pkgset in vals.pkgset:
+ atoms = list(pkgset)
+ if not atoms:
+ # This is currently an error because I am unsure what
+ # it should do.
+ self.error('Cannot use empty pkgsets')
+ all_atoms.extend(atoms)
+ if all_atoms:
+ vals.restrict.append(packages.OrRestriction(finalize=True,
+ *all_atoms))
+
+ if not vals.restrict:
+ self.error('No restrictions specified.')
+
+ if len(vals.restrict) == 1:
+ # Single restriction, omit the AndRestriction for a bit of speed
+ vals.restrict = vals.restrict[0]
+ else:
+ # "And" them all together
+ vals.restrict = packages.AndRestriction(*vals.restrict)
+
+ if vals.repo and (vals.vdb or vals.all_repos):
+ self.error(
+ '--repo with --vdb, --all-repos makes no sense')
+
+ # Get a domain object if needed.
+ if vals.domain is None and (
+ vals.verbose or vals.noversion or not vals.repo):
+ vals.domain = vals.config.get_default('domain')
+ if vals.domain is None:
+ self.error(
+ 'No default domain found, fix your configuration '
+ 'or pass --domain (Valid domains: %s)' % (
+ ', '.join(vals.config.domain),))
+
+ domain = vals.domain
+ # Get the vdb if we need it.
+ if vals.verbose and vals.noversion:
+ vals.vdbs = domain.vdb
+ else:
+ vals.vdbs = None
+ # Get repo(s) to operate on.
+ if vals.vdb:
+ vals.repos = domain.vdb
+ elif vals.all_repos:
+ vals.repos = domain.repos + domain.vdb
+ elif vals.repo:
+ vals.repos = [vals.repo]
+ else:
+ vals.repos = domain.repos
+ if vals.raw or vals.virtuals:
+ vals.repos = repo_utils.get_raw_repos(vals.repos)
+ if vals.virtuals:
+ vals.repos = repo_utils.get_virtual_repos(
+ vals.repos, vals.virtuals == 'only')
+
+ return vals, ()
+
+
+def stringify_attr(config, pkg, attr):
+ """Grab a package attr and convert it to a string."""
+ # config is currently unused but may affect display in the future.
+ if attr in ('files', 'uris'):
+ data = getattr(pkg, 'fetchables', None)
+ if data is None:
+ return 'MISSING'
+ if attr == 'files':
+ def _format(node):
+ return node.filename
+ else:
+ def _format(node):
+ return ' '.join(node.uri or ())
+ return conditionals.stringify_boolean(data, _format)
+
+ if attr == 'use':
+ # Combine a list of all enabled (including irrelevant) and all
+ # available flags into an "enabled -disabled" style string.
+ use = set(getattr(pkg, 'use', set()))
+ iuse = set(getattr(pkg, 'iuse', set()))
+ result = sorted(iuse & use) + sorted('-' + val for val in (iuse - use))
+ return ' '.join(result)
+
+ # TODO: is a missing or None attr an error?
+ value = getattr(pkg, attr, None)
+ if value is None:
+ return 'MISSING'
+
+ if attr in ('herds', 'iuse', 'maintainers'):
+ return ' '.join(sorted(unicode(v) for v in value))
+ if attr == 'keywords':
+ return ' '.join(sorted(value, key=lambda x:x.lstrip("~")))
+ if attr == 'environment':
+ return ''.join(value.get_fileobj())
+ if attr == 'repo':
+ return str(getattr(value, 'repo_id', 'no repo id'))
+ return str(value)
+
+
+def _default_formatter(out, node):
+ out.write(node, autoline=False)
+ return False
+
+
+def format_depends(out, node, func=_default_formatter):
+ """Pretty-print a depset to a formatter.
+
+ @param out: formatter.
+ @param node: a L{conditionals.DepSet}.
+ @param func: callable taking a formatter and a depset payload.
+ If it can format its value in a single line it should do that
+ without writing a newline and return C{False}.
+ If it needs multiple lines it should first write a newline, not write
+ a terminating newline, and return C{True}.
+ @returns: The same kind of boolean func should return.
+ """
+ oldwrap = out.wrap
+ out.wrap = False
+ try:
+ # Do this first since if it is a DepSet it is also an
+ # AndRestriction (DepSet subclasses that).
+ if isinstance(node, conditionals.DepSet):
+ if not node.restrictions:
+ return False
+ if len(node.restrictions) == 1:
+ return format_depends(out, node.restrictions[0], func)
+ out.write()
+ for child in node.restrictions[:-1]:
+ format_depends(out, child, func)
+ out.write()
+ format_depends(out, node.restrictions[-1], func)
+ return True
+
+ prefix = None
+ if isinstance(node, boolean.OrRestriction):
+ prefix = '|| ('
+ children = node.restrictions
+ elif (isinstance(node, boolean.AndRestriction) and not
+ isinstance(node, atom.atom)):
+ prefix = '('
+ children = node.restrictions
+ elif isinstance(node, packages.Conditional):
+ assert len(node.restriction.vals) == 1
+ prefix = '%s%s? (' % (node.restriction.negate and '!' or '',
+ list(node.restriction.vals)[0])
+ children = node.payload
+
+ if prefix:
+ children = list(children)
+ if len(children) == 1:
+ out.write(prefix, ' ', autoline=False)
+ out.first_prefix.append(' ')
+ newline = format_depends(out, children[0], func)
+ out.first_prefix.pop()
+ if newline:
+ out.write()
+ out.write(')')
+ return True
+ else:
+ out.write(' )', autoline=False)
+ return False
+ else:
+ out.write(prefix)
+ out.first_prefix.append(' ')
+ for child in children:
+ format_depends(out, child, func)
+ out.write()
+ out.first_prefix.pop()
+ out.write(')', autoline=False)
+ return True
+ else:
+ return func(out, node)
+ finally:
+ out.wrap = oldwrap
+
+def format_attr(config, out, pkg, attr):
+ """Grab a package attr and print it through a formatter."""
+ # config is currently unused but may affect display in the future.
+ if attr in ('depends', 'rdepends', 'post_rdepends', 'restrict'):
+ data = getattr(pkg, attr, None)
+ if data is None:
+ out.write('MISSING')
+ else:
+ out.first_prefix.append(' ')
+ if config.highlight_dep:
+ def _format(out, node):
+ for highlight in config.highlight_dep:
+ if highlight.intersects(node):
+ out.write(out.bold, out.fg('cyan'), node,
+ autoline=False)
+ return
+ out.write(node, autoline=False)
+ format_depends(out, data, _format)
+ else:
+ format_depends(out, data)
+ out.first_prefix.pop()
+ out.write()
+ elif attr in ('files', 'uris'):
+ data = getattr(pkg, 'fetchables', None)
+ if data is None:
+ out.write('MISSING')
+ return
+ if attr == 'files':
+ def _format(out, node):
+ out.write(node.filename, autoline=False)
+ else:
+ def _format(out, node):
+ uris = list(node.uri)
+ if not uris:
+ return False
+ if len(uris) == 1:
+ out.write(uris[0], autoline=False)
+ return False
+ out.write('|| (')
+ out.first_prefix.append(' ')
+ for uri in uris:
+ out.write(uri)
+ out.first_prefix.pop()
+ out.write(')', autoline=False)
+ return True
+ out.first_prefix.append(' ')
+ format_depends(out, data, _format)
+ out.first_prefix.pop()
+ out.write()
+ else:
+ out.write(stringify_attr(config, pkg, attr))
+
+
+def print_package(options, out, err, pkg):
+ """Print a package."""
+ if options.verbose:
+ green = out.fg('green')
+ out.write(out.bold, green, ' * ', out.fg(), pkg.cpvstr)
+ out.wrap = True
+ out.later_prefix = [' ']
+ for attr in options.attr:
+ out.write(green, ' %s: ' % (attr,), out.fg(), autoline=False)
+ format_attr(options, out, pkg, attr)
+ for revdep in options.print_revdep:
+ for name in ('depends', 'rdepends', 'post_rdepends'):
+ depset = getattr(pkg, name)
+ find_cond = getattr(depset, 'find_cond_nodes', None)
+ if find_cond is None:
+ out.write(
+ green, ' revdep: ', out.fg(), name, ' on ',
+ str(revdep))
+ continue
+ for key, restricts in depset.find_cond_nodes(
+ depset.restrictions, True):
+ if not restricts and key.intersects(revdep):
+ out.write(
+ green, ' revdep: ', out.fg(), name, ' on ',
+ autoline=False)
+ if key == revdep:
+ # this is never reached...
+ out.write(out.bold, str(revdep))
+ else:
+ out.write(
+ str(revdep), ' through dep ', out.bold,
+ str(key))
+ for key, restricts in depset.node_conds.iteritems():
+ if key.intersects(revdep):
+ out.write(
+ green, ' revdep: ', out.fg(), name, ' on ',
+ autoline=False)
+ if key == revdep:
+ out.write(
+ out.bold, str(revdep), out.reset,
+ autoline=False)
+ else:
+ out.write(
+ str(revdep), ' through dep ', out.bold,
+ str(key), out.reset, autoline=False)
+ out.write(' if USE matches one of:')
+ for r in restricts:
+ out.write(' ', str(r))
+ out.write()
+ out.later_prefix = []
+ out.wrap = False
+ elif options.one_attr:
+ if options.atom:
+ out.write('=', autoline=False)
+ if options.atom or options.cpv:
+ out.write(pkg.cpvstr, ':', autoline=False)
+ out.write(stringify_attr(options, pkg, options.one_attr))
+ else:
+ printed_something = False
+ out.autoline = False
+ if (not options.contents) or options.cpv:
+ printed_something = True
+ if options.atom:
+ out.write('=')
+ out.write(pkg.cpvstr)
+ for attr in options.attr:
+ if printed_something:
+ out.write(' ')
+ printed_something = True
+ out.write('%s="%s"' % (attr, stringify_attr(options, pkg, attr)))
+ for revdep in options.print_revdep:
+ for name in ('depends', 'rdepends', 'post_rdepends'):
+ depset = getattr(pkg, name)
+ if getattr(depset, 'find_cond_nodes', None) is None:
+ # TODO maybe be smarter here? (this code is
+ # triggered by virtuals currently).
+ out.write(' %s on %s' % (name, revdep))
+ continue
+ for key, restricts in depset.find_cond_nodes(
+ depset.restrictions, True):
+ if not restricts and key.intersects(revdep):
+ out.write(' %s on %s through %s' % (name, revdep, key))
+ for key, restricts in depset.node_conds.iteritems():
+ if key.intersects(revdep):
+ out.write(' %s on %s through %s if USE %s,' % (
+ name, revdep, key, ' or '.join(
+ str(r) for r in restricts)))
+ # If we printed anything at all print the newline now
+ out.autoline = True
+ if printed_something:
+ out.write()
+
+ if options.contents:
+ for location in sorted(obj.location
+ for obj in getattr(pkg, 'contents', ())):
+ out.write(location)
+
+def print_packages_noversion(options, out, err, pkgs):
+ """Print a summary of all versions for a single package."""
+ if options.verbose:
+ green = out.fg('green')
+ out.write(out.bold, green, ' * ', out.fg(), pkgs[0].key)
+ out.wrap = True
+ out.later_prefix = [' ']
+ versions = ' '.join(pkg.fullver for pkg in sorted(pkgs))
+ out.write(green, ' versions: ', out.fg(), versions)
+ # If we are already matching on all repos we do not need to duplicate.
+ if not (options.vdb or options.all_repos):
+ versions = sorted(
+ pkg.fullver for vdb in options.vdbs
+ for pkg in vdb.itermatch(pkgs[0].unversioned_atom))
+ out.write(green, ' installed: ', out.fg(), ' '.join(versions))
+ for attr in options.attr:
+ out.write(green, ' %s: ' % (attr,), out.fg(),
+ stringify_attr(options, pkgs[-1], attr))
+ out.write()
+ out.wrap = False
+ out.later_prefix = []
+ elif options.one_attr:
+ if options.atom:
+ out.write('=', autoline=False)
+ if options.atom or options.cpv:
+ out.write(pkgs[0].key, ':', autoline=False)
+ out.write(stringify_attr(options, pkgs[-1], options.one_attr))
+ else:
+ out.autoline = False
+ out.write(pkgs[0].key)
+ for attr in options.attr:
+ out.write(' %s="%s"' % (attr, stringify_attr(options, pkgs[-1],
+ attr)))
+ out.autoline = True
+ out.write()
+
+
+def main(options, out, err):
+ """Run a query."""
+ if options.debug:
+ for repo in options.repos:
+ out.write('repo: %r' % (repo,))
+ out.write('restrict: %r' % (options.restrict,))
+ out.write()
+
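+ # itermatch(sorter=sorted) yields versions in order and groupby_pkg batches
+ # them per cat/pkg, so --no-version/--min/--max can act on the whole
+ # version list of a package at once.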
+ for repo in options.repos:
+ try:
+ for pkgs in pkgutils.groupby_pkg(
+ repo.itermatch(options.restrict, sorter=sorted)):
+ pkgs = list(pkgs)
+ if options.noversion:
+ print_packages_noversion(options, out, err, pkgs)
+ elif options.min or options.max:
+ if options.min:
+ print_package(options, out, err, min(pkgs))
+ if options.max:
+ print_package(options, out, err, max(pkgs))
+ else:
+ for pkg in pkgs:
+ print_package(options, out, err, pkg)
+ if options.earlyout:
+ break
+ if options.earlyout:
+ break
+
+ except KeyboardInterrupt:
+ raise
+ except Exception:
+ err.write('caught an exception!')
+ err.write('repo: %r' % (repo,))
+ err.write('restrict: %r' % (options.restrict,))
+ raise
diff --git a/pkgcore/spawn.py b/pkgcore/spawn.py
new file mode 100644
index 0000000..7eb8a0b
--- /dev/null
+++ b/pkgcore/spawn.py
@@ -0,0 +1,532 @@
+# Copyright: 2005-2006 Jason Stubbs <jstubbs@gmail.com>
+# Copyright: 2004-2006 Brian Harring <ferringb@gmail.com>
+# Copyright: 2004-2005 Gentoo Foundation
+# License: GPL2
+
+
+"""
+subprocess related functionality
+"""
+
+__all__ = [
+ "cleanup_pids", "spawn", "spawn_sandbox", "spawn_bash", "spawn_fakeroot",
+ "spawn_get_output", "find_binary"]
+
+import os, atexit, signal, sys
+
+from pkgcore.const import (
+ BASH_BINARY, SANDBOX_BINARY, FAKED_PATH, LIBFAKEROOT_PATH)
+
+from snakeoil.osutils import listdir
+from snakeoil.mappings import ProtectedDict
+
+
+try:
+ import resource
+ max_fd_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
+except ImportError:
+ max_fd_limit = 256
+
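+# Prefer /proc/<pid>/fd for the exact set of open fds; fall back to scanning
+# the whole 0..max_fd_limit range on platforms without procfs.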
+def slow_get_open_fds():
+ return xrange(max_fd_limit)
+if os.path.isdir("/proc/%i/fd" % os.getpid()):
+ def get_open_fds():
+ try:
+ return map(int, listdir("/proc/%i/fd" % os.getpid()))
+ except (OSError, IOError):
+ return slow_get_open_fds()
+ except ValueError, v:
+ import warnings
+ warnings.warn(
+ "extremely odd, got a value error '%s' while scanning "
+ "/proc/%i/fd; OS allowing string names in fd?" %
+ (v, os.getpid()))
+ return slow_get_open_fds()
+else:
+ get_open_fds = slow_get_open_fds
+
+
+def spawn_bash(mycommand, debug=False, opt_name=None, **keywords):
+ """spawn the command via bash -c"""
+
+ args = [BASH_BINARY]
+ if not opt_name:
+ opt_name = os.path.basename(mycommand.split()[0])
+ if debug:
+ # Print commands and their arguments as they are executed.
+ args.append("-x")
+ args.append("-c")
+ args.append(mycommand)
+ return spawn(args, opt_name=opt_name, **keywords)
+
+def spawn_sandbox(mycommand, opt_name=None, **keywords):
+ """spawn the command under sandboxed"""
+
+ if not is_sandbox_capable():
+ return spawn_bash(mycommand, opt_name=opt_name, **keywords)
+ args = [SANDBOX_BINARY]
+ if not opt_name:
+ opt_name = os.path.basename(mycommand.split()[0])
+ args.append(mycommand)
+ return spawn(args, opt_name=opt_name, **keywords)
+
+_exithandlers = []
+def atexit_register(func, *args, **kargs):
+ """Wrapper around atexit.register that is needed in order to track
+ what is registered. For example, when portage restarts itself via
+ os.execv, the atexit module does not work so we have to do it
+ manually by calling the run_exitfuncs() function in this module."""
+ _exithandlers.append((func, args, kargs))
+
+def run_exitfuncs():
+ """This should behave identically to the routine performed by
+ the atexit module at exit time. It's only necessary to call this
+ function when atexit will not work (because of os.execv, for
+ example)."""
+
+ # This function is a copy of the private atexit._run_exitfuncs()
+ # from the python 2.4.2 sources. The only difference from the
+ # original function is in the output to stderr.
+ exc_info = None
+ while _exithandlers:
+ func, targs, kargs = _exithandlers.pop()
+ try:
+ func(*targs, **kargs)
+ except SystemExit:
+ exc_info = sys.exc_info()
+ except:
+ exc_info = sys.exc_info()
+
+ if exc_info is not None:
+ raise exc_info[0], exc_info[1], exc_info[2]
+
+atexit.register(run_exitfuncs)
+
+# We need to make sure that any processes spawned are killed off when
+# we exit. spawn() takes care of adding and removing pids to this list
+# as it creates and cleans up processes.
+spawned_pids = []
+def cleanup_pids(pids=None):
+ """reap list of pids if specified, else all children"""
+
+ if pids is None:
+ pids = spawned_pids
+ elif pids is not spawned_pids:
+ pids = list(pids)
+
+ while pids:
+ pid = pids.pop()
+ try:
+ if os.waitpid(pid, os.WNOHANG) == (0, 0):
+ os.kill(pid, signal.SIGTERM)
+ os.waitpid(pid, 0)
+ except OSError:
+ # This pid has been cleaned up outside
+ # of spawn().
+ pass
+
+ if spawned_pids is not pids:
+ try:
+ spawned_pids.remove(pid)
+ except ValueError:
+ pass
+
+def spawn(mycommand, env=None, opt_name=None, fd_pipes=None, returnpid=False,
+ uid=None, gid=None, groups=None, umask=None, logfile=None,
+ chdir=None, path_lookup=True):
+
+ """wrapper around execve
+
+ @type mycommand: list or string
+ @type env: mapping with string keys and values
+ @param opt_name: controls what the process is named
+ (what it would show up as under top for example)
+ @type fd_pipes: mapping from existing fd to fd (inside the new process)
+ @param fd_pipes: controls what fd's are left open in the spawned process-
+ @param returnpid: controls whether spawn waits for the process to finish,
+ or returns the pid.
+ """
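+ # Illustrative calls (not part of the original source):
+ # spawn(["ls", "-l"]) blocks and returns the exit code;
+ # spawn(["ls", "-l"], returnpid=True) hands back the pids for the
+ # caller to reap (e.g. via cleanup_pids).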
+ if env is None:
+ env = {}
+ # mycommand is either a str or a list
+ if isinstance(mycommand, str):
+ mycommand = mycommand.split()
+
+ # If an absolute path to an executable file isn't given
+ # search for it unless we've been told not to.
+ binary = mycommand[0]
+ if not path_lookup:
+ if find_binary(binary) != binary:
+ raise CommandNotFound(binary)
+ else:
+ binary = find_binary(binary)
+
+ # If we haven't been told what file descriptors to use
+ # default to propagating our stdin, stdout and stderr.
+ if fd_pipes is None:
+ fd_pipes = {0:0, 1:1, 2:2}
+
+ # mypids will hold the pids of all processes created.
+ mypids = []
+
+ if logfile:
+ # Using a log file requires that stdout and stderr
+ # are assigned to the process we're running.
+ if 1 not in fd_pipes or 2 not in fd_pipes:
+ raise ValueError(fd_pipes)
+
+ # Create a pipe
+ (pr, pw) = os.pipe()
+
+ # Create a tee process, giving it our stdout and stderr
+ # as well as the read end of the pipe.
+ mypids.extend(spawn(('tee', '-i', '-a', logfile), returnpid=True,
+ fd_pipes={0:pr, 1:fd_pipes[1], 2:fd_pipes[2]}))
+
+ # We don't need the read end of the pipe, so close it.
+ os.close(pr)
+
+ # Assign the write end of the pipe to our stdout and stderr.
+ fd_pipes[1] = pw
+ fd_pipes[2] = pw
+
+
+ pid = os.fork()
+
+ if not pid:
+ # 'Catch "Exception"'
+ # pylint: disable-msg=W0703
+ try:
+ _exec(binary, mycommand, opt_name, fd_pipes, env, gid, groups,
+ uid, umask, chdir)
+ except Exception, e:
+ # We need to catch _any_ exception so that it doesn't
+ # propagate out of this function and cause exiting
+ # with anything other than os._exit()
+ sys.stderr.write("%s:\n %s\n" % (e, " ".join(mycommand)))
+ os._exit(1)
+
+ # Add the pid to our local and the global pid lists.
+ mypids.append(pid)
+ spawned_pids.append(pid)
+
+ # If we started a tee process the write side of the pipe is no
+ # longer needed, so close it.
+ if logfile:
+ os.close(pw)
+
+ # If the caller wants to handle cleaning up the processes, we tell
+ # it about all processes that were created.
+ if returnpid:
+ return mypids
+
+ try:
+ # Otherwise we clean them up.
+ while mypids:
+
+ # Pull the last reader in the pipe chain. If all processes
+ # in the pipe are well behaved, it will die when the process
+ # it is reading from dies.
+ pid = mypids.pop(0)
+
+ # and wait for it.
+ retval = os.waitpid(pid, 0)[1]
+
+ # When it's done, we can remove it from the
+ # global pid list as well.
+ spawned_pids.remove(pid)
+
+ if retval:
+ # If it failed, kill off anything else that
+ # isn't dead yet.
+ for pid in mypids:
+ if os.waitpid(pid, os.WNOHANG) == (0, 0):
+ os.kill(pid, signal.SIGTERM)
+ os.waitpid(pid, 0)
+ spawned_pids.remove(pid)
+
+ return process_exit_code(retval)
+ finally:
+ cleanup_pids(mypids)
+
+ # Everything succeeded
+ return 0
+
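+# Illustrative usage sketch (hypothetical commands and log path): spawn()
+# either waits for the child and returns its processed exit code, or, with
+# returnpid=True, hands the child pids back to the caller, who then owns
+# cleanup via cleanup_pids().
+#
+# ret = spawn(["/bin/true"])  # blocks; 0 on success
+# pids = spawn(["/bin/sleep", "10"], returnpid=True, logfile="/tmp/spawn.log")
+# ...  # do other work while the child (and its tee) run
+# cleanup_pids(pids)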
+def _exec(binary, mycommand, opt_name, fd_pipes, env, gid, groups, uid, umask,
+ chdir):
+ """internal function to handle exec'ing the child process.
+
+ If it succeeds this function does not return. It might raise an
+ exception, and since this runs after fork, the calling code needs to
+ make sure any exception is caught and os._exit is called if one occurs
+ (otherwise atexit handlers run twice).
+ """
+
+ # If the process we're creating hasn't been given a name
+ # assign it the name of the executable.
+ if not opt_name:
+ opt_name = os.path.basename(binary)
+
+ # Set up the command's argument list.
+ myargs = [opt_name]
+ myargs.extend(mycommand[1:])
+
+ # Set up the command's pipes.
+ my_fds = {}
+ # To protect from cases where direct assignment could
+ # clobber needed fds ({1:2, 2:1}) we first dupe the fds
+ # into unused fds.
+
+ protected_fds = set(fd_pipes.itervalues())
+
+ for trg_fd, src_fd in fd_pipes.iteritems():
+ # if it's not the same we care
+ if trg_fd != src_fd:
+ if trg_fd not in protected_fds:
+ # if nothing we care about is there... do it now.
+ # we're not updating protected_fds here because dup will not
+ # overwrite existing fds, and the target is not listed as a
+ # src at this point.
+ os.dup2(src_fd, trg_fd)
+ else:
+ x = os.dup(src_fd)
+ protected_fds.add(x)
+ my_fds[trg_fd] = x
+
+ # reassign what's required now.
+ for trg_fd, src_fd in my_fds.iteritems():
+ os.dup2(src_fd, trg_fd)
+
+ # Then close _all_ fds that haven't been explicitly
+ # requested to be kept open.
+ for fd in get_open_fds():
+ # if it's not a target fd, close it.
+ if fd not in fd_pipes:
+ try:
+ os.close(fd)
+ except OSError:
+ pass
+
+ if chdir is not None:
+ os.chdir(chdir)
+
+ # Set requested process permissions.
+ if gid:
+ os.setgid(gid)
+ if groups:
+ os.setgroups(groups)
+ if uid:
+ os.setuid(uid)
+ if umask:
+ os.umask(umask)
+
+ # And switch to the new process.
+ os.execve(binary, myargs, env)
+
+
+def find_binary(binary, paths=None):
+ """look through the PATH environment, finding the binary to execute"""
+
+ if os.path.isabs(binary):
+ if not (os.path.isfile(binary) and os.access(binary, os.X_OK)):
+ raise CommandNotFound(binary)
+ return binary
+
+ if paths is None:
+ paths = os.environ.get("PATH", "").split(":")
+
+ for path in paths:
+ filename = "%s/%s" % (path, binary)
+ if os.access(filename, os.X_OK) and os.path.isfile(filename):
+ return filename
+
+ raise CommandNotFound(binary)
+
+def spawn_fakeroot(mycommand, save_file, env=None, opt_name=None,
+ returnpid=False, **keywords):
+ """spawn a process via fakeroot
+
+ refer to the fakeroot manpage for specifics of using fakeroot
+ """
+ if env is None:
+ env = {}
+ else:
+ env = ProtectedDict(env)
+
+ if opt_name is None:
+ opt_name = "fakeroot %s" % mycommand
+
+ args = [
+ FAKED_PATH,
+ "--unknown-is-real", "--foreground", "--save-file", save_file]
+
+ rd_fd, wr_fd = os.pipe()
+ daemon_fd_pipes = {1:wr_fd, 2:wr_fd}
+ if os.path.exists(save_file):
+ args.append("--load")
+ daemon_fd_pipes[0] = os.open(save_file, os.O_RDONLY)
+ else:
+ daemon_fd_pipes[0] = os.open("/dev/null", os.O_RDONLY)
+
+ pids = None
+ pids = spawn(args, fd_pipes=daemon_fd_pipes, returnpid=True)
+ try:
+ try:
+ rd_f = os.fdopen(rd_fd)
+ line = rd_f.readline()
+ rd_f.close()
+ rd_fd = None
+ except:
+ cleanup_pids(pids)
+ raise
+ finally:
+ for x in (rd_fd, wr_fd, daemon_fd_pipes[0]):
+ if x is not None:
+ try:
+ os.close(x)
+ except OSError:
+ pass
+
+ line = line.strip()
+
+ try:
+ fakekey, fakepid = map(int, line.split(":"))
+ except ValueError:
+ raise ExecutionFailure("output from faked was unparsable- %s" % line)
+
+ # by now we have our very own daemonized faked. yay.
+ env["FAKEROOTKEY"] = str(fakekey)
+ env["LD_PRELOAD"] = ":".join(
+ [LIBFAKEROOT_PATH] + env.get("LD_PRELOAD", "").split(":"))
+
+ try:
+ ret = spawn(
+ mycommand, opt_name=opt_name, env=env, returnpid=returnpid,
+ **keywords)
+ if returnpid:
+ return ret + [fakepid] + pids
+ return ret
+ finally:
+ if not returnpid:
+ cleanup_pids([fakepid] + pids)
+
+def spawn_get_output(
+ mycommand, spawn_type=spawn, raw_exit_code=False, collect_fds=(1,),
+ fd_pipes=None, split_lines=True, **keywords):
+
+ """Call spawn, collecting the output to fd's specified in collect_fds list.
+
+ @param spawn_type: the passed in function to call-
+ typically spawn_bash, spawn, spawn_sandbox, or spawn_fakeroot.
+ defaults to spawn
+ """
+
+ pr, pw = None, None
+ if fd_pipes is None:
+ fd_pipes = {0:0}
+ else:
+ fd_pipes = ProtectedDict(fd_pipes)
+ try:
+ pr, pw = os.pipe()
+ for x in collect_fds:
+ fd_pipes[x] = pw
+ keywords["returnpid"] = True
+ mypid = spawn_type(mycommand, fd_pipes=fd_pipes, **keywords)
+ os.close(pw)
+ pw = None
+
+ if not isinstance(mypid, (list, tuple)):
+ raise ExecutionFailure()
+
+ fd = os.fdopen(pr, "r")
+ try:
+ if not split_lines:
+ mydata = fd.read()
+ else:
+ mydata = fd.readlines()
+ finally:
+ fd.close()
+ pr = None
+
+ retval = os.waitpid(mypid[0], 0)[1]
+ cleanup_pids(mypid)
+ if raw_exit_code:
+ return [retval, mydata]
+ return [process_exit_code(retval), mydata]
+
+ finally:
+ if pr is not None:
+ try:
+ os.close(pr)
+ except OSError:
+ pass
+ if pw is not None:
+ try:
+ os.close(pw)
+ except OSError:
+ pass
+
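+# Illustrative usage sketch (hypothetical command): collect a command's
+# stdout. With the default split_lines=True the data comes back as a list
+# of lines; the first element of the returned pair is the processed exit
+# code (or the raw waitpid status when raw_exit_code=True).
+#
+# ret, lines = spawn_get_output(["uname", "-r"])
+# if ret == 0:
+#     kernel_version = lines[0].strip()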
+def process_exit_code(retval):
+ """Process a waitpid returned exit code.
+
+ @return: The exit code if it exited, or the signal if it died from a signal.
+ """
+ # If it got a signal, return the signal that was sent.
+ if retval & 0xff:
+ return (retval & 0xff) << 8
+
+ # Otherwise, return its exit code.
+ return retval >> 8
+
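+# Worked example of the decoding above: a child that called exit(2) gives a
+# waitpid status of 0x0200, so the low byte is 0 and 0x0200 >> 8 == 2 is
+# returned; a child killed by SIGTERM (15) gives a status with low byte 15,
+# which is returned shifted up as 15 << 8 == 0x0f00, keeping signal deaths
+# distinct from normal exit codes.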
+
+class ExecutionFailure(Exception):
+ def __init__(self, msg):
+ Exception.__init__(self, msg)
+ self.msg = msg
+ def __str__(self):
+ return "Execution Failure: %s" % self.msg
+
+class CommandNotFound(ExecutionFailure):
+ def __init__(self, command):
+ ExecutionFailure.__init__(
+ self, "CommandNotFound Exception: Couldn't find '%s'" % (command,))
+ self.command = command
+
+# cached capabilities
+
+def is_fakeroot_capable(force=False):
+ if not force:
+ try:
+ return is_fakeroot_capable.cached_result
+ except AttributeError:
+ pass
+ if not (os.path.exists(FAKED_PATH) and os.path.exists(LIBFAKEROOT_PATH)):
+ res = False
+ else:
+ try:
+ r, s = spawn_get_output(["fakeroot", "--version"],
+ fd_pipes={2:1, 1:1})
+ res = (r == 0) and (len(s) == 1) and ("version 1." in s[0])
+ except ExecutionFailure:
+ res = False
+ is_fakeroot_capable.cached_result = res
+ return res
+
+def is_sandbox_capable(force=False):
+ if not force:
+ try:
+ return is_sandbox_capable.cached_result
+ except AttributeError:
+ pass
+ res = os.path.isfile(SANDBOX_BINARY) and os.access(SANDBOX_BINARY, os.X_OK)
+ is_sandbox_capable.cached_result = res
+ return res
+
+def is_userpriv_capable(force=False):
+ if not force:
+ try:
+ return is_userpriv_capable.cached_result
+ except AttributeError:
+ pass
+ res = is_userpriv_capable.cached_result = (os.getuid() == 0)
+ return res
diff --git a/pkgcore/sync/__init__.py b/pkgcore/sync/__init__.py
new file mode 100644
index 0000000..bd8f25d
--- /dev/null
+++ b/pkgcore/sync/__init__.py
@@ -0,0 +1,3 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
diff --git a/pkgcore/sync/base.py b/pkgcore/sync/base.py
new file mode 100644
index 0000000..8ebf046
--- /dev/null
+++ b/pkgcore/sync/base.py
@@ -0,0 +1,175 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+from pkgcore.config import ConfigHint, configurable
+from snakeoil import demandload, descriptors
+demandload.demandload(globals(),
+ 'os',
+ 'pwd',
+ 'stat',
+ 'errno',
+ 'pkgcore:spawn',
+ 'pkgcore:plugin',
+ 'pkgcore:os_data',
+)
+
+
+class syncer_exception(Exception):
+ pass
+
+class uri_exception(syncer_exception):
+ pass
+
+class generic_exception(syncer_exception):
+ pass
+
+class missing_local_user(syncer_exception):
+ pass
+
+class missing_binary(syncer_exception):
+ pass
+
+
+class syncer(object):
+
+ forcable = False
+
+ supported_uris = ()
+
+ pkgcore_config_type = ConfigHint(
+ {'path':'str', 'uri':'str'}, typename='syncer')
+
+ def __init__(self, path, uri, default_verbosity=0):
+ self.verbose = default_verbosity
+ self.basedir = path.rstrip(os.path.sep) + os.path.sep
+ self.local_user, self.uri = self.split_users(uri)
+
+ @staticmethod
+ def split_users(raw_uri):
+ """
+ @param raw_uri: string uri to split users from; for example
+ harring::ferringb:pass has local user 'harring', with remote user
+ 'ferringb' and password 'pass' left in the returned uri
+ @return: (local user uid, remaining uri); the local user defaults to
+ root_uid if none is specified
+ """
+ uri = raw_uri.split("::", 1)
+ if len(uri) == 1:
+ return os_data.root_uid, raw_uri
+ try:
+ if uri[1].startswith("@"):
+ uri[1] = uri[1][1:]
+ if '/' in uri[0] or ':' in uri[0]:
+ proto = uri[0].split("/", 1)
+ proto[1] = proto[1].lstrip("/")
+ uri[0] = proto[1]
+ uri[1] = "%s//%s" % (proto[0], uri[1])
+ return pwd.getpwnam(uri[0]).pw_uid, uri[1]
+ except KeyError, e:
+ raise missing_local_user(raw_uri, uri[0], e)
+
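+ # Illustrative sketch of the split (hypothetical user and host names):
+ #
+ # split_users("rsync://host.example.com/gentoo-portage")
+ #   -> (os_data.root_uid, "rsync://host.example.com/gentoo-portage")
+ # split_users("builder::rsync://host.example.com/gentoo-portage")
+ #   -> (pwd.getpwnam("builder").pw_uid,
+ #       "rsync://host.example.com/gentoo-portage")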
+ def sync(self, verbosity=None, force=False):
+ kwds = {}
+ if self.forcable and force:
+ kwds["force"] = True
+ if verbosity is None:
+ verbosity = self.verbose
+ # output_fd is hardcoded to stdout atm.
+ return self._sync(verbosity, 1, **kwds)
+
+ def _sync(self, verbosity, output_fd, **kwds):
+ raise NotImplementedError(self, "_sync")
+
+ def __str__(self):
+ return "%s syncer: %s, %s" % (self.__class__,
+ self.basedir, self.uri)
+
+ @classmethod
+ def supports_uri(cls, uri):
+ for prefix, level in cls.supported_uris:
+ if uri.startswith(prefix):
+ return level
+ return 0
+
+
+class ExternalSyncer(syncer):
+
+ """Base class for syncers that spawn a binary to do the the actual work."""
+
+ sets_env = False
+ binary = None
+
+ def __init__(self, path, uri, default_verbosity=0):
+ syncer.__init__(self, path, uri, default_verbosity=default_verbosity)
+ if not self.sets_env:
+ self.env = {}
+ if not hasattr(self, 'binary_path'):
+ self.binary_path = self.require_binary(self.binary)
+
+ @staticmethod
+ def require_binary(bin_name, fatal=True):
+ try:
+ return spawn.find_binary(bin_name)
+ except spawn.CommandNotFound, e:
+ if fatal:
+ raise missing_binary(bin_name, e)
+ return None
+
+ @descriptors.classproperty
+ def disabled(cls):
+ disabled = getattr(cls, '_disabled', None)
+ if disabled is None:
+ path = getattr(cls, 'binary_path', None)
+ if path is None:
+ disabled = cls._disabled = (
+ cls.require_binary(cls.binary, fatal=False) is None)
+ else:
+ disabled = cls._disabled = not os.path.exists(path)
+ return disabled
+
+ def set_binary_path(self):
+ self.binary_path = self.require_binary(self.binary)
+
+ def _spawn(self, command, pipes, **kwargs):
+ return spawn.spawn(command, fd_pipes=pipes, uid=self.local_user,
+ env=self.env, **kwargs)
+
+
+class dvcs_syncer(ExternalSyncer):
+
+ def _sync(self, verbosity, output_fd):
+ try:
+ st = os.stat(self.basedir)
+ except (IOError, OSError), ie:
+ if ie.errno != errno.ENOENT:
+ raise generic_exception(self, self.basedir, ie)
+ command = self._initial_pull()
+ chdir = None
+ else:
+ if not stat.S_ISDIR(st.st_mode):
+ raise generic_exception(self, self.basedir,
+ "isn't a directory")
+ command = self._update_existing()
+ chdir = self.basedir
+
+ ret = self._spawn(command, {1:output_fd, 2:output_fd, 0:0},
+ chdir=chdir)
+ return ret == 0
+
+ def _initial_pull(self):
+ raise NotImplementedError(self, "_initial_pull")
+
+ def _update_existing(self):
+ raise NotImplementedError(self, "_update_existing")
+
+@configurable({'basedir':'str', 'uri':'str'}, typename='syncer')
+def GenericSyncer(basedir, uri, default_verbosity=0):
+ """Syncer using the plugin system to find a syncer based on uri."""
+ plugins = list(
+ (plug.supports_uri(uri), plug)
+ for plug in plugin.get_plugins('syncer'))
+ plugins.sort()
+ if not plugins or plugins[-1][0] <= 0:
+ raise uri_exception('no known syncer supports %r' % (uri,))
+ # XXX this is random if there is a tie. Should we raise an exception?
+ return plugins[-1][1](basedir, uri, default_verbosity=default_verbosity)
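+# Illustrative sketch (hypothetical path and uri): each registered syncer
+# plugin scores the uri via supports_uri(), and the highest positive score
+# wins; for a git:// uri the git syncer (scoring 5) should be selected.
+#
+# syncer = GenericSyncer("/usr/portage", "git://git.example.org/tree.git")
+# # isinstance(syncer, pkgcore.sync.git.git_syncer) -> True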
diff --git a/pkgcore/sync/bzr.py b/pkgcore/sync/bzr.py
new file mode 100644
index 0000000..ee5fd8b
--- /dev/null
+++ b/pkgcore/sync/bzr.py
@@ -0,0 +1,28 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+from pkgcore.sync import base
+
+class bzr_syncer(base.dvcs_syncer):
+
+ binary = "bzr"
+
+ supported_uris = (
+ ('bzr+', 5),
+ )
+
+ @staticmethod
+ def parse_uri(raw_uri):
+ if not raw_uri.startswith("bzr+"):
+ raise base.uri_exception(raw_uri, "doesn't start with bzr+")
+ return raw_uri[4:]
+
+ def __init__(self, basedir, uri, **kwargs):
+ uri = self.parse_uri(uri)
+ base.dvcs_syncer.__init__(self, basedir, uri, **kwargs)
+
+ def _initial_pull(self):
+ return [self.binary_path, "get", self.basedir, self.uri]
+
+ def _update_existing(self):
+ return [self.binary_path, "pull", self.uri]
diff --git a/pkgcore/sync/cvs.py b/pkgcore/sync/cvs.py
new file mode 100644
index 0000000..14e351d
--- /dev/null
+++ b/pkgcore/sync/cvs.py
@@ -0,0 +1,57 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+from pkgcore.sync import base
+
+class cvs_syncer(base.dvcs_syncer):
+
+ sets_env = True
+ binary = "cvs"
+
+ supported_uris = (
+ ('cvs+', 5),
+ ('cvs://', 5),
+ )
+
+ @classmethod
+ def parse_uri(cls, raw_uri):
+ if not raw_uri.startswith("cvs") and \
+ not raw_uri.startswith("cvs+"):
+ raise base.uri_exception(raw_uri, "must be cvs:// or cvs+${RSH}")
+ if raw_uri.startswith("cvs://"):
+ return None, raw_uri[len("cvs://"):]
+ proto = raw_uri[len("cvs+"):].split(":", 1)
+ if not proto[0]:
+ raise base.uri_exception(raw_uri,
+ "cvs+ requires the rsh alternative to be specified")
+ if proto[0] == "anon":
+ proto[0] = None
+ elif proto[0] != "pserver":
+ proto[0] = cls.require_binary(proto[0])
+ return proto[0], proto[1].lstrip("/")
+
+ def __init__(self, basedir, raw_uri, **kwargs):
+ proto, uri = self.parse_uri(raw_uri)
+ self.rsh = proto
+ if self.rsh is None:
+ uri = ":anoncvs:%s" % uri
+ elif self.rsh == "pserver":
+ uri = ":pserver:%s" % uri
+ self.rsh = None
+ else:
+ uri = ":ext:%s" % uri
+ host, self.module = uri.rsplit(":", 1)
+ base.dvcs_syncer.__init__(self, basedir, host, **kwargs)
+
+ @property
+ def env(self):
+ k = {"CVSROOT":self.uri}
+ if self.rsh is not None:
+ k["CVS_RSH"] = self.rsh
+ return k
+
+ def _update_existing(self):
+ return [self.binary_path, "up"]
+
+ def _initial_pull(self):
+ return [self.binary_path, "co", "-d", self.basedir]
diff --git a/pkgcore/sync/darcs.py b/pkgcore/sync/darcs.py
new file mode 100644
index 0000000..3141a4e
--- /dev/null
+++ b/pkgcore/sync/darcs.py
@@ -0,0 +1,28 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+from pkgcore.sync import base
+
+class darcs_syncer(base.dvcs_syncer):
+
+ binary = "darcs"
+
+ supported_uris = (
+ ('darcs+', 5),
+ )
+
+ @staticmethod
+ def parse_uri(raw_uri):
+ if not raw_uri.startswith("darcs+"):
+ raise base.uri_exception(raw_uri, "doesn't start with darcs+")
+ return raw_uri[6:]
+
+ def __init__(self, basedir, uri, **kwargs):
+ uri = self.parse_uri(uri)
+ base.dvcs_syncer.__init__(self, basedir, uri, **kwargs)
+
+ def _initial_pull(self):
+ return [self.binary_path, "clone", self.uri, self.basedir]
+
+ def _update_existing(self):
+ return [self.binary_path, "pull", self.uri]
diff --git a/pkgcore/sync/git.py b/pkgcore/sync/git.py
new file mode 100644
index 0000000..10bf4d6
--- /dev/null
+++ b/pkgcore/sync/git.py
@@ -0,0 +1,35 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+from pkgcore.sync import base
+
+class git_syncer(base.dvcs_syncer):
+
+ binary = "git"
+
+ supported_uris = (
+ ('git://', 5),
+ ('git+', 5),
+ )
+
+ @staticmethod
+ def parse_uri(raw_uri):
+ if not raw_uri.startswith("git+") and not raw_uri.startswith("git://"):
+ raise base.uri_exception(raw_uri,
+ "doesn't start with git+ nor git://")
+ if raw_uri.startswith("git+"):
+ if raw_uri.startswith("git+:"):
+ raise base.uri_exception(raw_uri,
+ "need to specify the sub protocol if using git+")
+ return raw_uri[4:]
+ return raw_uri
+
+ def __init__(self, basedir, uri, **kwargs):
+ uri = self.parse_uri(uri)
+ base.dvcs_syncer.__init__(self, basedir, uri, **kwargs)
+
+ def _initial_pull(self):
+ return [self.binary_path, "clone", self.uri, self.basedir]
+
+ def _update_existing(self):
+ return [self.binary_path, "pull"]
diff --git a/pkgcore/sync/hg.py b/pkgcore/sync/hg.py
new file mode 100644
index 0000000..3ab3c78
--- /dev/null
+++ b/pkgcore/sync/hg.py
@@ -0,0 +1,28 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+from pkgcore.sync import base
+
+class hg_syncer(base.dvcs_syncer):
+
+ binary = "hg"
+
+ supported_uris = (
+ ('hg+', 5),
+ )
+
+ @staticmethod
+ def parse_uri(raw_uri):
+ if not raw_uri.startswith("hg+"):
+ raise base.uri_exception(raw_uri, "doesn't start with hg+")
+ return raw_uri[3:]
+
+ def __init__(self, basedir, uri, **kwargs):
+ uri = self.parse_uri(uri)
+ base.dvcs_syncer.__init__(self, basedir, uri, **kwargs)
+
+ def _initial_pull(self):
+ return [self.binary_path, "clone", self.uri, self.basedir]
+
+ def _update_existing(self):
+ return [self.binary_path, "pull", "-u", self.uri]
diff --git a/pkgcore/sync/rsync.py b/pkgcore/sync/rsync.py
new file mode 100644
index 0000000..e46d257
--- /dev/null
+++ b/pkgcore/sync/rsync.py
@@ -0,0 +1,188 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+from pkgcore.sync import base
+from pkgcore.config import ConfigHint
+from snakeoil.demandload import demandload
+
+demandload(globals(),
+ 'os',
+ 'socket',
+ 'errno',
+ 'snakeoil.osutils:pjoin',
+)
+
+class rsync_syncer(base.ExternalSyncer):
+
+ default_excludes = ["/distfiles", "/local", "/packages"]
+ default_includes = []
+ default_timeout = 180
+ default_opts = ["--recursive",
+ "--delete-after",
+ "--perms",
+ "--times",
+ "--force",
+ "--safe-links",
+ "--whole-file"] # this one probably shouldn't be a default.
+
+ default_retries = 5
+ binary = "rsync"
+
+ @classmethod
+ def parse_uri(cls, raw_uri):
+ if not raw_uri.startswith("rsync://") and \
+ not raw_uri.startswith("rsync+"):
+ raise base.uri_exception(raw_uri,
+ "doesn't start with rsync:// nor rsync+")
+
+ if raw_uri.startswith("rsync://"):
+ return None, raw_uri
+
+ proto = raw_uri.split(":", 1)
+ proto[0] = proto[0].split("+", 1)[1]
+ cls.require_binary(proto[0])
+ return proto[0], "rsync:%s" % proto[1]
+
+ pkgcore_config_type = ConfigHint({'basedir':'str', 'uri':'str',
+ 'timeout':'str', 'compress':'bool', 'excludes':'list',
+ 'includes':'list', 'retries':'str', 'extra_opts':'list'},
+ typename='syncer')
+
+ def __init__(self, basedir, uri, timeout=default_timeout,
+ compress=False, excludes=(), includes=(),
+ retries=default_retries,
+ extra_opts=[]):
+
+ uri = uri.rstrip(os.path.sep) + os.path.sep
+ self.rsh, uri = self.parse_uri(uri)
+ base.ExternalSyncer.__init__(self, basedir, uri, default_verbosity=2)
+ self.hostname = self.parse_hostname(self.uri)
+ if self.rsh:
+ self.rsh = self.require_binary(self.rsh)
+ self.opts = list(self.default_opts)
+ self.opts.extend(extra_opts)
+ if compress:
+ self.opts.append("--compress")
+ self.opts.append("--timeout=%i" % int(timeout))
+ self.excludes = list(self.default_excludes) + list(excludes)
+ self.includes = list(self.default_includes) + list(includes)
+ self.retries = int(retries)
+ self.is_ipv6 = "--ipv6" in self.opts or "-6" in self.opts
+ self.is_ipv6 = self.is_ipv6 and socket.has_ipv6
+
+ @staticmethod
+ def parse_hostname(uri):
+ return uri[len("rsync://"):].split("@", 1)[-1].split("/", 1)[0]
+
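+ # Illustrative sketch (hypothetical uri): the user part and trailing path
+ # are stripped, leaving only the host.
+ #
+ # parse_hostname("rsync://anon@rsync.example.org/gentoo-portage")
+ #   -> "rsync.example.org"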
+ def _get_ips(self):
+ af_fam = socket.AF_INET
+ if self.is_ipv6:
+ af_fam = socket.AF_INET6
+ try:
+ for ipaddr in socket.getaddrinfo(self.hostname, None, af_fam,
+ socket.SOCK_STREAM):
+ if ipaddr[0] == socket.AF_INET6:
+ yield "[%s]" % ipaddr[4][0]
+ else:
+ yield ipaddr[4][0]
+
+ except socket.error, e:
+ raise base.syncer_exception(self.hostname, af_fam, str(e))
+
+
+ def _sync(self, verbosity, output_fd):
+ fd_pipes = {1:output_fd, 2:output_fd}
+ opts = list(self.opts)
+ if self.rsh:
+ opts.append("-e")
+ opts.append(self.rsh)
+ opts.extend("--exclude=%s" % x for x in self.excludes)
+ opts.extend("--include=%s" % x for x in self.includes)
+ if verbosity == 0:
+ opts.append("--quiet")
+ if verbosity >= 1:
+ opts.append("--progress")
+ if verbosity >= 2:
+ opts.append("--stats")
+ if verbosity >= 3:
+ opts.append("--verbose")
+
+ # zip limits to the shortest iterable.
+ for count, ip in zip(xrange(self.retries), self._get_ips()):
+ o = [self.binary_path,
+ self.uri.replace(self.hostname, ip, 1),
+ self.basedir] + opts
+
+ ret = self._spawn(o, fd_pipes)
+ if ret == 0:
+ return True
+ elif ret == 1:
+ # syntax error.
+ raise base.syncer_exception(o, "syntax error")
+ elif ret == 11:
+ raise base.syncer_exception("rsync returned error code of "
+ "11; this is an out of space exit code")
+ # need to do something here instead of just restarting...
+ # else:
+ # print ret
+
+
+class rsync_timestamp_syncer(rsync_syncer):
+
+ forcable = True
+
+ def __init__(self, *args, **kwargs):
+ rsync_syncer.__init__(self, *args, **kwargs)
+ self.last_timestamp = self.current_timestamp()
+
+ def current_timestamp(self, path=None):
+ """
+ @param path: override the default path for the timestamp to read
+ @return: string of the timestamp data
+ """
+ if path is None:
+ path = pjoin(self.basedir, "metadata", "timestamp.chk")
+ try:
+ return open(path).read().strip()
+ except IOError, oe:
+ if oe.errno not in (errno.ENOENT, errno.ENOTDIR):
+ raise
+ return None
+
+ def _sync(self, verbosity, output_fd, force=False):
+ doit = force or self.last_timestamp is None
+ ret = None
+ try:
+ if not doit:
+ basedir = self.basedir
+ uri = self.uri
+ new_timestamp = pjoin(self.basedir, "metadata",
+ ".tmp.timestamp.chk")
+ try:
+ self.basedir = new_timestamp
+ self.uri = pjoin(self.uri, "metadata", "timestamp.chk")
+ ret = rsync_syncer._sync(self, verbosity, output_fd)
+ finally:
+ self.basedir = basedir
+ self.uri = uri
+ doit = ret == False or self.last_timestamp != \
+ self.current_timestamp(new_timestamp)
+ if not doit:
+ return True
+ ret = rsync_syncer._sync(self, verbosity, output_fd)
+ finally:
+ if ret is not None:
+ if ret:
+ return ret
+ # ensure the timestamp is back to the old.
+ try:
+ path = pjoin(self.basedir, "metadata", "timestamp.chk")
+ if self.last_timestamp is None:
+ os.remove(path)
+ else:
+ open(pjoin(self.basedir, "metadata", "timestamp.chk"),
+ "w").write(self.last_timestamp)
+ except (IOError, OSError):
+ # don't care...
+ pass
+ return ret
diff --git a/pkgcore/sync/svn.py b/pkgcore/sync/svn.py
new file mode 100644
index 0000000..cefeace
--- /dev/null
+++ b/pkgcore/sync/svn.py
@@ -0,0 +1,40 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+from pkgcore.sync import base
+
+class svn_syncer(base.ExternalSyncer):
+
+ binary = "svn"
+
+ supported_uris = (
+ ('svn://', 5),
+ ('svn+', 5),
+ ('http+svn://', 5),
+ ('https+svn://', 5)
+ )
+
+ @staticmethod
+ def parse_uri(raw_uri):
+ if raw_uri.startswith("svn://"):
+ return True
+ elif raw_uri.startswith("http+svn://"):
+ return True
+ elif raw_uri.startswith("https+svn://"):
+ return True
+ elif raw_uri.startswith("svn+"):
+ if raw_uri.startswith("svn+:"):
+ raise base.uri_exception(raw_uri, "svn+:// isn't valid")
+ else:
+ raise base.uri_exception(raw_uri, "protocol unknown")
+ return True
+
+ def _sync(self, verbosity, output_fd):
+ uri = self.uri
+ if uri.startswith('svn+http://'):
+ uri = uri.replace('svn+http://', 'http://')
+ elif uri.startswith('svn+https://'):
+ uri = uri.replace('svn+https://', 'https://')
+ return 0 == self._spawn([self.binary_path, "co",
+ uri, self.basedir], {1:output_fd, 2:output_fd, 0:0})
+
diff --git a/pkgcore/util/__init__.py b/pkgcore/util/__init__.py
new file mode 100644
index 0000000..9ff5a09
--- /dev/null
+++ b/pkgcore/util/__init__.py
@@ -0,0 +1,4 @@
+# Copyright: 2005 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""misc. utility functions"""
diff --git a/pkgcore/util/bzip2.py b/pkgcore/util/bzip2.py
new file mode 100644
index 0000000..afbcb39
--- /dev/null
+++ b/pkgcore/util/bzip2.py
@@ -0,0 +1,69 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+bzip2 decompression/compression
+
+Where possible, this module defers to the cpython bz2 module- if it's not
+available, it resorts to executing bzip2 with tempfile arguments to do
+decompression and compression.
+
+Use this module unless it's absolutely critical that the bz2 module be used.
+"""
+
+
+from snakeoil.demandload import demandload
+demandload(globals(),
+ 'tempfile',
+ 'pkgcore.spawn:find_binary,spawn_get_output',
+)
+
+def process_compress(in_data, compress_level=9):
+ fd = None
+ fd = tempfile.TemporaryFile("w+")
+ fd.write(in_data)
+ fd.flush()
+ fd.seek(0)
+ try:
+ ret, data = spawn_get_output(
+ ["bzip2", "-%ic" % compress_level],
+ fd_pipes={0:fd.fileno()}, split_lines=False)
+ if ret != 0:
+ raise ValueError("failed compressing the data")
+ return data
+ finally:
+ if fd is not None:
+ fd.close()
+
+def process_decompress(in_data):
+ fd = None
+ fd = tempfile.TemporaryFile("wb+")
+ fd.write(in_data)
+ fd.flush()
+ fd.seek(0)
+ try:
+ ret, data = spawn_get_output(
+ ["bzip2", "-dc"], fd_pipes={0:fd.fileno()}, split_lines=False)
+ if ret != 0:
+ raise ValueError("failed decompressing the data")
+ return data
+ finally:
+ if fd is not None:
+ fd.close()
+
+# Unused import
+# pylint: disable-msg=W0611
+
+try:
+ from bz2 import compress, decompress
+except ImportError:
+ # We need this because if we are not native then TarFile.bz2open will fail
+ # (and some code needs to be able to check that).
+ native = False
+ # trigger it to throw a CommandNotFound if missing
+ find_binary("bzip2")
+ compress = process_compress
+ decompress = process_decompress
+else:
+ native = True
+
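+# Illustrative usage sketch: whichever backend is selected above, compress()
+# and decompress() remain a plain round trip over byte strings.
+#
+# data = "x" * 4096
+# assert decompress(compress(data)) == data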
diff --git a/pkgcore/util/commandline.py b/pkgcore/util/commandline.py
new file mode 100644
index 0000000..f6cd123
--- /dev/null
+++ b/pkgcore/util/commandline.py
@@ -0,0 +1,425 @@
+# Copyright: 2006 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+
+"""Utilities for writing commandline utilities.
+
+pkgcore scripts should use the L{OptionParser} subclass here for a
+consistent commandline "look and feel" (and it tries to make life a
+bit easier too). They will probably want to use L{main} from an C{if
+__name__ == '__main__'} block too: it will take care of things like
+consistent exception handling.
+
+See dev-notes/commandline.rst for more complete documentation.
+"""
+
+
+import sys
+import os.path
+import logging
+
+from pkgcore.config import load_config, errors
+from snakeoil import formatters, demandload, fix_copy
+fix_copy.inject_copy()
+import optparse
+
+demandload.demandload(globals(),
+ 'snakeoil.fileutils:iter_read_bash',
+ 'pkgcore:version',
+ 'pkgcore.config:basics',
+ 'pkgcore.restrictions:packages',
+ 'pkgcore.util:parserestrict',
+)
+
+
+CONFIG_LOADED_MSG = (
+ 'Configuration already loaded. If moving the option earlier '
+ 'on the commandline does not fix this report it as a bug.')
+
+
+class FormattingHandler(logging.Handler):
+
+ """Logging handler printing through a formatter."""
+
+ def __init__(self, formatter):
+ logging.Handler.__init__(self)
+ # "formatter" clashes with a Handler attribute.
+ self.out = formatter
+
+ def emit(self, record):
+ if record.levelno >= logging.ERROR:
+ color = 'red'
+ elif record.levelno >= logging.WARNING:
+ color = 'yellow'
+ else:
+ color = 'cyan'
+ first_prefix = (self.out.fg(color), self.out.bold, record.levelname,
+ self.out.reset, ' ', record.name, ': ')
+ later_prefix = (len(record.levelname) + len(record.name)) * ' ' + ' : '
+ self.out.first_prefix.extend(first_prefix)
+ self.out.later_prefix.append(later_prefix)
+ try:
+ for line in self.format(record).split('\n'):
+ self.out.write(line, wrap=True)
+ finally:
+ self.out.later_prefix.pop()
+ for i in xrange(len(first_prefix)):
+ self.out.first_prefix.pop()
+
+
+# Mix in object here or properties do not work (Values is an oldstyle class).
+class Values(optparse.Values, object):
+
+ """Values with an autoloaded config property.
+
+ If you do not want the config autoloaded you can set the _config
+ attribute like this:
+
+ >>> parser = OptionParser()
+ >>> vals = parser.get_default_values()
+ >>> vals._config = my_custom_central
+ >>> parser.parse_args(args, vals)
+ """
+
+ def __init__(self, defaults=None):
+ optparse.Values.__init__(self, defaults)
+ self.new_config = {}
+ self.add_config = {}
+
+ def load_config(self):
+ """Override this if you need a different way of loading config."""
+ # This makes mixing --new-config and --add-config sort of
+ # work. Not sure if that is a good thing, but detecting and
+ # erroring is about as much work as making it mostly work :)
+ new_config = dict(
+ (name, basics.ConfigSectionFromStringDict(val))
+ for name, val in self.new_config.iteritems())
+ add_config = {}
+ for name, config in self.add_config.iteritems():
+ inherit = config.pop('inherit', None)
+ # XXX this will likely not be quite correctly quoted.
+ if inherit is None:
+ config['inherit'] = repr(name)
+ else:
+ config['inherit'] = '%s %r' % (inherit, name)
+ add_config[name] = basics.ConfigSectionFromStringDict(config)
+ # Triggers failures if these get mucked with after this point
+ # (instead of silently ignoring).
+ self.add_config = self.new_config = None
+ return load_config(
+ debug=self.debug, prepend_sources=(add_config, new_config),
+ skip_config_files=self.empty_config)
+
+ @property
+ def config(self):
+ try:
+ return self._config
+ except AttributeError:
+ self._config = self.load_config()
+ return self._config
+
+
+def read_file_callback(option, opt_str, value, parser):
+ """Read a file ignoring comments."""
+ if not os.path.isfile(value):
+ raise optparse.OptionValueError("'%s' is not a file" % value)
+ setattr(parser.values, option.dest, iter_read_bash(value))
+
+
+def config_callback(option, opt_str, value, parser, typename, typedesc=None):
+ """Retrieve a config section.
+
+ Pass the typename of the section as callback_args=('typename',),
+ and set type='string'. You can optionally pass a human-readable
+ typename as second element of callback_args.
+ """
+ if typedesc is None:
+ typedesc = typename
+ mapping = getattr(parser.values.config, typename)
+ try:
+ result = mapping[value]
+ except KeyError:
+ raise optparse.OptionValueError(
+ '%r is not a valid %s for %s (valid values: %s)' % (
+ value, typedesc, opt_str, ', '.join(repr(key)
+ for key in mapping)))
+ setattr(parser.values, option.dest, result)
+
+
+def config_append_callback(option, opt_str, value, parser, typename,
+ typedesc=None):
+ """Like L{config_callback} but appends instead of sets."""
+ if typedesc is None:
+ typedesc = typename
+ mapping = getattr(parser.values.config, typename)
+ try:
+ result = mapping[value]
+ except KeyError:
+ raise optparse.OptionValueError(
+ '%r is not a valid %s for %s (valid values: %s)' % (
+ value, typedesc, opt_str, ', '.join(repr(key)
+ for key in mapping)))
+ parser.values.ensure_value(option.dest, []).append(result)
+
+
+def debug_callback(option, opt_str, value, parser):
+ """Make sure the config central uses debug mode.
+
+ We do this because it is possible to access config from an option
+ callback before the entire commandline is parsed. This callback
+ makes sure any config usage after optparse hit the --debug switch
+ is properly in debug mode.
+
+ Ideally we would not need this, since needing this means things
+ accessing config too early still get the wrong debug setting. But
+ doing that would mean either (crappily) parsing the commandline
+ before optparse does or making config access from option callbacks
+ illegal. The former is hard to get "right" (impossible to get
+ completely "right" since you cannot know how many arguments an
+ option with a callback consumes without calling it) and the latter
+ is unwanted because accessing config from callbacks is useful
+ (pcheck will do this at the time of writing).
+ """
+ parser.values.debug = True
+ config = parser.values.config
+ config.debug = True
+ logging.root.setLevel(logging.DEBUG)
+ for collapsed in config.collapsed_configs.itervalues():
+ collapsed.debug = True
+
+
+def new_config_callback(option, opt_str, value, parser):
+ """Add a configsection to our values object.
+
+ Munges three arguments: section name, key name, value.
+
+ dest defines an attr name on the values object to store in.
+ """
+ if getattr(parser.values, '_config', None) is not None:
+ raise optparse.OptionValueError(CONFIG_LOADED_MSG)
+ section_name, key, val = value
+ section = getattr(parser.values, option.dest).setdefault(section_name, {})
+ if key in section:
+ raise optparse.OptionValueError(
+ '%r is already set (to %r)' % (key, section[key]))
+ section[key] = val
+
+
+def empty_config_callback(option, opt_str, value, parser):
+ """Remember not to load the user/system configuration.
+
+ Error out if we have already loaded it.
+ """
+ if getattr(parser.values, '_config', None) is not None:
+ raise optparse.OptionValueError(CONFIG_LOADED_MSG)
+ parser.values.empty_config = True
+
+
+class Option(optparse.Option):
+
+ def __init__(self, *args, **kwargs):
+ self.long_help = kwargs.pop('long_help', None)
+ optparse.Option.__init__(self, *args, **kwargs)
+
+
+class OptionParser(optparse.OptionParser):
+
+ """Our common OptionParser subclass.
+
+ Adds some common options, makes options that get "append"ed
+ default to an empty sequence instead of None, uses our custom
+ Values class with the config property.
+ """
+
+ # You can set this on an instance or subclass to use a different class.
+ values_class = Values
+
+ standard_option_list = optparse.OptionParser.standard_option_list + [
+ Option(
+ '--debug', '-d', action='callback', callback=debug_callback,
+ help='print some extra info useful for pkgcore devs. You may have '
+ 'to set this as first argument for debugging certain '
+ 'configuration problems.'),
+ Option('--nocolor', action='store_true',
+ help='disable color in the output.'),
+ Option('--version', action='version'),
+ Option(
+ '--add-config', action='callback', callback=new_config_callback,
+ type='str', nargs=3, help='Add a new configuration section. '
+ 'Takes three arguments: section name, value name, value.'),
+ Option(
+ '--new-config', action='callback', callback=new_config_callback,
+ type='str', nargs=3, help='Expand a configuration section. '
+ 'Just like --add-config but with an implied inherit=sectionname.'),
+ Option(
+ '--empty-config', action='callback',
+ callback=empty_config_callback,
+ help='Do not load the user or system configuration. Can be useful '
+ 'combined with --new-config.')
+ ]
+
+ def __init__(self, *args, **kwargs):
+ """Initialize."""
+ kwargs.setdefault('option_class', Option)
+ optparse.OptionParser.__init__(self, *args, **kwargs)
+ # It is a callback so it cannot set a default value the "normal" way.
+ self.set_default('debug', False)
+ self.set_default('empty_config', False)
+
+ def get_version(self):
+ """Add pkgcore's version to the version information."""
+ ver = optparse.OptionParser.get_version(self)
+ pkgcore_ver = version.get_version()
+ if ver:
+ return '\n'.join((ver, pkgcore_ver))
+ return pkgcore_ver
+
+ def print_version(self, file=None):
+ """Print the version to a filelike (defaults to stdout).
+
+ Overridden because the optparse one is a noop if self.version is false.
+ """
+ print >> file, self.get_version()
+
+ def _add_version_option(self):
+ """Override this to be a no-op.
+
+ Needed because optparse does not like our on-demand generation
+ of the version string.
+ """
+
+ def get_default_values(self):
+ """Slightly simplified copy of optparse code using our Values class."""
+ # Needed because optparse has the Values class hardcoded in
+ # (and no obvious way to get the defaults set on an existing
+ # Values instance).
+ defaults = self.defaults.copy()
+ for option in self._get_all_options():
+ default = defaults.get(option.dest)
+ if isinstance(default, basestring):
+ opt_str = option.get_opt_string()
+ defaults[option.dest] = option.check_value(opt_str, default)
+ return self.values_class(defaults)
+
+ def check_values(self, values, args):
+ """Do some basic sanity checking.
+
+ optparse defaults unset lists to None. An empty sequence is
+ much more convenient (lets you use them in a for loop without
+ a None check) so we fix those up (based on action "append").
+ """
+ for container in self.option_groups + [self]:
+ for option in container.option_list:
+ if option.action == 'append':
+ values.ensure_value(option.dest, [])
+ return values, args
+
+class MySystemExit(SystemExit):
+ """Subclass of SystemExit the tests can safely catch."""
+
+
+def main(subcommands, args=None, outfile=sys.stdout, errfile=sys.stderr,
+ script_name=None):
+ """Function to use in an "if __name__ == '__main__'" block in a script.
+
+ Takes one or more combinations of option parser and main func and
+ runs them, taking care of exception handling and some other things.
+
+ Any ConfigurationErrors raised from your function (by the config
+ manager) are handled. Other exceptions are not (trigger a traceback).
+
+ @type subcommands: mapping of string => (OptionParser class, main func)
+ @param subcommands: available commands.
+ The keys are a subcommand name or None for other/unknown/no subcommand.
+ The values are tuples of OptionParser subclasses and functions called
+ as main_func(config, out, err) with a L{Values} instance, two
+ L{snakeoil.formatters.Formatter} instances for output (stdout)
+ and errors (stderr). It should return an integer used as
+ exit status or None as synonym for 0.
+ @type args: sequence of strings
+ @param args: arguments to parse, defaulting to C{sys.argv[1:]}.
+ @type outfile: file-like object
+ @param outfile: File to use for stdout, defaults to C{sys.stdout}.
+ @type errfile: file-like object
+ @param errfile: File to use for stderr, defaults to C{sys.stderr}.
+ @type script_name: string
+ @param script_name: basename of this script, defaults to the basename
+ of C{sys.argv[0]}.
+ """
+ exitstatus = 1
+ if args is None:
+ args = sys.argv[1:]
+ if script_name is None:
+ prog = os.path.basename(sys.argv[0])
+ else:
+ prog = script_name
+ parser_class = None
+ if args:
+ parser_class, main_func = subcommands.get(args[0], (None, None))
+ if parser_class is not None:
+ prog = '%s %s' % (prog, args[0])
+ args = args[1:]
+ if parser_class is None:
+ try:
+ parser_class, main_func = subcommands[None]
+ except KeyError:
+ # This tries to print in a format very similar to optparse --help.
+ errfile.write(
+ 'Usage: %s <command>\n\nCommands:\n' % (prog,))
+ maxlen = max(len(subcommand) for subcommand in subcommands) + 1
+ for subcommand, (parser, main) in sorted(subcommands.iteritems()):
+ doc = main.__doc__
+ if doc is None:
+ errfile.write(' %s\n' % (subcommand,))
+ else:
+ doc = doc.split('\n', 1)[0]
+ errfile.write(' %-*s %s\n' % (maxlen, subcommand, doc))
+ errfile.write(
+ '\nUse --help after a subcommand for more help.\n')
+ raise MySystemExit(1)
+ options = None
+ option_parser = parser_class(prog=prog)
+ out = None
+ try:
+ options, args = option_parser.parse_args(args)
+ # Checked here and not in OptionParser because we want our
+ # check_values to run before the user's, not after it.
+ if args:
+ option_parser.error("I don't know what to do with %s" %
+ (' '.join(args),))
+ else:
+ if options.nocolor:
+ formatter_factory = formatters.PlainTextFormatter
+ else:
+ formatter_factory = formatters.get_formatter
+ out = formatter_factory(outfile)
+ err = formatter_factory(errfile)
+ if logging.root.handlers:
+ # Remove the default handler.
+ logging.root.handlers.pop(0)
+ logging.root.addHandler(FormattingHandler(err))
+ exitstatus = main_func(options, out, err)
+ except errors.ConfigurationError, e:
+ if options is not None and options.debug:
+ raise
+ errfile.write('Error in configuration:\n%s\n' % (e,))
+ except KeyboardInterrupt:
+ if options is not None and options.debug:
+ raise
+ if out is not None:
+ if exitstatus:
+ out.title('%s failed' % (prog,))
+ else:
+ out.title('%s succeeded' % (prog,))
+ raise MySystemExit(exitstatus)
+
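+# Illustrative usage sketch (hypothetical parser class and function names):
+# a script's entry point hands main() a mapping of subcommand name to
+# (OptionParser subclass, main function); None covers the no-subcommand case.
+#
+# if __name__ == '__main__':
+#     main({None: (MyOptionParser, my_main)})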
+def convert_to_restrict(sequence, default=packages.AlwaysTrue):
+ """Convert an iterable to a list of atoms, or return the default"""
+ l = []
+ try:
+ for x in sequence:
+ l.append(parserestrict.parse_match(x))
+ except parserestrict.ParseError, e:
+ raise optparse.OptionValueError("arg %r isn't a valid atom: %s"
+ % (x, e))
+ return l or [default]
diff --git a/pkgcore/util/packages.py b/pkgcore/util/packages.py
new file mode 100644
index 0000000..bbdd649
--- /dev/null
+++ b/pkgcore/util/packages.py
@@ -0,0 +1,15 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+import itertools, operator
+
+def get_raw_pkg(pkg):
+ p = pkg
+ while hasattr(p, "_raw_pkg"):
+ p = p._raw_pkg
+ return p
+
+groupby_key_getter = operator.attrgetter("key")
+def groupby_pkg(iterable):
+ for key, pkgs in itertools.groupby(iterable, groupby_key_getter):
+ yield pkgs
diff --git a/pkgcore/util/parserestrict.py b/pkgcore/util/parserestrict.py
new file mode 100644
index 0000000..4f7f17c
--- /dev/null
+++ b/pkgcore/util/parserestrict.py
@@ -0,0 +1,153 @@
+# Copyright: 2005-2006 Brian Harring <ferringb@gmail.com>
+# Copyright: 2006 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+"""Functions that turn a string into a restriction or raise ParseError.
+
+@var parse_funcs: dict of the functions that are available.
+"""
+
+from pkgcore.restrictions import packages, values, util
+from pkgcore.package import errors
+from pkgcore.ebuild import atom, cpv, errors
+from snakeoil.containers import InvertedContains
+
+
+class ParseError(ValueError):
+ """Raised if parsing a restriction expression failed."""
+
+
+def comma_separated_containment(attr):
+ """Helper for parsing comma-separated strings to a ContainmentMatch.
+
+ @param attr: name of the attribute.
+ @returns: a parse function: takes a string of comma-separated values,
+ returns a L{packages.PackageRestriction} matching packages that
+ have any of those values in the attribute passed to this function.
+ """
+ def _parse(value):
+ return packages.PackageRestriction(attr, values.ContainmentMatch(*(
+ piece.strip() for piece in value.split(','))))
+ return _parse
+
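+# Illustrative usage sketch (hypothetical attribute and values): build a
+# restriction matching packages whose 'keywords' attribute contains any of
+# the listed values.
+#
+# parse_keywords = comma_separated_containment("keywords")
+# restrict = parse_keywords("x86, amd64")
+# # -> packages.PackageRestriction("keywords",
+# #                                values.ContainmentMatch("x86", "amd64"))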
+
+def convert_glob(token):
+ if '*' in token[1:-1]:
+ raise ParseError(
+ "'*' must be specified at the end or beginning of a matching field")
+ l = len(token)
+ if token.startswith("*") and l > 1:
+ if token.endswith("*"):
+ if l == 2:
+ return None
+ return values.ContainmentMatch(token.strip("*"))
+ return values.StrGlobMatch(token.strip("*"), prefix=False)
+ elif token.endswith("*") and l > 1:
+ return values.StrGlobMatch(token.strip("*"), prefix=True)
+ elif l <= 1:
+ return None
+ return values.StrExactMatch(token)
+
+def collect_ops(text):
+ i = 0
+ while text[i] in ("<", "=", ">", "~"):
+ i += 1
+ return text[0:i], text[i:]
+
+def parse_match(text):
+
+ """generate appropriate restriction for text
+
+ Parsing basically breaks it down into chunks split by /, with each
+ chunk allowing for prefix/postfix globbing- note that a postfixed
+ glob on package token is treated as package attribute matching,
+ B{not} as necessarily a version match.
+
+ If only one chunk is found, it's treated as a package chunk.
+ Finally, it supports a nonstandard variation of atom syntax where
+ the category can be dropped.
+
+ Examples-
+ - "*": match all
+ - "dev-*/*": category must start with dev-
+ - "dev-*": package must start with dev-
+ - *-apps/portage*: category must end in -apps,
+ package must start with portage
+ - >=portage-2.1: atom syntax, package portage,
+ version greater than or equal to 2.1
+
+ @param text: string to attempt to parse
+ @type text: string
+ @return: L{package restriction<pkgcore.restrictions.packages>} derivative
+ """
+
+ orig_text = text = text.strip()
+ if "!" in text:
+ raise ParseError(
+ "!, or any form of blockers make no sense in this usage: %s" % (
+ text,))
+ tsplit = text.rsplit("/", 1)
+ if len(tsplit) == 1:
+ ops, text = collect_ops(text)
+ if not ops:
+ if "*" in text:
+ r = convert_glob(text)
+ if r is None:
+ return packages.AlwaysTrue
+ return packages.PackageRestriction("package", r)
+ elif text.startswith("*"):
+ raise ParseError(
+ "cannot do prefix glob matches with version ops: %s" % (
+ orig_text,))
+ # ok... fake category. whee.
+ try:
+ r = list(util.collect_package_restrictions(
+ atom.atom("%scategory/%s" % (ops, text)).restrictions,
+ attrs=InvertedContains(["category"])))
+ except errors.MalformedAtom, e:
+ raise ParseError(str(e))
+ if len(r) == 1:
+ return r[0]
+ return packages.AndRestriction(*r)
+ elif text[0] in "=<>~":
+ return atom.atom(text)
+ if "*" not in text:
+ try:
+ return atom.atom(text)
+ except errors.InvalidCPV, e:
+ raise ParseError(str(e))
+ r = map(convert_glob, tsplit)
+ if not r[0] and not r[1]:
+ return packages.AlwaysTrue
+ if not r[0]:
+ return packages.PackageRestriction("package", r[1])
+ elif not r[1]:
+ return packages.PackageRestriction("category", r[0])
+ return packages.AndRestriction(
+ packages.PackageRestriction("category", r[0]),
+ packages.PackageRestriction("package", r[1]))
+
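+# Illustrative sketch of what the docstring examples above map to:
+#
+# parse_match("*")  # -> packages.AlwaysTrue
+# parse_match("dev-util/*")  # -> a "category" restriction on dev-util
+# parse_match(">=dev-lang/python-2.4")  # -> an atom.atom instance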
+
+def parse_pv(repo, text):
+ """Return a CPV instance from either a cpv or a pv string.
+
+ If a pv is passed it needs to match a single cpv in repo.
+ """
+ try:
+ return cpv.CPV(text)
+ except errors.InvalidCPV:
+ restrict = parse_match('=%s' % (text,))
+ result = None
+ for match in repo.itermatch(restrict):
+ if result is not None:
+ raise ParseError('multiple matches for %s (%s, %s)' % (
+ text, result.cpvstr, match.cpvstr))
+ result = match
+ if result is None:
+ raise ParseError('no matches for %s' % (text,))
+ return cpv.CPV(result.category, result.package, result.version)
+
+
+parse_funcs = {
+ 'match': parse_match,
+ }
diff --git a/pkgcore/util/repo_utils.py b/pkgcore/util/repo_utils.py
new file mode 100644
index 0000000..2858ac4
--- /dev/null
+++ b/pkgcore/util/repo_utils.py
@@ -0,0 +1,35 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+misc. repository related tools
+"""
+
+from pkgcore.repository import virtual
+
+def get_raw_repos(repo):
+ """
+ returns a list of raw repos found.
+ repo can be either a repo instance, or a list
+ """
+ if isinstance(repo, (list, tuple)):
+ l = []
+ map(l.extend, (get_raw_repos(x) for x in repo))
+ return l
+ while getattr(repo, "raw_repo", None) is not None:
+ repo = repo.raw_repo
+ if hasattr(repo, "trees"):
+ l = []
+ map(l.extend, (get_raw_repos(x) for x in repo.trees))
+ return l
+ return [repo]
+
+def get_virtual_repos(repo, sentinel=True):
+ """
+ select only virtual repos
+ repo can be either a list, or a repo to descend through.
+ if sentinel is False, will select all non virtual repos
+ """
+ if not isinstance(repo, (tuple, list)):
+ repo = get_raw_repos(repo)
+ return [x for x in repo if isinstance(x, virtual.tree) == sentinel]
diff --git a/pkgcore/vdb/__init__.py b/pkgcore/vdb/__init__.py
new file mode 100644
index 0000000..6ffee72
--- /dev/null
+++ b/pkgcore/vdb/__init__.py
@@ -0,0 +1,30 @@
+# Copyright: 2005 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+from pkgcore.restrictions.packages import OrRestriction
+from pkgcore.repository import multiplex, virtual
+from pkgcore.vdb.ondisk import tree as vdb_repository
+from snakeoil.currying import partial
+
+def _grab_virtuals(parent_repo):
+ virtuals = {}
+ for pkg in parent_repo:
+ for virtualpkg in pkg.provides.evaluate_depset(pkg.use):
+ virtuals.setdefault(virtualpkg.package, {}).setdefault(
+ pkg.fullver, []).append(pkg)
+
+ for pkg_dict in virtuals.itervalues():
+ for full_ver, rdep_atoms in pkg_dict.iteritems():
+ if len(rdep_atoms) == 1:
+ pkg_dict[full_ver] = rdep_atoms[0].unversioned_atom
+ else:
+ pkg_dict[full_ver] = OrRestriction(
+ finalize=True, *[x.unversioned_atom for x in rdep_atoms])
+ return virtuals
+
+def repository(*args, **kwargs):
+ r = vdb_repository(*args, **kwargs)
+ return multiplex.tree(
+ r, virtual.tree(partial(_grab_virtuals, r), livefs=True))
+
+repository = vdb_repository
diff --git a/pkgcore/vdb/contents.py b/pkgcore/vdb/contents.py
new file mode 100644
index 0000000..d82887a
--- /dev/null
+++ b/pkgcore/vdb/contents.py
@@ -0,0 +1,180 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+from pkgcore.fs.contents import contentsSet
+from pkgcore.fs import fs
+from pkgcore.interfaces import data_source
+
+from snakeoil.fileutils import AtomicWriteFile
+from snakeoil.compatibility import any
+from snakeoil.demandload import demandload
+demandload(globals(),
+ 'os',
+ 'stat',
+ 'errno',
+ 'pkgcore.chksum:get_handler',
+ 'snakeoil.osutils:readlines',
+ 'pkgcore:os_data',
+)
+
+class LookupFsDev(fs.fsDev):
+
+ def __init__(self, path, **kwds):
+ if any(x not in kwds for x in ("major", "minor", "mode")):
+ try:
+ st = os.lstat(path)
+ except OSError, oe:
+ if oe.errno != errno.ENOENT:
+ raise
+ st = None
+ if st is None or any(f(st.st_mode) for f in
+ (stat.S_ISREG, stat.S_ISDIR, stat.S_ISFIFO)):
+ kwds["strict"] = True
+ else:
+ major, minor = fs.get_major_minor(st)
+ kwds["major"] = major
+ kwds["minor"] = minor
+ kwds["mode"] = st.st_mode
+ fs.fsDev.__init__(self, path, **kwds)
+
+
+class ContentsFile(contentsSet):
+ """class wrapping a contents file"""
+
+ def __init__(self, source, mutable=False, create=False):
+
+ if not isinstance(source, (data_source.base, basestring)):
+ raise TypeError("source must be either data_source, or a filepath")
+ contentsSet.__init__(self, mutable=True)
+ self._source = source
+
+ if not create:
+ self._read()
+
+ self.mutable = mutable
+
+ def clone(self, empty=False):
+ # create is used to block it from reading.
+ cset = self.__class__(self._source, mutable=True, create=True)
+ if not empty:
+ cset.update(self)
+ return cset
+
+ def add(self, obj):
+ if isinstance(obj, fs.fsFile):
+ # strict checks
+ if obj.chksums is None or "md5" not in obj.chksums:
+ raise TypeError("fsFile objects need to be strict")
+ elif not isinstance(obj, (fs.fsDir, fs.fsSymlink, fs.fsFifo, fs.fsDev)):
+ raise TypeError(
+ "obj must be of fsObj, fsDir, fsLink, fsFifo, fsDev class "
+ "or derivative")
+
+ contentsSet.add(self, obj)
+
+ def _get_fd(self, write=False):
+ if isinstance(self._source, basestring):
+ if write:
+ return AtomicWriteFile(self._source, uid=os_data.root_uid,
+ gid=os_data.root_gid, perms=0644)
+ return readlines(self._source, True)
+ fobj = self._source.get_fileobj()
+ if write:
+ fobj.seek(0, 0)
+ fobj.truncate(0)
+ return fobj
+
+ def flush(self):
+ return self._write()
+
+ def _parse_old(self, line):
+ """parse old contents, non tab based format"""
+ # specifically force splitting on spaces.
+ s = line.split()
+ if not s:
+ # stupid; just whitespace/newline. ignore it.
+ return None
+ if s[0] in ("dir", "dev", "fif"):
+ return s[0], ' '.join(s[1:])
+ elif s[0] == "obj":
+ return "obj", ' '.join(s[1:-2]), s[-2], s[-1]
+ elif s[0] == "sym":
+ try:
+ p = s.index("->")
+ return "sym", ' '.join(s[1:p]), ' '.join(s[p+1:-1]), long(s[-1])
+
+ except ValueError:
+ # XXX throw a corruption error
+ raise
+ else:
+ return s[0], ' '.join(s[1:])
+
+ def _read(self):
+ self.clear()
+ for line in self._get_fd():
+ line = self._parse_old(line)
+ if line is None:
+ continue
+# if "\t" not in line:
+# line = self._parse_old(line)
+# else:
+# line = line.split("\t")
+
+ if line[0] == "dir":
+ obj = fs.fsDir(line[1], strict=False)
+ elif line[0] == "fif":
+ obj = fs.fsDir(line[1], strict=False)
+ elif line[0] == "dev":
+ obj = LookupFsDev(line[1], strict=False)
+ elif line[0] == "obj":
+ #file: path, md5, time
+ obj = fs.fsFile(
+ line[1], chksums={"md5":long(line[2], 16)},
+ mtime=long(line[3]),
+ strict=False)
+ elif line[0] == "sym":
+ #path, target, ' -> ', mtime
+ obj = fs.fsLink(
+ line[1], line[2], mtime=line[3], strict=False)
+ else:
+ if len(line) > 2:
+ line = line[0], ' '.join(line[1:])
+ raise Exception(
+ "unknown entry type %s: %s" % (line[0], line[1]))
+ self.add(obj)
+
+ def _write(self):
+ md5_handler = get_handler('md5')
+ outfile = None
+ try:
+ outfile = self._get_fd(True)
+
+ for obj in sorted(self):
+
+ if isinstance(obj, fs.fsFile):
+ s = " ".join(("obj", obj.location,
+ md5_handler.long2str(obj.chksums["md5"]),
+ str(long(obj.mtime))))
+
+ elif isinstance(obj, fs.fsLink):
+ s = " ".join(("sym", obj.location, "->",
+ obj.target, str(long(obj.mtime))))
+
+ elif isinstance(obj, fs.fsDir):
+ s = "dir " + obj.location
+
+ elif isinstance(obj, fs.fsDev):
+ s = "dev " + obj.location
+
+ elif isinstance(obj, fs.fsFifo):
+ s = "fif " + obj.location
+
+ else:
+ raise Exception(
+ "unknown type %s: %s" % (type(obj), obj))
+ outfile.write(s + "\n")
+ outfile.close()
+
+ finally:
+ # if atomic, it forces the update to be wiped.
+ del outfile
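+# Illustrative sketch of the line formats handled above (made-up paths,
+# checksum and mtimes):
+#
+#   dir /usr/share/doc/foo-1.0
+#   obj /usr/share/doc/foo-1.0/README d41d8cd98f00b204e9800998ecf8427e 1190000000
+#   sym /usr/lib/libfoo.so -> libfoo.so.1 1190000000
+#   fif /var/run/foo.fifo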
diff --git a/pkgcore/vdb/ondisk.py b/pkgcore/vdb/ondisk.py
new file mode 100644
index 0000000..d0964b7
--- /dev/null
+++ b/pkgcore/vdb/ondisk.py
@@ -0,0 +1,201 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+import os, stat, errno
+
+from pkgcore.repository import prototype, errors
+from pkgcore.vdb import virtuals
+from pkgcore.plugin import get_plugin
+from pkgcore.interfaces import data_source
+from pkgcore.repository import multiplex
+from pkgcore.config import ConfigHint
+#needed to grab the PN
+from pkgcore.ebuild.cpv import CPV as cpv
+
+from snakeoil.osutils import pjoin
+from snakeoil.mappings import IndeterminantDict
+from snakeoil.currying import partial
+from snakeoil.osutils import listdir_dirs, readfile
+from pkgcore.util import bzip2
+from snakeoil.demandload import demandload
+demandload(globals(),
+ 'pkgcore.vdb:repo_ops',
+ 'pkgcore.vdb.contents:ContentsFile',
+)
+
+
+class bz2_data_source(data_source.base):
+
+ def __init__(self, location, mutable=False):
+ data_source.base.__init__(self)
+ self.location = location
+ self.mutable = mutable
+
+ def get_fileobj(self):
+ data = bzip2.decompress(readfile(self.location))
+ if self.mutable:
+ return data_source.write_StringIO(self._set_data, data)
+ return data_source.read_StringIO(data)
+
+ def _set_data(self, data):
+ open(self.location, "wb").write(bzip2.compress(data))
+
+
+class tree(prototype.tree):
+ livefs = True
+ configured = False
+ configurables = ("domain", "settings")
+ configure = None
+ format_magic = "ebuild_built"
+
+ pkgcore_config_type = ConfigHint({'location': 'str',
+ 'cache_location': 'str', 'repo_id':'str',
+ 'disable_cache': 'bool'}, typename='repo')
+
+ def __init__(self, location, cache_location=None, repo_id='vdb',
+ disable_cache=False):
+ prototype.tree.__init__(self, frozen=False)
+ self.repo_id = repo_id
+ self.base = self.location = location
+ if disable_cache:
+ cache_location = None
+ elif cache_location is None:
+ cache_location = pjoin("/var/cache/edb/dep",
+ location.lstrip("/"))
+ self.cache_location = cache_location
+ self._versions_tmp_cache = {}
+ try:
+ st = os.stat(self.base)
+ if not stat.S_ISDIR(st.st_mode):
+ raise errors.InitializationError(
+ "base not a dir: %r" % self.base)
+ elif not st.st_mode & (os.X_OK|os.R_OK):
+ raise errors.InitializationError(
+ "base lacks read/executable: %r" % self.base)
+
+ except OSError:
+ raise errors.InitializationError(
+ "lstat failed on base %r" % self.base)
+
+ self.package_class = get_plugin('format.' + self.format_magic)(self)
+
+ def _get_categories(self, *optional_category):
+ # return if optional_category is passed... cause it's not yet supported
+ if optional_category:
+ return {}
+ try:
+ try:
+ return tuple(x for x in listdir_dirs(self.base) if not
+ x.startswith('.'))
+ except (OSError, IOError), e:
+ raise KeyError("failed fetching categories: %s" % str(e))
+ finally:
+ pass
+
+ def _get_packages(self, category):
+ cpath = pjoin(self.base, category.lstrip(os.path.sep))
+ l = set()
+ d = {}
+ try:
+ for x in listdir_dirs(cpath):
+ if x.startswith(".tmp.") or x.endswith(".lockfile") \
+ or x.startswith("-MERGING-"):
+ continue
+ x = cpv(category+"/"+x)
+ l.add(x.package)
+ d.setdefault((category, x.package), []).append(x.fullver)
+ except (OSError, IOError), e:
+ raise KeyError("failed fetching packages for category %s: %s" % \
+ (pjoin(self.base, category.lstrip(os.path.sep)), str(e)))
+
+ self._versions_tmp_cache.update(d)
+ return tuple(l)
+
+ def _get_versions(self, catpkg):
+ return tuple(self._versions_tmp_cache.pop(catpkg))
+
+ def _get_ebuild_path(self, pkg):
+ s = "%s-%s" % (pkg.package, pkg.fullver)
+ return pjoin(self.base, pkg.category, s, s+".ebuild")
+
+ _metadata_rewrites = {
+ "depends":"DEPEND", "rdepends":"RDEPEND", "post_rdepends":"PDEPEND",
+ "use":"USE", "eapi":"EAPI", "CONTENTS":"contents", "provides":"PROVIDE"}
+
+ def _get_metadata(self, pkg):
+ return IndeterminantDict(partial(self._internal_load_key,
+ pjoin(self.base, pkg.category,
+ "%s-%s" % (pkg.package, pkg.fullver))))
+
+ def _internal_load_key(self, path, key):
+ key = self._metadata_rewrites.get(key, key)
+ if key == "contents":
+ data = ContentsFile(pjoin(path, "CONTENTS"), mutable=True)
+ elif key == "environment":
+ fp = pjoin(path, key)
+ if not os.path.exists(fp+".bz2"):
+ if not os.path.exists(fp):
+ # icky.
+ raise KeyError("environment: no environment file found")
+ data = data_source.local_source(fp)
+ else:
+ data = bz2_data_source(fp+".bz2")
+ elif key == "ebuild":
+ fp = pjoin(path,
+ os.path.basename(path.rstrip(os.path.sep))+".ebuild")
+ data = data_source.local_source(fp)
+ else:
+ data = readfile(pjoin(path, key), True)
+ if data is None:
+ raise KeyError(key)
+ return data
+
+ def notify_remove_package(self, pkg):
+ remove_it = len(self.packages[pkg.category]) == 1
+ prototype.tree.notify_remove_package(self, pkg)
+ if remove_it:
+ try:
+ os.rmdir(pjoin(self.base, pkg.category))
+ except OSError, oe:
+ if oe.errno != errno.ENOTEMPTY:
+ raise
+ # silently swallow it;
+ del oe
+
+ def __str__(self):
+ return '%s.%s: location %s' % (
+ self.__class__.__module__, self.__class__.__name__, self.base)
+
+
+class ConfiguredTree(multiplex.tree):
+
+ livefs = True
+
+ def __init__(self, raw_vdb, domain, domain_settings):
+ self.domain = domain
+ self.domain_settings = domain_settings
+ self.raw_vdb = raw_vdb
+ if raw_vdb.cache_location is not None:
+ self.old_style_virtuals = virtuals.caching_virtuals(raw_vdb,
+ raw_vdb.cache_location)
+ else:
+ self.old_style_virtuals = virtuals.non_caching_virtuals(raw_vdb)
+ multiplex.tree.__init__(self, raw_vdb, self.old_style_virtuals)
+ self.frozen = raw_vdb.frozen
+
+ def _install(self, pkg, *a, **kw):
+ # need to verify it's not in already...
+ kw['offset'] = self.domain.root
+ return repo_ops.install(self.domain_settings, self.raw_vdb, pkg, *a, **kw)
+
+ def _uninstall(self, pkg, *a, **kw):
+ kw['offset'] = self.domain.root
+ return repo_ops.uninstall(self.domain_settings, self.raw_vdb, pkg, *a, **kw)
+
+ def _replace(self, oldpkg, newpkg, *a, **kw):
+ kw['offset'] = self.domain.root
+ return repo_ops.replace(
+ self.domain_settings, self.raw_vdb, oldpkg, newpkg, *a, **kw)
+
+
+tree.configure = ConfiguredTree
diff --git a/pkgcore/vdb/repo_ops.py b/pkgcore/vdb/repo_ops.py
new file mode 100644
index 0000000..e61be54
--- /dev/null
+++ b/pkgcore/vdb/repo_ops.py
@@ -0,0 +1,157 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+import os, shutil
+
+from pkgcore.interfaces import repo as repo_interfaces
+
+from pkgcore.const import VERSION
+
+from snakeoil.osutils import ensure_dirs, pjoin
+from pkgcore.util import bzip2
+from snakeoil.demandload import demandload
+demandload(globals(),
+ 'time',
+ 'pkgcore.ebuild:conditionals',
+ 'pkgcore.ebuild:triggers',
+ 'pkgcore.log:logger',
+ 'pkgcore.fs.ops:change_offset_rewriter',
+ 'pkgcore.vdb.contents:ContentsFile',
+)
+
+
+def _get_default_ebuild_op_args_kwds(self):
+ return (dict(self.domain_settings),), {}
+
+def _default_customize_engine(op_inst, engine):
+ triggers.customize_engine(op_inst.domain_settings, engine)
+
+class install(repo_interfaces.livefs_install):
+
+ def __init__(self, domain_settings, repo, pkg, *a, **kw):
+ self.dirpath = pjoin(
+ repo.base, pkg.category, pkg.package+"-"+pkg.fullver)
+ self.domain_settings = domain_settings
+ repo_interfaces.livefs_install.__init__(self, repo, pkg, *a, **kw)
+
+ install_get_format_op_args_kwds = _get_default_ebuild_op_args_kwds
+ customize_engine = _default_customize_engine
+
+ def merge_metadata(self, dirpath=None):
+ # error checking?
+ if dirpath is None:
+ dirpath = self.dirpath
+ ensure_dirs(dirpath, mode=0755, minimal=True)
+ rewrite = self.repo._metadata_rewrites
+ for k in self.new_pkg.tracked_attributes:
+ if k == "contents":
+ v = ContentsFile(pjoin(dirpath, "CONTENTS"),
+ mutable=True, create=True)
+ # strip the offset.
+ if self.offset:
+ v.update(change_offset_rewriter(self.offset, '/',
+ self.me.csets["install"]))
+ else:
+ v.update(self.me.csets["install"])
+ v.flush()
+ elif k == "environment":
+ data = bzip2.compress(
+ self.new_pkg.environment.get_fileobj().read())
+                open(pjoin(dirpath, "environment.bz2"), "wb").write(data)
+ del data
+ else:
+ v = getattr(self.new_pkg, k)
+ if k == 'provides':
+ versionless_providers = lambda b:b.key
+ s = conditionals.stringify_boolean(v,
+ func=versionless_providers)
+ elif not isinstance(v, basestring):
+ try:
+ s = ' '.join(v)
+ except TypeError:
+ s = str(v)
+ else:
+ s = v
+ open(pjoin(
+ dirpath,
+ rewrite.get(k, k.upper())), "w", 32768).write(s)
+
+ # ebuild_data is the actual ebuild- no point in holding onto
+ # it for built ebuilds, but if it's there, we store it.
+ o = getattr(self.new_pkg, "ebuild", None)
+ if o is None:
+ logger.warn(
+ "doing install/replace op, "
+ "but source package doesn't provide the actual ebuild data. "
+ "Creating an empty file")
+ o = ''
+ else:
+ o = o.get_fileobj().read()
+ # XXX lil hackish accessing PF
+ open(pjoin(dirpath, self.new_pkg.PF + ".ebuild"), "w").write(o)
+
+ # XXX finally, hack to keep portage from doing stupid shit.
+ # relies on counter to discern what to punt during
+ # merging/removal, we don't need that crutch however. problem?
+ # No counter file, portage wipes all of our merges (friendly
+ # bugger).
+ # need to get zmedico to localize the counter
+ # creation/counting to per CP for this trick to behave
+ # perfectly.
+ open(pjoin(dirpath, "COUNTER"), "w").write(str(int(time.time())))
+
+ #finally, we mark who made this.
+ open(pjoin(dirpath, "PKGMANAGER"), "w").write(
+ "pkgcore-%s" % VERSION)
+ return True
+
+
+class uninstall(repo_interfaces.livefs_uninstall):
+
+ def __init__(self, domain_settings, repo, pkg, offset=None, *a, **kw):
+ self.dirpath = pjoin(
+ repo.base, pkg.category, pkg.package+"-"+pkg.fullver)
+ self.domain_settings = domain_settings
+ repo_interfaces.livefs_uninstall.__init__(
+ self, repo, pkg, offset=offset, *a, **kw)
+
+ uninstall_get_format_op_args_kwds = _get_default_ebuild_op_args_kwds
+ customize_engine = _default_customize_engine
+
+ def unmerge_metadata(self, dirpath=None):
+ if dirpath is None:
+ dirpath = self.dirpath
+        shutil.rmtree(dirpath)
+ return True
+
+
+# should convert these to mixins.
+class replace(install, uninstall, repo_interfaces.livefs_replace):
+
+ def __init__(self, domain_settings, repo, pkg, newpkg, *a, **kw):
+ self.dirpath = pjoin(
+ repo.base, pkg.category, pkg.package+"-"+pkg.fullver)
+ self.newpath = pjoin(
+ repo.base, newpkg.category, newpkg.package+"-"+newpkg.fullver)
+ self.tmpdirpath = pjoin(
+ os.path.dirname(self.dirpath),
+ ".tmp."+os.path.basename(self.dirpath))
+ self.domain_settings = domain_settings
+ repo_interfaces.livefs_replace.__init__(self, repo, pkg, newpkg, *a, **kw)
+
+ _get_format_op_args_kwds = _get_default_ebuild_op_args_kwds
+ customize_engine = _default_customize_engine
+
+ def merge_metadata(self, *a, **kw):
+ kw["dirpath"] = self.tmpdirpath
+ if os.path.exists(self.tmpdirpath):
+ shutil.rmtree(self.tmpdirpath)
+ return install.merge_metadata(self, *a, **kw)
+
+ def unmerge_metadata(self, *a, **kw):
+ ret = uninstall.unmerge_metadata(self, *a, **kw)
+ if not ret:
+ return ret
+ os.rename(self.tmpdirpath, self.newpath)
+ return True
diff --git a/pkgcore/vdb/virtuals.py b/pkgcore/vdb/virtuals.py
new file mode 100644
index 0000000..ea71dff
--- /dev/null
+++ b/pkgcore/vdb/virtuals.py
@@ -0,0 +1,182 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+import os, stat
+
+from pkgcore.restrictions import packages, values
+from pkgcore.ebuild.atom import atom
+from pkgcore.package.errors import InvalidDependency
+from pkgcore.os_data import portage_gid
+from pkgcore.repository import virtual
+
+from snakeoil.lists import iflatten_instance, unstable_unique
+from snakeoil.osutils import listdir, ensure_dirs, pjoin, readlines
+from snakeoil.currying import partial
+from snakeoil.fileutils import read_dict, AtomicWriteFile
+from snakeoil.demandload import demandload
+demandload(globals(), "errno")
+
+# generic functions.
+
+def _collect_virtuals(virtuals, iterable):
+ for pkg in iterable:
+ for virtualpkg in iflatten_instance(
+ pkg.provides.evaluate_depset(pkg.use)):
+ virtuals.setdefault(virtualpkg.package, {}).setdefault(
+ pkg.fullver, []).append(pkg.versioned_atom)
+
+def _finalize_virtuals(virtuals):
+ for pkg_dict in virtuals.itervalues():
+ for full_ver, rdep_atoms in pkg_dict.iteritems():
+ pkg_dict[full_ver] = tuple(rdep_atoms)
+
+def _collect_default_providers(virtuals):
+ return dict((virt,
+ frozenset(atom(x.key) for y in data.itervalues() for x in y))
+ for virt, data in virtuals.iteritems())
+
+# noncaching...
+
+def _grab_virtuals(repo):
+ virtuals = {}
+ _collect_virtuals(virtuals, repo)
+ defaults = _collect_default_providers(virtuals)
+ _finalize_virtuals(virtuals)
+ return defaults, virtuals
+
+def non_caching_virtuals(repo, livefs=True):
+ return OldStyleVirtuals(partial(_grab_virtuals, repo))
+
+
+#caching
+
+def _get_mtimes(loc):
+ d = {}
+ sdir = stat.S_ISDIR
+ for x in listdir(loc):
+ st = os.stat(pjoin(loc, x))
+ if sdir(st.st_mode):
+ d[x] = st.st_mtime
+ return d
+
+def _write_mtime_cache(mtimes, data, location):
+ old = os.umask(0113)
+ try:
+ if not ensure_dirs(os.path.dirname(location),
+ gid=portage_gid, mode=0775):
+ # bugger, can't update..
+ return
+ f = AtomicWriteFile(location, gid=portage_gid, perms=0664)
+ # invert the data...
+ rev_data = {}
+ for pkg, ver_dict in data.iteritems():
+ for fullver, virtuals in ver_dict.iteritems():
+ for virtual in virtuals:
+ rev_data.setdefault(virtual.category, []).extend(
+ (pkg, fullver, str(virtual)))
+ for cat, mtime in mtimes.iteritems():
+ if cat in rev_data:
+ f.write("%s\t%i\t%s\n" % (cat, mtime,
+ '\t'.join(rev_data[cat])))
+ else:
+ f.write("%s\t%i\n" % (cat, mtime))
+ f.close()
+ del f
+ finally:
+ os.umask(old)
+ os.chown(location, -1, portage_gid)
+
+def _read_mtime_cache(location):
+ try:
+ d = {}
+ for k, v in read_dict(readlines(location), splitter=None,
+ source_isiter=True).iteritems():
+ v = v.split()
+ # mtime pkg1 fullver1 virtual1 pkg2 fullver2 virtual2...
+ # if it's not the right length, skip this entry,
+ # cache validation will update it.
+ if (len(v) -1) % 3 == 0:
+ d[k] = v
+ return d
+ except IOError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ return {}
+
+def _convert_cached_virtuals(data):
+ iterable = iter(data)
+ # skip the mtime entry.
+ iterable.next()
+ d = {}
+ try:
+ for item in iterable:
+ d.setdefault(item, {}).setdefault(iterable.next(), []).append(
+ atom(iterable.next()))
+ except InvalidDependency:
+ return None
+ return d
+
+def _merge_virtuals(virtuals, new_virts):
+ for pkg, fullver_d in new_virts.iteritems():
+ for fullver, provides in fullver_d.iteritems():
+ virtuals.setdefault(pkg, {}).setdefault(
+ fullver, []).extend(provides)
+
+def _caching_grab_virtuals(repo, cache_basedir):
+ virtuals = {}
+ update = False
+ cache = _read_mtime_cache(pjoin(cache_basedir, 'virtuals.cache'))
+
+ existing = _get_mtimes(repo.location)
+ for cat, mtime in existing.iteritems():
+ d = cache.pop(cat, None)
+ if d is not None and long(d[0]) == mtime:
+ d = _convert_cached_virtuals(d)
+ if d is not None:
+ _merge_virtuals(virtuals, d)
+ continue
+
+ update = True
+ _collect_virtuals(virtuals, repo.itermatch(
+ packages.PackageRestriction("category",
+ values.StrExactMatch(cat))))
+
+ if update or cache:
+ _write_mtime_cache(existing, virtuals,
+ pjoin(cache_basedir, 'virtuals.cache'))
+
+ defaults = _collect_default_providers(virtuals)
+# _finalize_virtuals(virtuals)
+ return defaults, virtuals
+
+def caching_virtuals(repo, cache_basedir, livefs=True):
+ return OldStyleVirtuals(partial(_caching_grab_virtuals, repo, cache_basedir))
+
+
+class OldStyleVirtuals(virtual.tree):
+
+ def __init__(self, load_func):
+ virtual.tree.__init__(self, livefs=True)
+ self._load_func = load_func
+
+ def _load_data(self):
+ self.default_providers, self._virtuals = self._load_func()
+ self.packages._cache['virtual'] = tuple(self._virtuals.iterkeys())
+ self.versions._cache.update((('virtual', k), tuple(ver_dict))
+ for k, ver_dict in self._virtuals.iteritems())
+ self.versions._finalized = True
+ self.versions._known_keys.clear()
+ self._load_func = None
+
+ def _expand_vers(self, cp, ver):
+ return self._virtuals[cp[1]][ver]
+
+ def __getattr__(self, attr):
+ if attr not in ('default_providers', '_virtuals'):
+ return virtual.tree.__getattr__(self, attr)
+ if self._load_func is not None:
+ self._load_data()
+ return getattr(self, attr)
+
+ def _get_versions(self, cp):
+ return tuple(self._virtuals[cp[1]].iterkeys())
diff --git a/pkgcore/version.py b/pkgcore/version.py
new file mode 100644
index 0000000..0be7523
--- /dev/null
+++ b/pkgcore/version.py
@@ -0,0 +1,57 @@
+# Copyright: 2006 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+
+"""Version information (tied to bzr)."""
+
+
+from pkgcore import const
+
+
+_ver = None
+
+
+def get_version():
+ """@returns: a string describing the pkgcore version."""
+ global _ver
+ if _ver is not None:
+ return _ver
+
+ # This should get overwritten below, but let's be paranoid.
+ rev = 'unknown revision (internal error)'
+ version_info = None
+ try:
+ from pkgcore.bzr_verinfo import version_info
+ except ImportError:
+ try:
+ from bzrlib import branch, errors
+ except ImportError:
+ rev = 'unknown revision ' \
+ '(not from an sdist tarball, bzr unavailable)'
+ else:
+ try:
+ # Returns a (branch, relpath) tuple, ignore relpath.
+ b = branch.Branch.open_containing(__file__)[0]
+ except errors.NotBranchError:
+ rev = 'unknown revision ' \
+ '(not from an sdist tarball, not a bzr branch)'
+ else:
+ version_info = {
+ 'branch_nick': b.nick,
+ 'revno': b.revno(),
+ 'revision_id': b.last_revision(),
+ }
+ if b.supports_tags():
+ tagdict = b.tags.get_reverse_tag_dict()
+ version_info['tags'] = tagdict.get(b.last_revision())
+ if version_info is not None:
+ tags = version_info.get('tags')
+ if tags:
+ revname = ' '.join('tag:%s' % (tag,) for tag in tags)
+ else:
+ revname = '%(revno)s revid:%(revision_id)s' % version_info
+ rev = 'from bzr branch %s %s' % (version_info['branch_nick'], revname)
+
+ _ver = 'pkgcore %s\n%s' % (const.VERSION, rev)
+
+ return _ver
diff --git a/snakeoil/__init__.py b/snakeoil/__init__.py
new file mode 100644
index 0000000..9ff5a09
--- /dev/null
+++ b/snakeoil/__init__.py
@@ -0,0 +1,4 @@
+# Copyright: 2005 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""misc. utility functions"""
diff --git a/snakeoil/caching.py b/snakeoil/caching.py
new file mode 100644
index 0000000..daced2b
--- /dev/null
+++ b/snakeoil/caching.py
@@ -0,0 +1,86 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+instance caching metaclass
+"""
+
+from snakeoil.demandload import demandload
+demandload(
+ globals(),
+ 'warnings',
+ 'weakref:WeakValueDictionary',
+ )
+
+class native_WeakInstMeta(type):
+ """metaclass for instance caching, resulting in reuse of unique instances
+
+ few notes-
+ - instances must be immutable (or effectively so).
+ Since creating a new instance may return a preexisting instance,
+ this requirement B{must} be honored.
+ - due to the potential for mishap, each subclass of a caching class must
+ assign __inst_caching__ = True to enable caching for the derivative.
+ - conversely, __inst_caching__ = False does nothing
+ (although it's useful as a sign of
+      I{do not enable caching for this class}).
+ - instance caching can be disabled per instantiation via passing
+      disable_inst_caching=True into the class constructor.
+
+ Being a metaclass, the voodoo used doesn't require modification of
+ the class itself.
+
+    An example of usage is the restrictions subsystem of the
+    U{pkgcore project<http://pkgcore.org>}; see also the commented
+    sketch following this class.
+ """
+ def __new__(cls, name, bases, d):
+ if d.get("__inst_caching__", False):
+ d["__inst_caching__"] = True
+ d["__inst_dict__"] = WeakValueDictionary()
+ else:
+ d["__inst_caching__"] = False
+ slots = d.get('__slots__')
+ if slots is not None:
+ for base in bases:
+ if getattr(base, '__weakref__', False):
+ break
+ else:
+ d['__slots__'] = tuple(slots) + ('__weakref__',)
+ return type.__new__(cls, name, bases, d)
+
+ def __call__(cls, *a, **kw):
+ """disable caching via disable_inst_caching=True"""
+ if cls.__inst_caching__ and not kw.pop("disable_inst_caching", False):
+ kwlist = kw.items()
+ kwlist.sort()
+ key = (a, tuple(kwlist))
+ try:
+ instance = cls.__inst_dict__.get(key)
+ except (NotImplementedError, TypeError), t:
+ warnings.warn(
+ "caching keys for %s, got %s for a=%s, kw=%s" % (
+ cls, t, a, kw))
+ del t
+ key = instance = None
+
+ if instance is None:
+ instance = super(native_WeakInstMeta, cls).__call__(*a, **kw)
+
+ if key is not None:
+ cls.__inst_dict__[key] = instance
+ else:
+ instance = super(native_WeakInstMeta, cls).__call__(*a, **kw)
+
+ return instance
+
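+# A minimal usage sketch (illustrative only, not part of the original file),
+# using the WeakInstMeta name exported at the bottom of this module and a
+# hypothetical Color class:
+#
+#   class Color(object):
+#       __metaclass__ = WeakInstMeta
+#       __inst_caching__ = True
+#       def __init__(self, name):
+#           self.name = name
+#
+#   Color("red") is Color("red")                             # True, cached
+#   Color("red", disable_inst_caching=True) is Color("red")  # False
+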
+# "Invalid name"
+# pylint: disable-msg=C0103
+
+try:
+ # No name in module
+ # pylint: disable-msg=E0611
+ from snakeoil._caching import WeakInstMeta
+ cpy_WeakInstMeta = WeakInstMeta
+except ImportError:
+ cpy_WeakInstMeta = None
+ WeakInstMeta = native_WeakInstMeta
diff --git a/snakeoil/compatibility.py b/snakeoil/compatibility.py
new file mode 100644
index 0000000..fbdbb87
--- /dev/null
+++ b/snakeoil/compatibility.py
@@ -0,0 +1,32 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+Compatibility module providing native reimplementations of python2.5 functionality.
+
+Uses the native implementation from C{__builtins__} if available.
+"""
+
+def native_any(iterable):
+ for x in iterable:
+ if x:
+ return True
+ return False
+
+def native_all(iterable):
+ for x in iterable:
+ if not x:
+ return False
+ return True
+
+# using variable before assignment
+# pylint: disable-msg=E0601
+
+try:
+    # rebind the builtins directly if they exist (python 2.5+)
+    any, all = any, all
+except NameError:
+    try:
+        from snakeoil._compatibility import any, all
+    except ImportError:
+        any, all = native_any, native_all
diff --git a/snakeoil/containers.py b/snakeoil/containers.py
new file mode 100644
index 0000000..4ec3f2b
--- /dev/null
+++ b/snakeoil/containers.py
@@ -0,0 +1,207 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+collection of container classes
+"""
+
+from snakeoil.demandload import demandload
+demandload(
+ globals(),
+ 'itertools:chain,ifilterfalse',
+)
+
+class InvertedContains(set):
+
+ """Set that inverts all contains lookup results.
+
+    Mainly useful in conjunction with LimitedChangeSet for converting
+ from blacklist to whitelist.
+
+ Cannot be iterated over.
+ """
+
+ def __contains__(self, key):
+ return not set.__contains__(self, key)
+
+ def __iter__(self):
+ # infinite set, non iterable.
+ raise TypeError("InvertedContains cannot be iterated over")
+
+
+class SetMixin(object):
+ """
+ A mixin providing set methods.
+
+ Subclasses should provide __init__, __iter__ and __contains__.
+ """
+
+ def __and__(self, other, kls=None):
+ # Note: for these methods we don't bother to filter dupes from this
+ # list - since the subclasses __init__ should already handle this,
+ # there's no point doing it twice.
+ return (kls or self.__class__)(x for x in self if x in other)
+
+ def __rand__(self, other):
+ return self.__and__(other, kls=other.__class__)
+
+ def __or__(self, other, kls=None):
+ return (kls or self.__class__)(chain(self, other))
+
+ def __ror__(self, other):
+ return self.__or__(other, kls=other.__class__)
+
+ def __xor__(self, other, kls=None):
+ return (kls or self.__class__)(chain((x for x in self if x not in other),
+ (x for x in other if x not in self)))
+
+ def __rxor__(self, other):
+ return self.__xor__(other, kls=other.__class__)
+
+ def __sub__(self, other):
+ return self.__class__(x for x in self if x not in other)
+
+ def __rsub__(self, other):
+ return other.__class__(x for x in other if x not in self)
+
+ __add__ = __or__
+ __radd__ = __ror__
+
+
+class LimitedChangeSet(SetMixin):
+
+ """Set used to limit the number of times a key can be removed/added.
+
+    Specifically, a key can be deleted or added only once per commit;
+    changes to certain keys can optionally be blocked entirely.
+ """
+
+ _removed = 0
+ _added = 1
+
+ def __init__(self, initial_keys, unchangable_keys=None):
+ self._new = set(initial_keys)
+ if unchangable_keys is None:
+ self._blacklist = []
+ else:
+ if isinstance(unchangable_keys, (list, tuple)):
+ unchangable_keys = set(unchangable_keys)
+ self._blacklist = unchangable_keys
+ self._changed = set()
+ self._change_order = []
+ self._orig = frozenset(self._new)
+
+ def add(self, key):
+ if key in self._changed or key in self._blacklist:
+ # it's been del'd already once upon a time.
+ if key in self._new:
+ return
+ raise Unchangable(key)
+
+ self._new.add(key)
+ self._changed.add(key)
+ self._change_order.append((self._added, key))
+
+ def remove(self, key):
+ if key in self._changed or key in self._blacklist:
+ if key not in self._new:
+ raise KeyError(key)
+ raise Unchangable(key)
+
+ if key in self._new:
+ self._new.remove(key)
+ self._changed.add(key)
+ self._change_order.append((self._removed, key))
+
+ def __contains__(self, key):
+ return key in self._new
+
+ def changes_count(self):
+ return len(self._change_order)
+
+ def commit(self):
+ self._orig = frozenset(self._new)
+ self._changed.clear()
+ self._change_order = []
+
+ def rollback(self, point=0):
+ l = self.changes_count()
+ if point < 0 or point > l:
+ raise TypeError(
+ "%s point must be >=0 and <= changes_count()" % point)
+ while l > point:
+ change, key = self._change_order.pop(-1)
+ self._changed.remove(key)
+ if change == self._removed:
+ self._new.add(key)
+ else:
+ self._new.remove(key)
+ l -= 1
+
+ def __str__(self):
+ return str(self._new).replace("set(", "LimitedChangeSet(", 1)
+
+ def __iter__(self):
+ return iter(self._new)
+
+ def __len__(self):
+ return len(self._new)
+
+ def __eq__(self, other):
+ if isinstance(other, LimitedChangeSet):
+ return self._new == other._new
+ elif isinstance(other, (frozenset, set)):
+ return self._new == other
+ return False
+
+ def __ne__(self, other):
+ return not (self == other)
+
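+# A small sketch (illustrative only, not part of the original file) of the
+# commit/rollback behaviour described above:
+#
+#   cs = LimitedChangeSet(["a", "b"])
+#   cs.add("c")        # fine
+#   cs.remove("a")     # fine
+#   cs.rollback()      # back to {"a", "b"}, change history cleared
+#   cs.remove("a")     # allowed again after the rollback
+#   cs.commit()        # {"b"} becomes the new baseline
+#   cs.add("a")        # allowed once more; commit cleared the change log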
+
+class Unchangable(Exception):
+
+ def __init__(self, key):
+ Exception.__init__(self, "key '%s' is unchangable" % (key,))
+ self.key = key
+
+
+class ProtectedSet(SetMixin):
+
+ """
+ Wraps a set pushing all changes into a secondary set.
+ """
+ def __init__(self, orig_set):
+ self._orig = orig_set
+ self._new = set()
+
+ def __contains__(self, key):
+ return key in self._orig or key in self._new
+
+ def __iter__(self):
+ return chain(iter(self._new),
+ ifilterfalse(self._new.__contains__, self._orig))
+
+ def __len__(self):
+ return len(self._orig.union(self._new))
+
+ def add(self, key):
+ if key not in self._orig:
+ self._new.add(key)
+
+
+class RefCountingSet(dict):
+
+ def __init__(self, iterable=None):
+ if iterable is not None:
+ dict.__init__(self, ((x, 1) for x in iterable))
+
+ def add(self, item):
+ count = self.get(item, 0)
+ self[item] = count + 1
+
+ def remove(self, item):
+ count = self[item]
+ if count == 1:
+ del self[item]
+ else:
+ self[item] = count - 1
diff --git a/snakeoil/currying.py b/snakeoil/currying.py
new file mode 100644
index 0000000..33be84f
--- /dev/null
+++ b/snakeoil/currying.py
@@ -0,0 +1,129 @@
+# Copyright: 2005 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+Function currying, generating a functor with a set of args/defaults pre bound.
+
+L{pre_curry} and L{post_curry} return "normal" python functions.
+L{partial} returns a callable object. The difference between
+L{pre_curry} and L{partial} is this::
+
+ >>> def func(arg=None, self=None):
+ ... return arg, self
+ >>> curry = pre_curry(func, True)
+ >>> part = partial(func, True)
+ >>> class Test(object):
+ ... curry = pre_curry(func, True)
+ ... part = partial(func, True)
+ ... def __repr__(self):
+ ... return '<Test object>'
+ >>> curry()
+ (True, None)
+ >>> Test().curry()
+ (True, <Test object>)
+ >>> part()
+ (True, None)
+ >>> Test().part()
+ (True, None)
+
+If your curried function is not used as a class attribute the results
+should be identical. Because L{partial} has an implementation in C
+while L{pre_curry} is pure python, you should use L{partial} if possible.
+"""
+
+from operator import attrgetter
+
+__all__ = ("pre_curry", "partial", "post_curry", "pretty_docs",
+ "alias_class_method")
+
+def pre_curry(func, *args, **kwargs):
+ """passed in args are prefixed, with further args appended"""
+
+ if not kwargs:
+ def callit(*moreargs, **morekwargs):
+ return func(*(args + moreargs), **morekwargs)
+ elif not args:
+ def callit(*moreargs, **morekwargs):
+ kw = kwargs.copy()
+ kw.update(morekwargs)
+ return func(*moreargs, **kw)
+ else:
+ def callit(*moreargs, **morekwargs):
+ kw = kwargs.copy()
+ kw.update(morekwargs)
+ return func(*(args+moreargs), **kw)
+
+ callit.func = func
+ return callit
+
+
+class native_partial(object):
+
+ """Like pre_curry, but does not get turned into an instance method."""
+
+ def __init__(self, func, *args, **kwargs):
+ self.func = func
+ self.args = args
+ self.kwargs = kwargs
+
+ def __call__(self, *moreargs, **morekwargs):
+ kw = self.kwargs.copy()
+ kw.update(morekwargs)
+ return self.func(*(self.args + moreargs), **kw)
+
+# Unused import, unable to import
+# pylint: disable-msg=W0611,F0401
+try:
+ from functools import partial
+except ImportError:
+ try:
+ from snakeoil._compatibility import partial
+ except ImportError:
+ partial = native_partial
+
+
+def post_curry(func, *args, **kwargs):
+ """passed in args are appended to any further args supplied"""
+
+ if not kwargs:
+ def callit(*moreargs, **morekwargs):
+ return func(*(moreargs+args), **morekwargs)
+ elif not args:
+ def callit(*moreargs, **morekwargs):
+ kw = morekwargs.copy()
+ kw.update(kwargs)
+ return func(*moreargs, **kw)
+ else:
+ def callit(*moreargs, **morekwargs):
+ kw = morekwargs.copy()
+ kw.update(kwargs)
+ return func(*(moreargs+args), **kw)
+
+ callit.func = func
+ return callit
+
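+# A quick sketch (illustrative only, not part of the original file) contrasting
+# where the bound arguments end up, using a hypothetical div():
+#
+#   def div(a, b):
+#       return a // b
+#
+#   pre_curry(div, 10)(2)    # div(10, 2) -> 5
+#   post_curry(div, 10)(2)   # div(2, 10) -> 0
+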
+def pretty_docs(wrapped, extradocs=None):
+ wrapped.__module__ = wrapped.func.__module__
+ doc = wrapped.func.__doc__
+ if extradocs is None:
+ wrapped.__doc__ = doc
+ else:
+ wrapped.__doc__ = extradocs
+ return wrapped
+
+
+def alias_class_method(attr):
+ """at runtime, redirect to another method
+
+    attr is the name of the attribute to look up; all args/kwargs passed
+    in later are forwarded to it.
+
+    Useful, for example, when aliasing has_key to __contains__, where
+    __contains__ may be overridden.
+    """
+ grab_attr = attrgetter(attr)
+
+ def _asecond_level_call(self, *a, **kw):
+ return grab_attr(self)(*a, **kw)
+
+ return _asecond_level_call
diff --git a/snakeoil/debug_imports.py b/snakeoil/debug_imports.py
new file mode 100755
index 0000000..dc137ed
--- /dev/null
+++ b/snakeoil/debug_imports.py
@@ -0,0 +1,99 @@
+#!/usr/bin/python
+# Copyright: 2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+import __builtin__
+
+class intercept_import(object):
+
+ def __init__(self, callback):
+ self.callback = callback
+ self.stack = []
+ self.seen = set()
+
+ def __call__(self, *args):
+ if args[0] not in self.seen:
+ self.disable()
+ self.callback(self.stack, args)
+ self.enable()
+ self.stack.append(args[0])
+ self.seen.add(args[0])
+ try:
+ return self.orig_import(*args)
+ finally:
+ self.stack.pop()
+
+ def enable(self):
+ cur_import = __builtin__.__import__
+ if isinstance(cur_import, intercept_import):
+ raise RuntimeError("an intercept is already active")
+ self.orig_import = cur_import
+ __builtin__.__import__ = self
+
+ def disable(self):
+ if __builtin__.__import__ != self:
+ raise RuntimeError("either not active, or a different intercept "
+ "is in use")
+ __builtin__.__import__ = self.orig_import
+ del self.orig_import
+
+
+if __name__ == "__main__":
+ import __main__
+ orig = dict(__main__.__dict__.iteritems())
+ del orig["intercept_import"]
+ del orig["__builtin__"]
+ del orig["__main__"]
+
+ import sys, imp
+
+ usage = "debug_imports.py [-o output_file_path || -i] scriptfile [arg] ..."
+ if not sys.argv[1:]:
+ print "Usage: ", usage
+ sys.exit(2)
+
+ # yes, at first thought, this should use getopt or optparse.
+ # problem is, folks may want to spot that import, thus we can't.
+
+ import traceback, pdb
+
+ args = sys.argv[1:]
+ if args[0] == '-o':
+ if not len(args) > 2:
+ print "Usage: ", usage
+ sys.exit(2)
+ f = open(args[1], 'w')
+        def callback(stack, args):
+            f.write("adding %s\n" % (args[0],))
+            traceback.print_stack(file=f)
+ args = args[2:]
+ elif args[0] == '-i':
+        def callback(stack, args):
+            pdb.set_trace()
+ args = args[1:]
+ else:
+ import time
+ def callback(stack, args):
+ if stack:
+ print "in: %s" % ', '.join(stack)
+ if len(args) == 4 and args[3] is not None:
+ print "from %s import %s" % (args[0], ', '.join(args[3]))
+ else:
+ print "import %s " % args[0]
+ print time.time()
+# traceback.print_stack(file=sys.stdout)
+ print
+
+
+ path = args[0]
+
+ sys.argv = args[:]
+ i = intercept_import(callback)
+ i.enable()
+ print "starting\n",time.time(),"\n"
+ try:
+ imp.load_module("__main__", open(args[0]), args[0], ("", "r", imp.PY_SOURCE))
+ finally:
+ i.disable()
+ print "\nfinished\n",time.time(),"\n"
+ sys.exit(0)
diff --git a/snakeoil/demandload.py b/snakeoil/demandload.py
new file mode 100644
index 0000000..a515b90
--- /dev/null
+++ b/snakeoil/demandload.py
@@ -0,0 +1,226 @@
+# Copyright: 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
+# Copyright: 2007 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+"""Demand load things when used.
+
+This uses L{Placeholder} objects which create an actual object on
+first use and know how to replace themselves with that object, so
+there is no performance penalty after first use.
+
+This trick is *mostly* transparent, but there are a few things you
+have to be careful with:
+
+ - You may not bind a second name to a placeholder object. Specifically,
+ if you demandload C{bar} in module C{foo}, you may not
+ C{from foo import bar} in a third module. The placeholder object
+ does not "know" it gets imported, so this does not trigger the
+ demandload: C{bar} in the third module is the placeholder object.
+ When that placeholder gets used it replaces itself with the actual
+ module in C{foo} but not in the third module.
+ Because this is normally unwanted (it introduces a small
+ performance hit) the placeholder object will raise an exception if
+ it detects this. But if the demandload gets triggered before the
+ third module is imported you do not get that exception, so you
+ have to be careful not to import or otherwise pass around the
+ placeholder object without triggering it.
+ - Not all operations on the placeholder object trigger demandload.
+ The most common problem is that C{except ExceptionClass} does not
+ work if C{ExceptionClass} is a placeholder.
+ C{except module.ExceptionClass} with C{module} a placeholder does
+ work. You can normally avoid this by always demandloading the
+ module, not something in it.
+"""
+
+# TODO: the use of a curried func instead of subclassing needs more thought.
+
+# the replace_func used by Placeholder is currently passed in as an
+# external callable, with "partial" used to provide arguments to it.
+# This works, but has the disadvantage that calling
+# demand_compile_regexp needs to import re (to hand re.compile to
+# partial). One way to avoid that would be to add a wrapper function
+# that delays the import (well, triggers the demandload) at the time
+# the regexp is used, but that's a bit convoluted. A different way is
+# to make replace_func a method of Placeholder implemented through
+# subclassing instead of a callable passed to its __init__. The
+# current version does not do this because getting/setting attributes
+# of Placeholder is annoying because of the
+# __getattribute__/__setattr__ override.
+
+
+from snakeoil.modules import load_any
+from snakeoil.currying import partial
+
+# There are some demandloaded imports below the definition of demandload.
+
+_allowed_chars = "".join((x.isalnum() or x in "_.") and " " or "a"
+ for x in map(chr, xrange(256)))
+
+def parse_imports(imports):
+ """Parse a sequence of strings describing imports.
+
+ For every input string it returns a tuple of (import, targetname).
+ Examples::
+
+ 'foo' -> ('foo', 'foo')
+ 'foo:bar' -> ('foo.bar', 'bar')
+ 'foo:bar,baz@spork' -> ('foo.bar', 'bar'), ('foo.baz', 'spork')
+ 'foo@bar' -> ('foo', 'bar')
+
+ Notice 'foo.bar' is not a valid input. This simplifies the code,
+ but if it is desired it can be added back.
+
+ @type imports: sequence of C{str} objects.
+ @rtype: iterable of tuples of two C{str} objects.
+ """
+ for s in imports:
+ fromlist = s.split(':', 1)
+ if len(fromlist) == 1:
+ # Not a "from" import.
+ if '.' in s:
+ raise ValueError('dotted imports unsupported.')
+ split = s.split('@', 1)
+ for s in split:
+ if not s.translate(_allowed_chars).isspace():
+ raise ValueError("bad target: %s" % s)
+ if len(split) == 2:
+ yield tuple(split)
+ else:
+ yield split[0], split[0]
+ else:
+ # "from" import.
+ base, targets = fromlist
+ if not base.translate(_allowed_chars).isspace():
+ raise ValueError("bad target: %s" % base)
+ for target in targets.split(','):
+ split = target.split('@', 1)
+ for s in split:
+ if not s.translate(_allowed_chars).isspace():
+ raise ValueError("bad target: %s" % s)
+ yield base + '.' + split[0], split[-1]
+
+
+class Placeholder(object):
+
+ """Object that knows how to replace itself when first accessed.
+
+ See the module docstring for common problems with its use.
+ """
+
+ def __init__(self, scope, name, replace_func):
+ """Initialize.
+
+ @param scope: the scope we live in, normally the result of
+ C{globals()}.
+ @param name: the name we have in C{scope}.
+ @param replace_func: callable returning the object to replace us with.
+ """
+ object.__setattr__(self, '_scope', scope)
+ object.__setattr__(self, '_name', name)
+ object.__setattr__(self, '_replace_func', replace_func)
+
+ def _already_replaced(self):
+ name = object.__getattribute__(self, '_name')
+ raise ValueError('Placeholder for %r was triggered twice' % (name,))
+
+ def _replace(self):
+ """Replace ourself in C{scope} with the result of our C{replace_func}.
+
+ @returns: the result of calling C{replace_func}.
+ """
+ replace_func = object.__getattribute__(self, '_replace_func')
+ scope = object.__getattribute__(self, '_scope')
+ name = object.__getattribute__(self, '_name')
+ # Paranoia, explained in the module docstring.
+ already_replaced = object.__getattribute__(self, '_already_replaced')
+ object.__setattr__(self, '_replace_func', already_replaced)
+
+ # Cleanup, possibly unnecessary.
+ object.__setattr__(self, '_scope', None)
+
+ result = replace_func()
+ scope[name] = result
+ return result
+
+ # Various methods proxied to our replacement.
+
+ def __str__(self):
+ return self.__getattribute__('__str__')()
+
+ def __getattribute__(self, attr):
+ result = object.__getattribute__(self, '_replace')()
+ return getattr(result, attr)
+
+ def __setattr__(self, attr, value):
+ result = object.__getattribute__(self, '_replace')()
+ setattr(result, attr, value)
+
+ def __call__(self, *args, **kwargs):
+ result = object.__getattribute__(self, '_replace')()
+ return result(*args, **kwargs)
+
+
+def demandload(scope, *imports):
+ """Import modules into scope when each is first used.
+
+ scope should be the value of C{globals()} in the module calling
+ this function. (using C{locals()} may work but is not recommended
+ since mutating that is not safe).
+
+ Other args are strings listing module names.
+ names are handled like this::
+
+ foo import foo
+ foo@bar import foo as bar
+ foo:bar from foo import bar
+ foo:bar,quux from foo import bar, quux
+ foo.bar:quux from foo.bar import quux
+ foo:baz@quux from foo import baz as quux
+ """
+ for source, target in parse_imports(imports):
+ scope[target] = Placeholder(scope, target, partial(load_any, source))
+
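+# A typical usage sketch (illustrative only, not part of the original file);
+# the module names here are only examples:
+#
+#   from snakeoil.demandload import demandload
+#   demandload(globals(), 'os', 'snakeoil.osutils:pjoin,listdir@ldir')
+#   # 'os', 'pjoin' and 'ldir' are now Placeholder objects in this scope;
+#   # the real imports only happen the first time each name is used.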
+
+demandload(globals(), 're')
+
+# Extra name to make undoing monkeypatching demandload with
+# disabled_demandload easier.
+enabled_demandload = demandload
+
+
+def disabled_demandload(scope, *imports):
+ """Exactly like L{demandload} but does all imports immediately."""
+ for source, target in parse_imports(imports):
+ scope[target] = load_any(source)
+
+
+class RegexPlaceholder(Placeholder):
+ """
+ Compiled Regex object that knows how to replace itself when first accessed.
+
+ See the module docstring for common problems with its use; used by
+ L{demand_compile_regexp}.
+ """
+
+ def _replace(self):
+ args, kwargs = object.__getattribute__(self, '_replace_func')
+ object.__setattr__(self, '_replace_func',
+ partial(re.compile, *args, **kwargs))
+ return Placeholder._replace(self)
+
+
+
+def demand_compile_regexp(scope, name, *args, **kwargs):
+ """Demandloaded version of L{re.compile}.
+
+ Extra arguments are passed unchanged to L{re.compile}.
+
+ This returns the placeholder, which you *must* bind to C{name} in
+ the scope you pass as C{scope}. It is done this way to prevent
+ confusing code analysis tools like pylint.
+
+ @param scope: the scope, just like for L{demandload}.
+ @param name: the name of the compiled re object in that scope.
+ @returns: the placeholder object.
+ """
+ return RegexPlaceholder(scope, name, (args, kwargs))
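+
+# A usage sketch (illustrative only, not part of the original file); the name
+# and pattern are hypothetical:
+#
+#   valid_name_re = demand_compile_regexp(
+#       globals(), 'valid_name_re', r'^[A-Za-z_]\w*$')
+#   # re.compile only runs once valid_name_re is first used,
+#   # e.g. valid_name_re.match('foo').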
diff --git a/snakeoil/dependant_methods.py b/snakeoil/dependant_methods.py
new file mode 100644
index 0000000..b7597b5
--- /dev/null
+++ b/snakeoil/dependant_methods.py
@@ -0,0 +1,86 @@
+# Copyright: 2005-2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""Metaclass to inject dependencies into method calls.
+
+Essentially, method a must be run prior to method b, invoking method a
+if b is called first.
+"""
+
+from snakeoil.lists import iflatten_instance
+from snakeoil.currying import partial
+
+__all__ = ["ForcedDepends"]
+
+def ensure_deps(self, name, *a, **kw):
+ ignore_deps = "ignore_deps" in kw
+ if ignore_deps:
+ del kw["ignore_deps"]
+ s = [name]
+ else:
+ s = yield_deps(self, self.stage_depends, name)
+
+ r = True
+ for dep in s:
+ if dep not in self._stage_state:
+ r = getattr(self, dep).raw_func(*a, **kw)
+ if r:
+ self._stage_state.add(dep)
+ else:
+ return r
+ return r
+
+def yield_deps(inst, d, k):
+    # While at first glance this looks like it should use expandable_chain,
+ # it shouldn't. --charlie
+ if k not in d:
+ yield k
+ return
+ s = [k, iflatten_instance(d.get(k, ()))]
+ while s:
+ if isinstance(s[-1], basestring):
+ yield s.pop(-1)
+ continue
+ exhausted = True
+ for x in s[-1]:
+ v = d.get(x)
+ if v:
+ s.append(x)
+ s.append(iflatten_instance(v))
+ exhausted = False
+ break
+ yield x
+ if exhausted:
+ s.pop(-1)
+
+
+class ForcedDepends(type):
+ """
+ Metaclass forcing methods to run in a certain order.
+
+    Dependencies are controlled by the existence of a stage_depends
+    dict in the class namespace. Its keys are method names, values are
+    either a string (name of the preceding method), or a list/tuple
+    (names of the preceding methods).
+
+    U{The pkgcore project's pkgcore.interfaces.format.build_base is an example consumer<http://pkgcore.org>}
+    to look at for usage.
+ """
+ def __call__(cls, *a, **kw):
+ o = super(ForcedDepends, cls).__call__(*a, **kw)
+        if not getattr(cls, "stage_depends", None):
+ return o
+
+ if not hasattr(o, "_stage_state"):
+ o._stage_state = set()
+
+ # wrap the funcs
+
+ for x in set(x for x in iflatten_instance(o.stage_depends.iteritems())
+ if x):
+ f = getattr(o, x)
+ f2 = partial(ensure_deps, o, x)
+ f2.raw_func = f
+ setattr(o, x, f2)
+
+ return o
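+
+# A usage sketch (illustrative only, not part of the original file) of the
+# stage_depends contract; the Build class is hypothetical:
+#
+#   class Build(object):
+#       __metaclass__ = ForcedDepends
+#       stage_depends = {"compile": "configure", "install": "compile"}
+#
+#       def configure(self):
+#           return True
+#       def compile(self):
+#           return True
+#       def install(self):
+#           return True
+#
+#   # Build().install() runs configure, then compile, then install;
+#   # each stage runs at most once per instance (tracked in _stage_state).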
diff --git a/snakeoil/descriptors.py b/snakeoil/descriptors.py
new file mode 100644
index 0000000..bf31b9f
--- /dev/null
+++ b/snakeoil/descriptors.py
@@ -0,0 +1,28 @@
+# Copyright: 2006 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+
+"""Classes implementing the descriptor protocol."""
+
+
+class classproperty(object):
+
+ """Like the builtin C{property} but takes a single classmethod.
+
+ Used like this:
+
+ class Example(object):
+
+ @classproperty
+ def test(cls):
+ # Do stuff with cls here (it is Example or a subclass).
+
+ Now both C{Example.test} and C{Example().test} invoke the getter.
+ A "normal" property only works on instances.
+ """
+
+ def __init__(self, getter):
+ self.getter = getter
+
+ def __get__(self, instance, owner):
+ return self.getter(owner)
diff --git a/snakeoil/fileutils.py b/snakeoil/fileutils.py
new file mode 100644
index 0000000..9c9478e
--- /dev/null
+++ b/snakeoil/fileutils.py
@@ -0,0 +1,284 @@
+# Copyright: 2005-2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+file related operations, mainly reading
+"""
+
+import re, os
+from shlex import shlex
+from snakeoil.mappings import ProtectedDict
+from snakeoil.osutils import readlines
+
+class AtomicWriteFile(file):
+
+ """File class that stores the changes in a tempfile.
+
+ Upon close call, uses rename to replace the destination.
+
+ Similar to file protocol behaviour, except for the C{__init__}, and
+ that close *must* be called for the changes to be made live,
+
+ if C{__del__} is triggered it's assumed that an exception occured,
+ thus the changes shouldn't be made live.
+ """
+ def __init__(self, fp, binary=False, perms=None, uid=-1, gid=-1, **kwds):
+ self.is_finalized = False
+ if binary:
+ file_mode = "wb"
+ else:
+ file_mode = "w"
+ fp = os.path.realpath(fp)
+ self.original_fp = fp
+ self.temp_fp = os.path.join(
+ os.path.dirname(fp), ".update.%s" % os.path.basename(fp))
+ old_umask = None
+ if perms:
+ # give it just write perms
+ old_umask = os.umask(0200)
+ try:
+ file.__init__(self, self.temp_fp, mode=file_mode, **kwds)
+ finally:
+ if old_umask is not None:
+ os.umask(old_umask)
+ if perms:
+ os.chmod(self.temp_fp, perms)
+ if (gid, uid) != (-1, -1):
+ os.chown(self.temp_fp, uid, gid)
+
+ def close(self):
+ file.close(self)
+ os.rename(self.temp_fp, self.original_fp)
+ self.is_finalized = True
+
+ def __del__(self):
+ file.close(self)
+ if not self.is_finalized:
+ os.unlink(self.temp_fp)
+
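+# A usage sketch (illustrative only, not part of the original file); the path
+# is hypothetical:
+#
+#   f = AtomicWriteFile("/etc/portage/package.keywords", perms=0644)
+#   f.write("dev-util/example ~amd64\n")
+#   f.close()   # atomically renames the temp file over the original
+#   # if an exception occurs before close(), the temp file is removed
+#   # instead and the original is left untouched.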
+
+def iter_read_bash(bash_source):
+ """
+ Read file honoring bash commenting rules.
+
+    Note that it's considered good behaviour to close filehandles; as
+    such, either iterate fully through this, or use read_bash instead.
+ Once the file object is no longer referenced the handle will be
+ closed, but be proactive instead of relying on the garbage
+ collector.
+
+ @param bash_source: either a file to read from
+ or a string holding the filename to open.
+ """
+ if isinstance(bash_source, basestring):
+ bash_source = readlines(bash_source, True)
+ for s in bash_source:
+ s = s.strip()
+ if s and s[0] != "#":
+ yield s
+
+
+def read_bash(bash_source):
+ return list(iter_read_bash(bash_source))
+read_bash.__doc__ = iter_read_bash.__doc__
+
+
+def read_dict(bash_source, splitter="=", source_isiter=False):
+ """
+ read key value pairs, ignoring bash-style comments.
+
+ @param splitter: the string to split on. Can be None to
+ default to str.split's default
+ @param bash_source: either a file to read from,
+ or a string holding the filename to open.
+ """
+ d = {}
+ if not source_isiter:
+ filename = bash_source
+ i = iter_read_bash(bash_source)
+ else:
+ # XXX what to do?
+ filename = '<unknown>'
+ i = bash_source
+ line_count = 1
+ try:
+ for k in i:
+ line_count += 1
+ try:
+ k, v = k.split(splitter, 1)
+ except ValueError:
+ raise ParseError(filename, line_count)
+ if len(v) > 2 and v[0] == v[-1] and v[0] in ("'", '"'):
+ v = v[1:-1]
+ d[k] = v
+ finally:
+ del i
+ return d
+
+def read_bash_dict(bash_source, vars_dict=None, sourcing_command=None):
+ """
+ read bash source, yielding a dict of vars
+
+ @param bash_source: either a file to read from
+ or a string holding the filename to open
+ @param vars_dict: initial 'env' for the sourcing.
+ Is protected from modification.
+ @type vars_dict: dict or None
+ @param sourcing_command: controls whether a source command exists.
+ If one does and is encountered, then this func is called.
+ @type sourcing_command: callable
+ @raise ParseError: thrown if invalid syntax is encountered.
+ @return: dict representing the resultant env if bash executed the source.
+ """
+
+ # quite possibly I'm missing something here, but the original
+ # portage_util getconfig/varexpand seemed like it only went
+ # halfway. The shlex posix mode *should* cover everything.
+
+ if vars_dict is not None:
+ d, protected = ProtectedDict(vars_dict), True
+ else:
+ d, protected = {}, False
+ if isinstance(bash_source, basestring):
+ f = open(bash_source, "r")
+ else:
+ f = bash_source
+ s = bash_parser(f, sourcing_command=sourcing_command, env=d)
+
+ try:
+ tok = ""
+ try:
+ while tok is not None:
+ key = s.get_token()
+ if key is None:
+ break
+ elif key.isspace():
+ # we specifically have to check this, since we're
+ # screwing with the whitespace filters below to
+ # detect empty assigns
+ continue
+ eq = s.get_token()
+ if eq != '=':
+ raise ParseError(bash_source, s.lineno,
+ "got token %r, was expecting '='" % eq)
+ val = s.get_token()
+ if val is None:
+ val = ''
+ # look ahead to see if we just got an empty assign.
+ next_tok = s.get_token()
+ if next_tok == '=':
+ # ... we did.
+ # leftmost insertions, thus reversed ordering
+ s.push_token(next_tok)
+ s.push_token(val)
+ val = ''
+ else:
+ s.push_token(next_tok)
+ d[key] = val
+ except ValueError, e:
+ raise ParseError(bash_source, s.lineno, str(e))
+ finally:
+ del f
+ if protected:
+ d = d.new
+ return d
+
+
+var_find = re.compile(r'\\?(\${\w+}|\$\w+)')
+backslash_find = re.compile(r'\\.')
+def nuke_backslash(s):
+ s = s.group()
+ if s == "\\\n":
+ return "\n"
+ try:
+ return chr(ord(s))
+ except TypeError:
+ return s[1]
+
+class bash_parser(shlex):
+ def __init__(self, source, sourcing_command=None, env=None):
+ self.__dict__['state'] = ' '
+ shlex.__init__(self, source, posix=True)
+ self.wordchars += "@${}/.-+/:~^"
+ self.wordchars = frozenset(self.wordchars)
+ if sourcing_command is not None:
+ self.source = sourcing_command
+ if env is None:
+ env = {}
+ self.env = env
+ self.__pos = 0
+
+ def __setattr__(self, attr, val):
+ if attr == "state":
+ if (self.state, val) in (
+ ('"', 'a'), ('a', '"'), ('a', ' '), ("'", 'a')):
+ strl = len(self.token)
+ if self.__pos != strl:
+ self.changed_state.append(
+ (self.state, self.token[self.__pos:]))
+ self.__pos = strl
+ self.__dict__[attr] = val
+
+ def sourcehook(self, newfile):
+ try:
+ return shlex.sourcehook(self, newfile)
+ except IOError, ie:
+ raise ParseError(newfile, 0, str(ie))
+
+ def read_token(self):
+ self.changed_state = []
+ self.__pos = 0
+ tok = shlex.read_token(self)
+ if tok is None:
+ return tok
+ self.changed_state.append((self.state, self.token[self.__pos:]))
+ tok = ''
+ for s, t in self.changed_state:
+ if s in ('"', "a"):
+ tok += self.var_expand(t).replace("\\\n", '')
+ else:
+ tok += t
+ return tok
+
+ def var_expand(self, val):
+ prev, pos = 0, 0
+ l = []
+ match = var_find.search(val)
+ while match is not None:
+ pos = match.start()
+ if val[pos] == '\\':
+ # it's escaped. either it's \\$ or \\${ , either way,
+ # skipping two ahead handles it.
+ pos += 2
+ else:
+ var = val[match.start():match.end()].strip("${}")
+ if prev != pos:
+ l.append(val[prev:pos])
+ if var in self.env:
+ if not isinstance(self.env[var], basestring):
+ raise ValueError(
+ "env key %r must be a string, not %s: %r" % (
+ var, type(self.env[var]), self.env[var]))
+ l.append(self.env[var])
+ else:
+ l.append("")
+ prev = pos = match.end()
+ match = var_find.search(val, pos)
+
+ # do \\ cleansing, collapsing val down also.
+ val = backslash_find.sub(nuke_backslash, ''.join(l) + val[prev:])
+ return val
+
+
+class ParseError(Exception):
+
+ def __init__(self, filename, line, errmsg=None):
+ if errmsg is not None:
+ Exception.__init__(self,
+ "error parsing '%s' on or before %i: err %s" %
+ (filename, line, errmsg))
+ else:
+ Exception.__init__(self,
+ "error parsing '%s' on or before %i" %
+ (filename, line))
+ self.file, self.line, self.errmsg = filename, line, errmsg
diff --git a/snakeoil/fix_copy.py b/snakeoil/fix_copy.py
new file mode 100644
index 0000000..3a73d5b
--- /dev/null
+++ b/snakeoil/fix_copy.py
@@ -0,0 +1,74 @@
+# Copyright (C) 2005, 2006 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+"""A version of inspect that includes what 'copy' needs.
+
+Importing the python standard module 'copy' is far more expensive than it
+needs to be, because copy imports 'inspect' which imports 'tokenize'.
+And 'copy' only needs 2 small functions out of 'inspect', but has to
+load all of 'tokenize', which makes it horribly slow.
+
+This module is designed to use tricky hacks in import rules, to avoid this
+overhead.
+"""
+
+
+####
+# These are the only 2 functions that 'copy' needs from 'inspect'
+# As you can see, they are quite trivial, and don't justify the
+# 40ms spent to import 'inspect' because it is importing 'tokenize'
+# These are copied verbatim from the python standard library.
+
+# ----------------------------------------------------------- class helpers
+def _searchbases(cls, accum):
+ # Simulate the "classic class" search order.
+ if cls in accum:
+ return
+ accum.append(cls)
+ for base in cls.__bases__:
+ _searchbases(base, accum)
+
+
+def getmro(cls):
+ "Return tuple of base classes (including cls) in method resolution order."
+ if hasattr(cls, "__mro__"):
+ return cls.__mro__
+ else:
+ result = []
+ _searchbases(cls, result)
+ return tuple(result)
+
+
+def inject_copy():
+ """Import the 'copy' module with a hacked 'inspect' module"""
+ # We don't actually care about 'getmro' but we need to pass
+ # something in the list so that we get the direct module,
+ # rather than getting the base module
+ import sys
+
+ # Don't hack around if 'inspect' already exists
+ if 'inspect' in sys.modules:
+ import copy
+ return
+
+ mod = __import__('snakeoil.fix_copy',
+ globals(), locals(), ['getmro'])
+
+ sys.modules['inspect'] = mod
+ try:
+ import copy
+ finally:
+ del sys.modules['inspect']
diff --git a/snakeoil/formatters.py b/snakeoil/formatters.py
new file mode 100644
index 0000000..b327bb4
--- /dev/null
+++ b/snakeoil/formatters.py
@@ -0,0 +1,495 @@
+# Copyright: 2006 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+"""Classes wrapping a file-like object to do fancy output on it."""
+
+import os
+import errno
+
+from snakeoil.klass import GetAttrProxy
+from snakeoil.demandload import demandload
+demandload(globals(), 'locale')
+
+
+class native_StreamClosed(KeyboardInterrupt):
+ """Raised by L{Formatter.write} if the stream it prints to was closed.
+
+ This inherits from C{KeyboardInterrupt} because it should usually
+ be handled the same way: a common way of triggering this exception
+ is by closing a pager before the script finished outputting, which
+ should be handled like control+c, not like an error.
+ """
+
+
+# "Invalid name" (for fg and bg methods, too short)
+# pylint: disable-msg=C0103
+
+
+class Formatter(object):
+
+ """Abstract formatter base class.
+
+ The types of most of the instance attributes is undefined (depends
+ on the implementation of the particular Formatter subclass).
+
+ @ivar bold: object to pass to L{write} to switch to bold mode.
+ @ivar underline: object to pass to L{write} to switch to underlined mode.
+ @ivar reset: object to pass to L{write} to turn off bold and underline.
+ @ivar wrap: boolean indicating we auto-linewrap (defaults to off).
+ @ivar autoline: boolean indicating we are in auto-newline mode
+ (defaults to on).
+ """
+
+ def __init__(self):
+ self.autoline = True
+ self.wrap = False
+
+ def write(self, *args, **kwargs):
+ """Write something to the stream.
+
+ Acceptable arguments are:
+ - Strings are simply written to the stream.
+ - C{None} is ignored.
+ - Functions are called with the formatter as argument.
+ Their return value is then used the same way as the other
+ arguments.
+ - Formatter subclasses might special-case certain objects.
+
+ Accepts wrap and autoline as keyword arguments. Effect is
+ the same as setting them before the write call and resetting
+ them afterwards.
+
+ Accepts first_prefixes and later_prefixes as keyword
+ arguments. They should be sequences that are temporarily
+ appended to the first_prefix and later_prefix attributes.
+
+ Accepts prefixes as a keyword argument. Effect is the same as
+ setting first_prefixes and later_prefixes to the same value.
+
+ Accepts first_prefix, later_prefix and prefix as keyword
+ argument. Effect is the same as setting first_prefixes,
+ later_prefixes or prefixes to a one-element tuple.
+
+ The formatter has a couple of attributes that are useful as argument
+ to write.
+ """
+
+ def fg(self, color=None):
+ """Change foreground color.
+
+ @type color: a string or C{None}.
+ @param color: color to change to. A default is used if omitted.
+ C{None} resets to the default color.
+ """
+
+ def bg(self, color=None):
+ """Change background color.
+
+ @type color: a string or C{None}.
+ @param color: color to change to. A default is used if omitted.
+ C{None} resets to the default color.
+ """
+
+ def error(self, message):
+ """Format a string as an error message."""
+ self.write(message, prefixes=(
+ self.fg('red'), self.bold, '!!! ', self.reset))
+
+ def warn(self, message):
+ """Format a string as a warning message."""
+ self.write(message, prefixes=(
+ self.fg('yellow'), self.bold, '*** ', self.reset))
+
+ def title(self, string):
+ """Set the title to string"""
+ pass
+
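+# A usage sketch (illustrative only, not part of the original file) of the
+# write() keyword arguments described above, using the PlainTextFormatter
+# defined further down:
+#
+#   import sys
+#   out = PlainTextFormatter(sys.stdout, width=40)
+#   out.write("a fairly long line that will be wrapped", wrap=True,
+#             prefix="* ")
+#   out.write(out.fg('green'), "done", out.reset)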
+
+class native_PlainTextFormatter(Formatter):
+
+ """Formatter writing plain text to a file-like object.
+
+ @ivar width: contains the current maximum line length.
+ @ivar encoding: the encoding unicode strings should be converted to.
+ @ivar first_prefix: prefixes to output at the beginning of every write.
+ @ivar later_prefix: prefixes to output on each line after the first of
+ every write.
+ """
+
+ bold = underline = reset = ''
+
+ def __init__(self, stream, width=79, encoding=None):
+ """Initialize.
+
+ @type stream: file-like object.
+ @param stream: stream to output to.
+ @param width: maximum line width (defaults to 79).
+ @param encoding: encoding unicode strings are converted to.
+ """
+ Formatter.__init__(self)
+ self.stream = stream
+ if encoding is None:
+ encoding = getattr(self.stream, 'encoding', None)
+ if encoding is None:
+ try:
+ encoding = locale.getpreferredencoding()
+ except locale.Error:
+ encoding = 'ascii'
+ self.encoding = encoding
+ self.width = width
+ self._pos = 0
+ self._in_first_line = True
+ self._wrote_something = False
+ self.first_prefix = []
+ self.later_prefix = []
+
+
+ def _write_prefix(self, wrap):
+ if self._in_first_line:
+ prefix = self.first_prefix
+ else:
+ prefix = self.later_prefix
+ # This is a bit braindead since it duplicates a lot of code
+ # from write. Avoids fun things like word wrapped prefix though.
+
+ for thing in prefix:
+ while callable(thing):
+ thing = thing(self)
+ if thing is None:
+ continue
+ if not isinstance(thing, basestring):
+ thing = str(thing)
+ self._pos += len(thing)
+ if isinstance(thing, unicode):
+ thing = thing.encode(self.encoding, 'replace')
+ self.stream.write(thing)
+ if wrap and self._pos >= self.width:
+ # XXX What to do? Our prefix does not fit.
+ # This makes sure we still output something,
+ # but it is completely arbitrary.
+ self._pos = self.width - 10
+
+
+ def write(self, *args, **kwargs):
+ wrap = kwargs.get('wrap', self.wrap)
+ autoline = kwargs.get('autoline', self.autoline)
+ prefixes = kwargs.get('prefixes')
+ first_prefixes = kwargs.get('first_prefixes')
+ later_prefixes = kwargs.get('later_prefixes')
+ if prefixes is not None:
+ if first_prefixes is not None or later_prefixes is not None:
+ raise TypeError(
+ 'do not pass first_prefixes or later_prefixes '
+ 'if prefixes is passed')
+ first_prefixes = later_prefixes = prefixes
+ prefix = kwargs.get('prefix')
+ first_prefix = kwargs.get('first_prefix')
+ later_prefix = kwargs.get('later_prefix')
+ if prefix is not None:
+ if first_prefix is not None or later_prefix is not None:
+ raise TypeError(
+ 'do not pass first_prefix or later_prefix with prefix')
+ first_prefix = later_prefix = prefix
+ if first_prefix is not None:
+ if first_prefixes is not None:
+ raise TypeError(
+ 'do not pass both first_prefix and first_prefixes')
+ first_prefixes = (first_prefix,)
+ if later_prefix is not None:
+ if later_prefixes is not None:
+ raise TypeError(
+ 'do not pass both later_prefix and later_prefixes')
+ later_prefixes = (later_prefix,)
+ if first_prefixes is not None:
+ self.first_prefix.extend(first_prefixes)
+ if later_prefixes is not None:
+ self.later_prefix.extend(later_prefixes)
+ # Remove this nested try block once we depend on python 2.5
+ try:
+ try:
+ for arg in args:
+ # If we're at the start of the line, write our prefix.
+ # There is a deficiency here: if neither our arg nor our
+ # prefix affect _pos (both are escape sequences or empty)
+ # we will write prefix more than once. This should not
+ # matter.
+ if not self._pos:
+ self._write_prefix(wrap)
+ while callable(arg):
+ arg = arg(self)
+ if arg is None:
+ continue
+ if not isinstance(arg, basestring):
+ arg = str(arg)
+ is_unicode = isinstance(arg, unicode)
+ while wrap and self._pos + len(arg) > self.width:
+ # We have to split.
+ maxlen = self.width - self._pos
+ space = arg.rfind(' ', 0, maxlen)
+ if space == -1:
+ # No space to split on.
+
+ # If we are on the first line we can simply go to
+ # the next (this helps if the "later" prefix is
+ # shorter and should not really matter if not).
+
+ # If we are on the second line and have already
+ # written something we can also go to the next
+ # line.
+ if self._in_first_line or self._wrote_something:
+ bit = ''
+ else:
+ # Forcibly split this as far to the right as
+ # possible.
+ bit = arg[:maxlen]
+ arg = arg[maxlen:]
+ else:
+ bit = arg[:space]
+ # Omit the space we split on.
+ arg = arg[space+1:]
+ if is_unicode:
+ bit = bit.encode(self.encoding, 'replace')
+ self.stream.write(bit)
+ self.stream.write('\n')
+ self._pos = 0
+ self._in_first_line = False
+ self._wrote_something = False
+ self._write_prefix(wrap)
+
+ # This fits.
+ self._wrote_something = True
+ self._pos += len(arg)
+ if is_unicode:
+ arg = arg.encode(self.encoding, 'replace')
+ self.stream.write(arg)
+ if autoline:
+ self.stream.write('\n')
+ self._wrote_something = False
+ self._pos = 0
+ self._in_first_line = True
+ except IOError, e:
+ if e.errno == errno.EPIPE:
+ raise StreamClosed(e)
+ raise
+ finally:
+ if first_prefixes is not None:
+ self.first_prefix = self.first_prefix[:-len(first_prefixes)]
+ if later_prefixes is not None:
+ self.later_prefix = self.later_prefix[:-len(later_prefixes)]
+
+ def fg(self, color=None):
+ return ''
+
+ def bg(self, color=None):
+ return ''
+
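+# A rough usage sketch of the native formatter (assumes the default
+# autoline behaviour from the Formatter base class; output shown is the
+# plain-text case):
+#
+#   >>> from StringIO import StringIO
+#   >>> s = StringIO()
+#   >>> out = native_PlainTextFormatter(s, width=10)
+#   >>> out.write('one two three', wrap=True)
+#   >>> s.getvalue()
+#   'one two\nthree\n'
+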
+try:
+ from snakeoil._formatters import PlainTextFormatter, StreamClosed
+ class PlainTextFormatter(PlainTextFormatter, Formatter):
+ __doc__ = native_PlainTextFormatter.__doc__
+ __slots__ = ()
+ def fg(self, color=None):
+ return ''
+ bg = fg
+
+except ImportError:
+ PlainTextFormatter = native_PlainTextFormatter
+ StreamClosed = native_StreamClosed
+
+# This is necessary because the curses module is optional (and we
+# should run on a very minimal python for bootstrapping).
+try:
+ import curses
+except ImportError:
+ TerminfoColor = None
+else:
+ class TerminfoColor(object):
+
+ def __init__(self, mode, color):
+ self.mode = mode
+ self.color = color
+
+ def __call__(self, formatter):
+ if self.color is None:
+ formatter._current_colors[self.mode] = None
+ res = formatter._color_reset
+ # slight abuse of boolean True/False and 1/0 equivalence
+ other = formatter._current_colors[not self.mode]
+ if other is not None:
+ res = res + other
+ else:
+ if self.mode == 0:
+ default = curses.COLOR_WHITE
+ else:
+ default = curses.COLOR_BLACK
+ color = formatter._colors.get(self.color, default)
+ # The curses module currently segfaults if handed a
+ # bogus template so check explicitly.
+ template = formatter._set_color[self.mode]
+ if template:
+ res = curses.tparm(template, color)
+ else:
+ res = ''
+ formatter._current_colors[self.mode] = res
+ formatter.stream.write(res)
+
+
+ class TerminfoCode(object):
+ def __init__(self, value):
+ self.value = value
+
+ class TerminfoMode(TerminfoCode):
+ def __call__(self, formatter):
+ formatter._modes.add(self)
+ formatter.stream.write(self.value)
+
+ class TerminfoReset(TerminfoCode):
+ def __call__(self, formatter):
+ formatter._modes.clear()
+ formatter.stream.write(self.value)
+
+
+ class TerminfoFormatter(PlainTextFormatter):
+
+ """Formatter writing to a tty, using terminfo to do colors."""
+
+ _colors = dict(
+ black = curses.COLOR_BLACK,
+ red = curses.COLOR_RED,
+ green = curses.COLOR_GREEN,
+ yellow = curses.COLOR_YELLOW,
+ blue = curses.COLOR_BLUE,
+ magenta = curses.COLOR_MAGENTA,
+ cyan = curses.COLOR_CYAN,
+ white = curses.COLOR_WHITE)
+
+ # Remapping of TERM setting to more capable equivalent.
+ # Mainly used to force on the hardstatus (aka title bar updates)
+ # capability for terminals that do not support this by default.
+ term_alternates = {
+ 'xterm': 'xterm+sl',
+ 'screen': 'screen-s',
+ }
+
+ def __init__(self, stream, term=None, forcetty=False, encoding=None):
+ """Initialize.
+
+ @type stream: file-like object.
+        @param stream: stream to output to.
+ @type term: string.
+ @param term: terminal type, pulled from the environment if omitted.
+ @type forcetty: bool
+ @param forcetty: force output of colors even if the wrapped stream
+ is not a tty.
+ """
+ PlainTextFormatter.__init__(self, stream, encoding=encoding)
+ fd = stream.fileno()
+ if term is None:
+ # We only apply the remapping if we are guessing the
+ # terminal type from the environment. If we get a term
+            # type passed explicitly we just use it as-is (if the
+            # caller wants the remap, doing the term_alternates
+            # lookup themselves is easy enough).
+ term_env = os.environ.get('TERM')
+ term_alt = self.term_alternates.get(term_env)
+ for term in (term_alt, term_env, 'dumb'):
+ if term is not None:
+ try:
+ curses.setupterm(fd=fd, term=term)
+ except curses.error:
+ pass
+ else:
+ break
+ else:
+ raise ValueError(
+ 'no terminfo entries, not even for "dumb"?')
+ else:
+ # TODO maybe do something more useful than raising curses.error
+ # if term is not in the terminfo db here?
+ curses.setupterm(fd=fd, term=term)
+ self.width = curses.tigetnum('cols')
+ self.reset = TerminfoReset(curses.tigetstr('sgr0'))
+ self.bold = TerminfoMode(curses.tigetstr('bold'))
+ self.underline = TerminfoMode(curses.tigetstr('smul'))
+ self._color_reset = curses.tigetstr('op')
+ self._set_color = (
+ curses.tigetstr('setaf'), curses.tigetstr('setab'))
+ # [fg, bg]
+ self._current_colors = [None, None]
+ self._modes = set()
+ self._pos = 0
+
+ def fg(self, color=None):
+ return TerminfoColor(0, color)
+
+ def bg(self, color=None):
+ return TerminfoColor(1, color)
+
+ def write(self, *args, **kwargs):
+ PlainTextFormatter.write(self, *args, **kwargs)
+ try:
+ if self._modes:
+ self.reset(self)
+ if self._current_colors != [None, None]:
+ self._current_colors = [None, None]
+ self.stream.write(self._color_reset)
+ except IOError, e:
+ if e.errno == errno.EPIPE:
+ raise StreamClosed(e)
+ raise
+
+ def title(self, string):
+ # I want to use curses.tigetflag('hs') here but at least
+ # the screen-s entry defines a tsl and fsl string but does
+ # not set the hs flag. So just check for the ability to
+ # jump to and out of the status line, without checking if
+ # the status line we're using exists.
+ if curses.tigetstr('tsl') and curses.tigetstr('fsl'):
+ self.stream.write(
+ curses.tigetstr('tsl') + string + curses.tigetstr('fsl'))
+ self.stream.flush()
+
+
+class ObserverFormatter(object):
+
+ def __init__(self, real_formatter):
+ self._formatter = real_formatter
+
+ def write(self, *args):
+ self._formatter.write(autoline=False, *args)
+
+ __getattr__ = GetAttrProxy("_formatter")
+
+
+def get_formatter(stream):
+ """TerminfoFormatter if the stream is a tty, else PlainTextFormatter."""
+ if TerminfoColor is None:
+ return PlainTextFormatter(stream)
+ try:
+ fd = stream.fileno()
+ except AttributeError:
+ pass
+ else:
+ # We do this instead of stream.isatty() because TerminfoFormatter
+ # needs an fd to pass to curses, not just a filelike talking to a tty.
+ if os.isatty(fd):
+ try:
+ return TerminfoFormatter(stream)
+ except curses.error:
+ # This happens if TERM is unset and possibly in more cases.
+ # Just fall back to the PlainTextFormatter.
+ pass
+ return PlainTextFormatter(stream)
+
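+# A rough usage sketch; the terminfo formatter is only picked when the
+# stream is a real tty (the 'ok' output below is what the plain-text case
+# writes, colors are no-ops there):
+#
+#   >>> import sys
+#   >>> out = get_formatter(sys.stdout)
+#   >>> out.write(out.fg('green'), 'ok', out.reset)
+#   ok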
+
+def decorate_forced_wrapping(setting=True):
+ def wrapped_func(func):
+ def f(out, *args, **kwds):
+ oldwrap = out.wrap
+ out.wrap = setting
+ try:
+ return func(out, *args, **kwds)
+ finally:
+ out.wrap = oldwrap
+ return f
+ return wrapped_func
diff --git a/snakeoil/iterables.py b/snakeoil/iterables.py
new file mode 100644
index 0000000..e6f05bf
--- /dev/null
+++ b/snakeoil/iterables.py
@@ -0,0 +1,202 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+from collections import deque
+
+class expandable_chain(object):
+ """
+ chained iterables, with the ability to add new iterables to the chain
+ as long as the instance hasn't raised StopIteration already.
+ """
+
+    __slots__ = ("iterables", "__weakref__")
+
+ def __init__(self, *iterables):
+ """
+        accepts N iterables to chain together
+ """
+ self.iterables = deque()
+ self.extend(iterables)
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ if self.iterables is not None:
+ while self.iterables:
+ try:
+ return self.iterables[0].next()
+ except StopIteration:
+ self.iterables.popleft()
+ self.iterables = None
+ raise StopIteration()
+
+ def append(self, iterable):
+ """append an iterable to the chain to be consumed"""
+ if self.iterables is None:
+ raise StopIteration()
+ self.iterables.append(iter(iterable))
+
+ def appendleft(self, iterable):
+ """prepend an iterable to the chain to be consumed"""
+ if self.iterables is None:
+ raise StopIteration()
+ self.iterables.appendleft(iter(iterable))
+
+ def extend(self, iterables):
+ """extend multiple iterables to the chain to be consumed"""
+ if self.iterables is None:
+ raise StopIteration()
+ self.iterables.extend(iter(x) for x in iterables)
+
+ def extendleft(self, iterables):
+ """prepend multiple iterables to the chain to be consumed"""
+ if self.iterables is None:
+ raise StopIteration()
+ self.iterables.extendleft(iter(x) for x in iterables)
+
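+# A rough usage sketch; iterables can keep being added until the chain
+# has been exhausted:
+#
+#   >>> chain = expandable_chain([1, 2], (3,))
+#   >>> chain.append([4])
+#   >>> list(chain)
+#   [1, 2, 3, 4]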
+
+class caching_iter(object):
+ """
+    Consumes from an iterable on demand so as to appear like a tuple
+ """
+ __slots__ = ("iterable", "__weakref__", "cached_list", "sorter")
+
+ def __init__(self, iterable, sorter=None):
+ self.sorter = sorter
+ self.iterable = iter(iterable)
+ self.cached_list = []
+
+ def __setitem__(self, key, val):
+ raise TypeError("non modifiable")
+
+ def __getitem__(self, index):
+ existing_len = len(self.cached_list)
+ if self.iterable is not None and self.sorter:
+ self.cached_list.extend(self.iterable)
+ self.cached_list = tuple(self.sorter(self.cached_list))
+ self.iterable = self.sorter = None
+ existing_len = len(self.cached_list)
+
+ if index < 0:
+ if self.iterable is not None:
+ self.cached_list = tuple(self.cached_list + list(self.iterable))
+ self.iterable = None
+ existing_len = len(self.cached_list)
+
+ index = existing_len + index
+ if index < 0:
+ raise IndexError("list index out of range")
+
+ elif index >= existing_len - 1:
+ if self.iterable is not None:
+ try:
+ self.cached_list.extend(self.iterable.next()
+                    for i in xrange(index - existing_len + 1))
+ except StopIteration:
+ # consumed, baby.
+ self.iterable = None
+ self.cached_list = tuple(self.cached_list)
+ raise IndexError("list index out of range")
+
+ return self.cached_list[index]
+
+ def __cmp__(self, other):
+ if self.iterable is not None:
+ if self.sorter:
+ self.cached_list.extend(self.iterable)
+ self.cached_list = tuple(self.sorter(self.cached_list))
+ self.sorter = None
+ else:
+ self.cached_list = tuple(self.cached_list + list(self.iterable))
+ self.iterable = None
+ return cmp(self.cached_list, other)
+
+ def __nonzero__(self):
+ if self.cached_list:
+ return True
+
+ if self.iterable:
+ for x in self.iterable:
+ self.cached_list.append(x)
+ return True
+ # if we've made it here... then nothing more in the iterable.
+ self.iterable = self.sorter = None
+ self.cached_list = ()
+ return False
+
+ def __len__(self):
+ if self.iterable is not None:
+ self.cached_list.extend(self.iterable)
+ if self.sorter:
+ self.cached_list = tuple(self.sorter(self.cached_list))
+ self.sorter = None
+ else:
+ self.cached_list = tuple(self.cached_list)
+ self.iterable = None
+ return len(self.cached_list)
+
+ def __iter__(self):
+ if (self.sorter is not None and
+ self.iterable is not None and
+ len(self.cached_list) == 0):
+ self.cached_list = tuple(self.sorter(self.iterable))
+ self.iterable = self.sorter = None
+
+ for x in self.cached_list:
+ yield x
+ if self.iterable is not None:
+ for x in self.iterable:
+ self.cached_list.append(x)
+ yield x
+ else:
+ return
+ self.iterable = None
+ self.cached_list = tuple(self.cached_list)
+
+ def __hash__(self):
+ if self.iterable is not None:
+ self.cached_list.extend(self.iterable)
+ self.cached_list = tuple(self.cached_list)
+ self.iterable = None
+ return hash(self.cached_list)
+
+ def __str__(self):
+ return "iterable(%s), cached: %s" % (
+ self.iterable, str(self.cached_list))
+
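+# A rough usage sketch; elements are pulled from the wrapped iterator only
+# as far as indexing requires:
+#
+#   >>> c = caching_iter(iter([3, 1, 2]))
+#   >>> c[0]
+#   3
+#   >>> len(c)
+#   3
+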
+def iter_sort(sorter, *iterables):
+ """Merge a number of sorted iterables into a single sorted iterable.
+
+ @type sorter: callable.
+ @param sorter: function, passed a list of [element, iterable].
+ @param iterables: iterables to consume from.
+ B{Required} to yield in presorted order.
+ """
+ l = []
+ for x in iterables:
+ try:
+ x = iter(x)
+ l.append([x.next(), x])
+ except StopIteration:
+ pass
+ if len(l) == 1:
+ yield l[0][0]
+ for x in l[0][1]:
+ yield x
+ return
+ l = sorter(l)
+ while l:
+ yield l[0][0]
+ for y in l[0][1]:
+ l[0][0] = y
+ break
+ else:
+ del l[0]
+ if len(l) == 1:
+ yield l[0][0]
+ for x in l[0][1]:
+ yield x
+ break
+ continue
+ l = sorter(l)
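+
+# A rough usage sketch; C{sorted} is an adequate sorter here since it
+# orders the [element, iterable] pairs by their head elements:
+#
+#   >>> list(iter_sort(sorted, [1, 3], [2, 4]))
+#   [1, 2, 3, 4]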
diff --git a/snakeoil/klass.py b/snakeoil/klass.py
new file mode 100644
index 0000000..6763c1f
--- /dev/null
+++ b/snakeoil/klass.py
@@ -0,0 +1,95 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+from operator import attrgetter
+from snakeoil.caching import WeakInstMeta
+from collections import deque
+
+def native_GetAttrProxy(target):
+ def reflected_getattr(self, attr):
+ return getattr(getattr(self, target), attr)
+ return reflected_getattr
+
+def native_contains(self, key):
+ try:
+ self[key]
+ return True
+ except KeyError:
+ return False
+
+def native_get(self, key, default=None):
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+
+attrlist_getter = attrgetter("__attr_comparison__")
+def native_generic_eq(inst1, inst2, sentinel=object()):
+ if inst1 is inst2:
+ return True
+ for attr in attrlist_getter(inst1):
+ if getattr(inst1, attr, sentinel) != \
+ getattr(inst2, attr, sentinel):
+ return False
+ return True
+
+def native_generic_ne(inst1, inst2, sentinel=object()):
+ if inst1 is inst2:
+ return False
+ for attr in attrlist_getter(inst1):
+ if getattr(inst1, attr, sentinel) != \
+ getattr(inst2, attr, sentinel):
+ return True
+ return False
+
+try:
+ from snakeoil._klass import (GetAttrProxy, contains, get,
+ generic_eq, generic_ne)
+except ImportError:
+ GetAttrProxy = native_GetAttrProxy
+ contains = native_contains
+ get = native_get
+ generic_eq = native_generic_eq
+ generic_ne = native_generic_ne
+
+
+def generic_equality(name, bases, scope, real_type=type,
+ eq=generic_eq, ne=generic_ne):
+ attrlist = scope.pop("__attr_comparison__", None)
+ if attrlist is None:
+ raise TypeError("__attr_comparison__ must be in the classes scope")
+ for x in attrlist:
+ if not isinstance(x, str):
+ raise TypeError("all members of attrlist must be strings- "
+ " got %r %s" % (type(x), repr(x)))
+
+ scope["__attr_comparison__"] = tuple(attrlist)
+ scope.setdefault("__eq__", eq)
+ scope.setdefault("__ne__", ne)
+ return real_type(name, bases, scope)
+
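+# A rough usage sketch; attribute-based __eq__/__ne__ are filled in by the
+# metaclass:
+#
+#   >>> class Point(object):
+#   ...     __metaclass__ = generic_equality
+#   ...     __attr_comparison__ = ('x', 'y')
+#   ...     def __init__(self, x, y):
+#   ...         self.x, self.y = x, y
+#   >>> Point(1, 2) == Point(1, 2)
+#   True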
+
+class chained_getter(object):
+ def __metaclass__(name, bases, scope):
+ return generic_equality(name, bases, scope, real_type=WeakInstMeta)
+ __slots__ = ('namespace', 'chain')
+ __fifo_cache__ = deque()
+ __inst_caching__ = True
+ __attr_comparison__ = ("namespace",)
+
+ def __init__(self, namespace):
+ self.namespace = namespace
+ self.chain = map(attrgetter, namespace.split("."))
+ if len(self.__fifo_cache__) > 10:
+ self.__fifo_cache__.popleft()
+ self.__fifo_cache__.append(self)
+
+ def __hash__(self):
+ return hash(self.namespace)
+
+ def __call__(self, obj):
+ o = obj
+ for f in self.chain:
+ o = f(o)
+ return o
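+
+# A rough usage sketch with hypothetical classes; the dotted namespace is
+# resolved one attribute at a time:
+#
+#   >>> class Inner(object):
+#   ...     value = 42
+#   >>> class Outer(object):
+#   ...     inner = Inner()
+#   >>> chained_getter("inner.value")(Outer())
+#   42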
diff --git a/snakeoil/lists.py b/snakeoil/lists.py
new file mode 100644
index 0000000..3cc8d4c
--- /dev/null
+++ b/snakeoil/lists.py
@@ -0,0 +1,171 @@
+# Copyright: 2005 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+sequence related operations
+"""
+
+from snakeoil.iterables import expandable_chain
+
+def unstable_unique(sequence):
+ """
+ lifted from python cookbook, credit: Tim Peters
+    Return a list of the elements in sequence in arbitrary order, sans duplicates
+ """
+
+ n = len(sequence)
+ # assume all elements are hashable, if so, it's linear
+ try:
+ return list(set(sequence))
+ except TypeError:
+ pass
+
+ # so much for linear. abuse sort.
+ try:
+ t = sorted(sequence)
+ except TypeError:
+ pass
+ else:
+ assert n > 0
+ last = t[0]
+ lasti = i = 1
+ while i < n:
+ if t[i] != last:
+ t[lasti] = last = t[i]
+ lasti += 1
+ i += 1
+ return t[:lasti]
+
+ # blah. back to original portage.unique_array
+ u = []
+ for x in sequence:
+ if x not in u:
+ u.append(x)
+ return u
+
+def stable_unique(iterable):
+ """
+ return unique list from iterable, preserving ordering
+ """
+ return list(iter_stable_unique(iterable))
+
+def iter_stable_unique(iterable):
+ """
+ generator yielding unique elements from iterable, preserving ordering
+ """
+ s = set()
+ for x in iterable:
+ if x not in s:
+ yield x
+ s.add(x)
+
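+# A rough usage sketch; the first occurrence wins and ordering is kept:
+#
+#   >>> stable_unique([3, 1, 3, 2, 1])
+#   [3, 1, 2]
+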
+def native_iflatten_instance(l, skip_flattening=(basestring,)):
+ """
+ collapse [[1],2] into [1,2]
+
+ @param skip_flattening: list of classes to not descend through
+ """
+ if isinstance(l, skip_flattening):
+ yield l
+ return
+ iters = expandable_chain(l)
+ try:
+ while True:
+ x = iters.next()
+ if hasattr(x, '__iter__') and not isinstance(x, skip_flattening):
+ iters.appendleft(x)
+ else:
+ yield x
+ except StopIteration:
+ pass
+
+def native_iflatten_func(l, skip_func):
+ """
+ collapse [[1],2] into [1,2]
+
+ @param skip_func: a callable that returns True when iflatten_func should
+ descend no further
+ """
+ if skip_func(l):
+ yield l
+ return
+ iters = expandable_chain(l)
+ try:
+ while True:
+ x = iters.next()
+ if hasattr(x, '__iter__') and not skip_func(x):
+ iters.appendleft(x)
+ else:
+ yield x
+ except StopIteration:
+ pass
+
+
+try:
+ # No name "readdir" in module osutils
+ # pylint: disable-msg=E0611
+ from snakeoil._lists import iflatten_instance, iflatten_func
+ cpy_builtin = True
+except ImportError:
+ cpy_builtin = False
+ cpy_iflatten_instance = cpy_iflatten_func = None
+ iflatten_instance = native_iflatten_instance
+ iflatten_func = native_iflatten_func
+
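+# A rough usage sketch; strings are treated as atoms by default:
+#
+#   >>> list(iflatten_instance([1, ["abc", [2, 3]]]))
+#   [1, 'abc', 2, 3]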
+
+class ChainedLists(object):
+ """
+ sequences chained together, without collapsing into a list
+ """
+ __slots__ = ("_lists", "__weakref__")
+
+ def __init__(self, *lists):
+ """
+ all args must be sequences
+ """
+ # ensure they're iterable
+ for x in lists:
+ iter(x)
+
+ if isinstance(lists, tuple):
+ lists = list(lists)
+ self._lists = lists
+
+ def __len__(self):
+ return sum(len(l) for l in self._lists)
+
+ def __getitem__(self, idx):
+ if idx < 0:
+ idx += len(self)
+ if idx < 0:
+ raise IndexError
+ for l in self._lists:
+ l2 = len(l)
+ if idx < l2:
+ return l[idx]
+ idx -= l2
+ else:
+ raise IndexError
+
+ def __setitem__(self, idx, val):
+ raise TypeError("not mutable")
+
+ def __delitem__(self, idx):
+ raise TypeError("not mutable")
+
+ def __iter__(self):
+ for l in self._lists:
+ for x in l:
+ yield x
+
+ def __contains__(self, obj):
+ return obj in iter(self)
+
+ def __str__(self):
+ return "[ %s ]" % ", ".join(str(l) for l in self._lists)
+
+ def append(self, item):
+ self._lists.append(item)
+
+ def extend(self, items):
+ self._lists.extend(items)
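+
+# A rough usage sketch; the underlying sequences are never copied:
+#
+#   >>> c = ChainedLists([1, 2], [3])
+#   >>> len(c), c[2], 3 in c
+#   (3, 3, True)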
diff --git a/snakeoil/mappings.py b/snakeoil/mappings.py
new file mode 100644
index 0000000..7847c8c
--- /dev/null
+++ b/snakeoil/mappings.py
@@ -0,0 +1,579 @@
+# Copyright: 2005-2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+miscellaneous mapping/dict related classes
+"""
+
+import operator
+from itertools import imap, chain, ifilterfalse, izip
+from snakeoil.klass import get, contains
+from collections import deque
+
+
+class DictMixin(object):
+ """
+ new style class replacement for L{UserDict.DictMixin}
+    designed around iter* methods rather than forcing lists as DictMixin does
+ """
+
+ __slots__ = ()
+
+ __externally_mutable__ = True
+
+ def __init__(self, iterable=None, **kwargs):
+ if iterable is not None:
+ self.update(iterable)
+
+ if kwargs:
+ self.update(kwargs.iteritems())
+
+ def __iter__(self):
+ return self.iterkeys()
+
+ def keys(self):
+ return list(self.iterkeys())
+
+ def values(self):
+ return list(self.itervalues())
+
+ def items(self):
+ return list(self.iteritems())
+
+ def update(self, iterable):
+ for k, v in iterable:
+ self[k] = v
+
+ get = get
+ __contains__ = contains
+
+ # default cmp actually operates based on key len comparison, oddly enough
+ def __cmp__(self, other):
+ for k1, k2 in izip(sorted(self), sorted(other)):
+ c = cmp(k1, k2)
+ if c != 0:
+ return c
+ c = cmp(self[k1], other[k2])
+ if c != 0:
+ return c
+ c = cmp(len(self), len(other))
+ return c
+
+ def __eq__(self, other):
+ return self.__cmp__(other) == 0
+
+ def __ne__(self, other):
+ return self.__cmp__(other) != 0
+
+ def pop(self, key, default=None):
+ if not self.__externally_mutable__:
+ raise AttributeError(self, "pop")
+ try:
+ val = self[key]
+ del self[key]
+ except KeyError:
+ if default is not None:
+ return default
+ raise
+ return val
+
+ def setdefault(self, key, default=None):
+ if not self.__externally_mutable__:
+ raise AttributeError(self, "setdefault")
+ if key in self:
+ return self[key]
+ self[key] = default
+ return default
+
+ def has_key(self, key):
+ return key in self
+
+ def iterkeys(self):
+ raise NotImplementedError(self, "iterkeys")
+
+ def itervalues(self):
+ return imap(self.__getitem__, self)
+
+ def iteritems(self):
+ for k in self:
+ yield k, self[k]
+
+ def __getitem__(self, key):
+ raise NotImplementedError(self, "__getitem__")
+
+ def __setitem__(self, key, val):
+ if not self.__externally_mutable__:
+ raise AttributeError(self, "__setitem__")
+ raise NotImplementedError(self, "__setitem__")
+
+ def __delitem__(self, key):
+ if not self.__externally_mutable__:
+ raise AttributeError(self, "__delitem__")
+ raise NotImplementedError(self, "__delitem__")
+
+ def clear(self):
+ if not self.__externally_mutable__:
+ raise AttributeError(self, "clear")
+ # crappy, override if faster method exists.
+ map(self.__delitem__, self.keys())
+
+ def __len__(self):
+ c = 0
+ for x in self:
+ c += 1
+ return c
+
+ def popitem(self):
+ if not self.__externally_mutable__:
+ raise AttributeError(self, "popitem")
+ # do it this way so python handles the stopiteration; faster
+ for key, val in self.iteritems():
+ del self[key]
+ return key, val
+ raise KeyError("container is empty")
+
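+# A rough sketch of a minimal subclass; only __getitem__, __setitem__,
+# __delitem__ and iterkeys need real implementations, the mixin fills in
+# the rest of the mapping protocol:
+#
+#   >>> class simple_dict(DictMixin):
+#   ...     def __init__(self):
+#   ...         self._d = {}
+#   ...     def __getitem__(self, key):
+#   ...         return self._d[key]
+#   ...     def __setitem__(self, key, val):
+#   ...         self._d[key] = val
+#   ...     def __delitem__(self, key):
+#   ...         del self._d[key]
+#   ...     def iterkeys(self):
+#   ...         return iter(self._d)
+#   >>> d = simple_dict()
+#   >>> d['a'] = 1
+#   >>> d.items()
+#   [('a', 1)]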
+
+class LazyValDict(DictMixin):
+
+ """
+ Mapping that loads values via a callable
+
+ given a function to get keys, and to look up the val for those keys, it'll
+ lazily load key definitions and values as requested
+ """
+ __slots__ = ("_keys", "_keys_func", "_vals", "_val_func")
+ __externally_mutable__ = False
+
+ def __init__(self, get_keys_func, get_val_func):
+ """
+ @param get_keys_func: either a container, or func to call to get keys.
+ @param get_val_func: a callable that is JIT called
+ with the key requested.
+ """
+ if not callable(get_val_func):
+ raise TypeError("get_val_func isn't a callable")
+ if hasattr(get_keys_func, "__iter__"):
+ self._keys = get_keys_func
+ self._keys_func = None
+ else:
+ if not callable(get_keys_func):
+ raise TypeError(
+ "get_keys_func isn't iterable or callable")
+ self._keys_func = get_keys_func
+ self._val_func = get_val_func
+ self._vals = {}
+
+ def __getitem__(self, key):
+ if self._keys_func is not None:
+ self._keys = set(self._keys_func())
+ self._keys_func = None
+ if key in self._vals:
+ return self._vals[key]
+ if key in self._keys:
+ v = self._vals[key] = self._val_func(key)
+ return v
+ raise KeyError(key)
+
+ def keys(self):
+ if self._keys_func is not None:
+ self._keys = set(self._keys_func())
+ self._keys_func = None
+ return list(self._keys)
+
+ def iterkeys(self):
+ if self._keys_func is not None:
+ self._keys = set(self._keys_func())
+ self._keys_func = None
+ return iter(self._keys)
+
+ def itervalues(self):
+ return imap(self.__getitem__, self.iterkeys())
+
+ def iteritems(self):
+ return ((k, self[k]) for k in self.iterkeys())
+
+ def __contains__(self, key):
+ if self._keys_func is not None:
+ self._keys = set(self._keys_func())
+ self._keys_func = None
+ return key in self._keys
+
+ def __len__(self):
+ if self._keys_func is not None:
+ self._keys = set(self._keys_func())
+ self._keys_func = None
+ return len(self._keys)
+
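+# A rough usage sketch; C{len} stands in for an expensive value lookup and
+# only runs when a key is first accessed:
+#
+#   >>> d = LazyValDict(["foo", "barbaz"], len)
+#   >>> d["barbaz"]
+#   6
+#   >>> sorted(d.keys())
+#   ['barbaz', 'foo']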
+
+class LazyFullValLoadDict(LazyValDict):
+
+ __slots__ = ()
+
+ def __getitem__(self, key):
+ if self._keys_func is not None:
+ self._keys = set(self._keys_func())
+ self._keys_func = None
+ if key in self._vals:
+ return self._vals[key]
+ if key in self._keys:
+ if self._val_func is not None:
+ self._vals.update(self._val_func(self._keys))
+ return self._vals[key]
+ raise KeyError(key)
+
+
+class ProtectedDict(DictMixin):
+
+ """
+ Mapping wrapper storing changes to a dict without modifying the original.
+
+ Changes are stored in a secondary dict, protecting the underlying
+ mapping from changes.
+ """
+
+ __slots__ = ("orig", "new", "blacklist")
+
+ def __init__(self, orig):
+ self.orig = orig
+ self.new = {}
+ self.blacklist = {}
+
+ def __setitem__(self, key, val):
+ self.new[key] = val
+ if key in self.blacklist:
+ del self.blacklist[key]
+
+ def __getitem__(self, key):
+ if key in self.new:
+ return self.new[key]
+ if key in self.blacklist:
+ raise KeyError(key)
+ return self.orig[key]
+
+ def __delitem__(self, key):
+ if key in self.new:
+ del self.new[key]
+ self.blacklist[key] = True
+ return
+ elif key in self.orig:
+ if key not in self.blacklist:
+ self.blacklist[key] = True
+ return
+ raise KeyError(key)
+
+ def iterkeys(self):
+ for k in self.new.iterkeys():
+ yield k
+ for k in self.orig.iterkeys():
+ if k not in self.blacklist and k not in self.new:
+ yield k
+
+ def __contains__(self, key):
+ return key in self.new or (key not in self.blacklist and
+ key in self.orig)
+
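+# A rough usage sketch; writes and deletes never touch the wrapped dict:
+#
+#   >>> orig = {'a': 1}
+#   >>> d = ProtectedDict(orig)
+#   >>> d['b'] = 2
+#   >>> del d['a']
+#   >>> d.keys(), orig
+#   (['b'], {'a': 1})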
+
+class ImmutableDict(dict):
+
+ """Immutable Dict, non changable after instantiating"""
+
+ _hash_key_grabber = operator.itemgetter(0)
+
+ def __delitem__(self, *args):
+ raise TypeError("non modifiable")
+
+ __setitem__ = clear = update = pop = popitem = setdefault = __delitem__
+
+ def __hash__(self):
+ k = self.items()
+ k.sort(key=self._hash_key_grabber)
+ return hash(tuple(k))
+
+ __delattr__ = __setitem__
+ __setattr__ = __setitem__
+
+
+class IndeterminantDict(object):
+
+ """A wrapped dict with constant defaults, and a function for other keys."""
+
+ __slots__ = ("__initial", "__pull")
+
+ def __init__(self, pull_func, starter_dict=None):
+ object.__init__(self)
+ if starter_dict is None:
+ self.__initial = {}
+ else:
+ self.__initial = starter_dict
+ self.__pull = pull_func
+
+ def __getitem__(self, key):
+ if key in self.__initial:
+ return self.__initial[key]
+ else:
+ return self.__pull(key)
+
+ def get(self, key, val=None):
+ try:
+ return self[key]
+ except KeyError:
+ return val
+
+ def __hash__(self):
+ raise TypeError("non hashable")
+
+ def __delitem__(self, *args):
+ raise TypeError("non modifiable")
+
+ pop = get
+
+ clear = update = popitem = setdefault = __setitem__ = __delitem__
+ __iter__ = keys = values = items = __len__ = __delitem__
+ iteritems = iterkeys = itervalues = __delitem__
+
+
+class StackedDict(DictMixin):
+
+ """A non modifiable dict that makes multiple dicts appear as one"""
+
+ def __init__(self, *dicts):
+ self._dicts = dicts
+
+ def __getitem__(self, key):
+ for x in self._dicts:
+ if key in x:
+ return x[key]
+ raise KeyError(key)
+
+ def iterkeys(self):
+ s = set()
+ for k in ifilterfalse(s.__contains__, chain(*map(iter, self._dicts))):
+ s.add(k)
+ yield k
+
+ def __contains__(self, key):
+ for x in self._dicts:
+ if key in x:
+ return True
+ return False
+
+ def __setitem__(self, *a):
+ raise TypeError("non modifiable")
+
+ __delitem__ = clear = __setitem__
+
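+# A rough usage sketch; earlier dicts shadow later ones:
+#
+#   >>> s = StackedDict({'a': 1}, {'a': 2, 'b': 3})
+#   >>> s['a'], s['b']
+#   (1, 3)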
+
+class OrderedDict(DictMixin):
+
+ """Dict that preserves insertion ordering which is used for iteration ops"""
+
+ __slots__ = ("_data", "_order")
+
+ def __init__(self, iterable=()):
+ self._order = deque()
+ self._data = {}
+ for k, v in iterable:
+ self[k] = v
+
+ def __setitem__(self, key, val):
+ if key not in self:
+ self._order.append(key)
+ self._data[key] = val
+
+ def __delitem__(self, key):
+ del self._data[key]
+
+ for idx, o in enumerate(self._order):
+ if o == key:
+ del self._order[idx]
+ break
+ else:
+ raise AssertionError("orderdict lost its internal ordering")
+
+ def __getitem__(self, key):
+ return self._data[key]
+
+ def __len__(self):
+ return len(self._order)
+
+ def iterkeys(self):
+ return iter(self._order)
+
+ def clear(self):
+ self._order = deque()
+ self._data = {}
+
+ def __contains__(self, key):
+ return key in self._data
+
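+# A rough usage sketch; iteration follows insertion order:
+#
+#   >>> d = OrderedDict()
+#   >>> d['b'] = 1
+#   >>> d['a'] = 2
+#   >>> d.keys()
+#   ['b', 'a']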
+
+class ListBackedDict(DictMixin):
+
+ __slots__ = ("_data")
+ _kls = list
+ _key_grabber = operator.itemgetter(0)
+ _value_grabber = operator.itemgetter(1)
+
+ def __init__(self, iterables=()):
+ self._data = self._kls((k, v) for k, v in iterables)
+
+ def __setitem__(self, key, val):
+ for idx, vals in enumerate(self._data):
+ if vals[0] == key:
+ self._data[idx] = (key, val)
+ break
+ else:
+ self._data.append((key, val))
+
+ def __getitem__(self, key):
+ for existing_key, val in self._data:
+ if key == existing_key:
+ return val
+ raise KeyError(key)
+
+ def __delitem__(self, key):
+ l = self._kls((k, v) for k, v in self._data if k != key)
+ if len(l) == len(self._data):
+ # no match.
+ raise KeyError(key)
+ self._data = l
+
+ def iterkeys(self):
+ return imap(self._key_grabber, self._data)
+
+ def itervalues(self):
+ return imap(self._value_grabber, self._data)
+
+ def iteritems(self):
+ return iter(self._data)
+
+ def __contains__(self, key):
+ for k, v in self._data:
+ if k == key:
+ return True
+ return False
+
+ def __len__(self):
+ return len(self._data)
+
+class TupleBackedDict(ListBackedDict):
+ __slots__ = ()
+ _kls = tuple
+
+ def __setitem__(self, key, val):
+ self._data = self._kls(
+ chain((x for x in self.iteritems() if x[0] != key), ((key, val),)))
+
+
+class PreservingFoldingDict(DictMixin):
+
+ """dict that uses a 'folder' function when looking up keys.
+
+ The most common use for this is to implement a dict with
+ case-insensitive key values (by using C{str.lower} as folder
+ function).
+
+ This version returns the original 'unfolded' key.
+ """
+
+ def __init__(self, folder, sourcedict=None):
+ self._folder = folder
+ # dict mapping folded keys to (original key, value)
+ self._dict = {}
+ if sourcedict is not None:
+ self.update(sourcedict)
+
+ def copy(self):
+ return PreservingFoldingDict(self._folder, self.iteritems())
+
+ def refold(self, folder=None):
+ """Use the remembered original keys to update to a new folder.
+
+ If folder is C{None}, keep the current folding function (this
+ is useful if the folding function uses external data and that
+ data changed).
+ """
+ if folder is not None:
+ self._folder = folder
+ oldDict = self._dict
+ self._dict = {}
+ for key, value in oldDict.itervalues():
+ self._dict[self._folder(key)] = (key, value)
+
+ def __getitem__(self, key):
+ return self._dict[self._folder(key)][1]
+
+ def __setitem__(self, key, value):
+ self._dict[self._folder(key)] = (key, value)
+
+ def __delitem__(self, key):
+ del self._dict[self._folder(key)]
+
+ def iteritems(self):
+ return self._dict.itervalues()
+
+ def iterkeys(self):
+ for val in self._dict.itervalues():
+ yield val[0]
+
+ def itervalues(self):
+ for val in self._dict.itervalues():
+ yield val[1]
+
+ def __contains__(self, key):
+ return self._folder(key) in self._dict
+
+ def __len__(self):
+ return len(self._dict)
+
+ def clear(self):
+ self._dict = {}
+
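+# A rough usage sketch using C{str.lower} as the folder; lookups are
+# case-insensitive but the original key spelling is preserved:
+#
+#   >>> d = PreservingFoldingDict(str.lower, [('Foo', 1)])
+#   >>> d['FOO']
+#   1
+#   >>> d.keys()
+#   ['Foo']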
+
+class NonPreservingFoldingDict(DictMixin):
+
+ """dict that uses a 'folder' function when looking up keys.
+
+ The most common use for this is to implement a dict with
+ case-insensitive key values (by using C{str.lower} as folder
+ function).
+
+ This version returns the 'folded' key.
+ """
+
+ def __init__(self, folder, sourcedict=None):
+ self._folder = folder
+ # dict mapping folded keys to values.
+ self._dict = {}
+ if sourcedict is not None:
+ self.update(sourcedict)
+
+ def copy(self):
+ return NonPreservingFoldingDict(self._folder, self.iteritems())
+
+ def __getitem__(self, key):
+ return self._dict[self._folder(key)]
+
+ def __setitem__(self, key, value):
+ self._dict[self._folder(key)] = value
+
+ def __delitem__(self, key):
+ del self._dict[self._folder(key)]
+
+ def iterkeys(self):
+ return iter(self._dict)
+
+ def itervalues(self):
+ return self._dict.itervalues()
+
+ def iteritems(self):
+ return self._dict.iteritems()
+
+ def __contains__(self, key):
+ return self._folder(key) in self._dict
+
+ def __len__(self):
+ return len(self._dict)
+
+ def clear(self):
+ self._dict = {}
diff --git a/snakeoil/modules.py b/snakeoil/modules.py
new file mode 100644
index 0000000..d685001
--- /dev/null
+++ b/snakeoil/modules.py
@@ -0,0 +1,53 @@
+# Copyright: 2005 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+dynamic import functionality
+"""
+
+import sys
+
+class FailedImport(ImportError):
+ def __init__(self, trg, e):
+ ImportError.__init__(
+ self, "Failed importing target '%s': '%s'" % (trg, e))
+ self.trg, self.e = trg, e
+
+
+def load_module(name):
+ """load 'name' module, throwing a FailedImport if __import__ fails"""
+ if name in sys.modules:
+ return sys.modules[name]
+ try:
+ m = __import__(name)
+ # __import__('foo.bar') returns foo, so...
+ for bit in name.split('.')[1:]:
+ m = getattr(m, bit)
+ return m
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except Exception, e:
+ raise FailedImport(name, e)
+
+
+def load_attribute(name):
+ """load a specific attribute, rather then a module"""
+ chunks = name.rsplit(".", 1)
+ if len(chunks) == 1:
+ raise FailedImport(name, "it isn't an attribute, it's a module")
+ try:
+ m = load_module(chunks[0])
+ m = getattr(m, chunks[1])
+ return m
+ except (AttributeError, ImportError), e:
+ raise FailedImport(name, e)
+
+
+def load_any(name):
+ """Load a module or attribute."""
+ try:
+ return load_module(name)
+ except FailedImport, fi:
+ if not isinstance(fi.e, ImportError):
+ raise
+ return load_attribute(name)
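+
+# A rough usage sketch:
+#
+#   >>> import os
+#   >>> load_any("os") is os
+#   True
+#   >>> load_attribute("os.path.join") is os.path.join
+#   True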
diff --git a/snakeoil/obj.py b/snakeoil/obj.py
new file mode 100644
index 0000000..9101280
--- /dev/null
+++ b/snakeoil/obj.py
@@ -0,0 +1,206 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+from operator import attrgetter
+from snakeoil.currying import pre_curry
+from snakeoil.mappings import DictMixin
+
+def alias_method(getter, self, *a, **kwd):
+ return getter(self.__obj__)(*a, **kwd)
+
+def instantiate(inst):
+ delayed = object.__getattribute__(inst, "__delayed__")
+ obj = delayed[1](*delayed[2], **delayed[3])
+ object.__setattr__(inst, "__obj__", obj)
+ object.__delattr__(inst, "__delayed__")
+ return obj
+
+
+# we exempt __getattribute__ since we cover it already, same
+# for __new__ and __init__
+base_kls_descriptors = frozenset(
+ ('__delattr__', '__doc__', '__hash__', '__reduce__',
+ '__reduce_ex__', '__repr__', '__setattr__', '__str__'))
+
+class BaseDelayedObject(object):
+ """
+ delay actual instantiation
+ """
+
+ def __new__(cls, desired_kls, func, *a, **kwd):
+ o = object.__new__(cls)
+ object.__setattr__(o, "__delayed__", (desired_kls, func, a, kwd))
+ object.__setattr__(o, "__obj__", None)
+ return o
+
+ def __getattribute__(self, attr):
+ obj = object.__getattribute__(self, "__obj__")
+ if obj is None:
+ if attr == "__class__":
+ return object.__getattribute__(self, "__delayed__")[0]
+
+ obj = instantiate(self)
+ # now we grow some attributes.
+
+ if attr == "__obj__":
+ # special casing for alias_method
+ return obj
+ return getattr(obj, attr)
+
+ # special case the normal descriptors
+ for x in base_kls_descriptors:
+ locals()[x] = pre_curry(alias_method, attrgetter(x))
+ del x
+
+
+# note that we ignore __getattribute__; we already handle it.
+kls_descriptors = frozenset([
+ # simple comparison protocol...
+ '__cmp__',
+ # rich comparison protocol...
+ '__le__', '__lt__', '__eq__', '__ne__', '__gt__', '__ge__',
+ # unicode conversion
+ '__unicode__',
+ # truth...
+ '__nonzero__',
+ # container protocol...
+ '__len__', '__getitem__', '__setitem__', '__delitem__',
+ '__iter__', '__contains__',
+ # deprecated sequence protocol bits...
+ '__getslice__', '__setslice__', '__delslice__',
+ # numeric...
+ '__add__', '__sub__', '__mul__', '__floordiv__', '__mod__',
+ '__divmod__', '__pow__', '__lshift__', '__rshift__',
+ '__and__', '__xor__', '__or__', '__div__', '__truediv__',
+    '__radd__', '__rsub__', '__rmul__', '__rdiv__', '__rtruediv__',
+ '__rfloordiv__', '__rmod__', '__rdivmod__', '__rpow__',
+ '__rlshift__', '__rrshift__', '__rand__', '__rxor__', '__ror__',
+ '__iadd__', '__isub__', '__imul__', '__idiv__', '__itruediv__',
+ '__ifloordiv__', '__imod__', '__ipow__', '__ilshift__',
+ '__irshift__', '__iand__', '__ixor__', '__ior__',
+ '__neg__', '__pos__', '__abs__', '__invert__', '__complex__',
+ '__int__', '__long__', '__float__', '__oct__', '__hex__',
+ '__coerce__',
+ # remaining...
+ '__call__'])
+
+descriptor_overrides = dict((k, pre_curry(alias_method, attrgetter(k)))
+ for k in kls_descriptors)
+
+method_cache = {}
+def make_kls(kls):
+ special_descriptors = tuple(sorted(kls_descriptors.intersection(dir(kls))))
+ if not special_descriptors:
+ return BaseDelayedObject
+ o = method_cache.get(special_descriptors, None)
+ if o is None:
+ class CustomDelayedObject(BaseDelayedObject):
+ locals().update((k, descriptor_overrides[k])
+ for k in special_descriptors)
+
+ o = CustomDelayedObject
+ method_cache[special_descriptors] = o
+ return o
+
+def DelayedInstantiation_kls(kls, *a, **kwd):
+ return DelayedInstantiation(kls, kls, *a, **kwd)
+
+class_cache = {}
+def DelayedInstantiation(resultant_kls, func, *a, **kwd):
+ """Generate an objects that does not get initialized before it is used.
+
+ The returned object can be passed around without triggering
+ initialization. The first time it is actually used (an attribute
+ is accessed) it is initialized once.
+
+ The returned "fake" object cannot completely reliably mimic a
+ builtin type. It will usually work but some corner cases may fail
+ in confusing ways. Make sure to test if DelayedInstantiation has
+ no unwanted side effects.
+
+ @param resultant_kls: type object to fake an instance of.
+ @param func: callable, the return value is used as initialized object.
+ """
+ o = class_cache.get(resultant_kls, None)
+ if o is None:
+ o = make_kls(resultant_kls)
+ class_cache[resultant_kls] = o
+ return o(resultant_kls, func, *a, **kwd)
+
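+# A rough usage sketch; C{make} is a hypothetical factory and only runs
+# once the proxy is actually poked at:
+#
+#   >>> calls = []
+#   >>> def make():
+#   ...     calls.append(1)
+#   ...     return [1, 2, 3]
+#   >>> obj = DelayedInstantiation(list, make)
+#   >>> calls
+#   []
+#   >>> len(obj)
+#   3
+#   >>> calls
+#   [1]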
+
+slotted_dict_cache = {}
+def make_SlottedDict_kls(keys):
+ new_keys = tuple(sorted(keys))
+ o = slotted_dict_cache.get(new_keys, None)
+ if o is None:
+ class SlottedDict(DictMixin):
+ __slots__ = new_keys
+ __externally_mutable__ = True
+
+ def __init__(self, iterables=()):
+ if iterables:
+ self.update(iterables)
+
+ __setitem__ = object.__setattr__
+
+ def __getitem__(self, key):
+ try:
+ return getattr(self, key)
+ except AttributeError:
+ raise KeyError(key)
+
+ def __delitem__(self, key):
+ # Python does not raise anything if you delattr an
+ # unset slot (works ok if __slots__ is not involved).
+ try:
+ getattr(self, key)
+ except AttributeError:
+ raise KeyError(key)
+ delattr(self, key)
+
+ def __iter__(self):
+ for k in self.__slots__:
+ if hasattr(self, k):
+ yield k
+
+ def iterkeys(self):
+ return iter(self)
+
+ def itervalues(self):
+ for k in self:
+ yield self[k]
+
+ def get(self, key, default=None):
+ return getattr(self, key, default)
+
+ def pop(self, key, *a):
+            # faster than the exception form...
+ l = len(a)
+ if l > 1:
+ raise TypeError("pop accepts 1 or 2 args only")
+ if hasattr(self, key):
+ o = getattr(self, key)
+ object.__delattr__(self, key)
+ elif l:
+ o = a[0]
+ else:
+ raise KeyError(key)
+ return o
+
+ def clear(self):
+ for k in self:
+ del self[k]
+
+ def update(self, iterable):
+ for k, v in iterable:
+ setattr(self, k, v)
+
+ def __len__(self):
+ return len(self.keys())
+
+ def __contains__(self, key):
+ return hasattr(self, key)
+
+ o = SlottedDict
+ slotted_dict_cache[new_keys] = o
+ return o
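+
+# A rough usage sketch; the generated class only accepts the given keys:
+#
+#   >>> kls = make_SlottedDict_kls(["a", "b"])
+#   >>> d = kls([("a", 1)])
+#   >>> d["a"], "b" in d
+#   (1, False)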
diff --git a/snakeoil/osutils/__init__.py b/snakeoil/osutils/__init__.py
new file mode 100644
index 0000000..62c1b28
--- /dev/null
+++ b/snakeoil/osutils/__init__.py
@@ -0,0 +1,340 @@
+# Copyright 2004-2007 Brian Harring <ferringb@gmail.com>
+# Copyright 2006 Marien Zwart <marienz@gentoo.org>
+# Distributed under the terms of the GNU General Public License v2
+
+"""
+os specific utilities, FS access mainly
+
+"""
+
+import os, stat
+import fcntl
+import errno
+
+__all__ = ['abspath', 'abssymlink', 'ensure_dirs', 'join', 'pjoin',
+ 'listdir_files', 'listdir_dirs', 'listdir', 'readlines', 'readfile',
+ 'readdir']
+
+
+# No name '_readdir' in module osutils
+# pylint: disable-msg=E0611
+
+try:
+ from snakeoil.osutils import _readdir as module
+except ImportError:
+ from snakeoil.osutils import native_readdir as module
+
+listdir = module.listdir
+listdir_dirs = module.listdir_dirs
+listdir_files = module.listdir_files
+readdir = module.readdir
+
+del module
+
+
+def ensure_dirs(path, gid=-1, uid=-1, mode=0777, minimal=True):
+ """
+ ensure dirs exist, creating as needed with (optional) gid, uid, and mode.
+
+    Be forewarned: if mode is set to a value that blocks the euid
+    from accessing the dir, this code *will* still try to create the dir.
+ """
+
+ try:
+ st = os.stat(path)
+ except OSError:
+ base = os.path.sep
+ try:
+ um = os.umask(0)
+ # if the dir perms would lack +wx, we have to force it
+ force_temp_perms = ((mode & 0300) != 0300)
+ resets = []
+ apath = normpath(os.path.abspath(path))
+ sticky_parent = False
+
+ for directory in apath.split(os.path.sep):
+ base = join(base, directory)
+ try:
+ st = os.stat(base)
+ if not stat.S_ISDIR(st.st_mode):
+ return False
+
+ # if it's a subdir, we need +wx at least
+ if apath != base:
+ if ((st.st_mode & 0300) != 0300):
+ try:
+ os.chmod(base, (st.st_mode | 0300))
+ except OSError:
+ return False
+ resets.append((base, st.st_mode))
+                    sticky_parent = (st.st_mode & stat.S_ISGID)
+
+ except OSError:
+ # nothing exists.
+ try:
+ if force_temp_perms:
+ os.mkdir(base, 0700)
+ resets.append((base, mode))
+ else:
+ os.mkdir(base, mode)
+ if base == apath and sticky_parent:
+ resets.append((base, mode))
+ if gid != -1 or uid != -1:
+ os.chown(base, uid, gid)
+ except OSError:
+ return False
+
+ try:
+ for base, m in reversed(resets):
+ os.chmod(base, m)
+ if uid != -1 or gid != -1:
+ os.chown(base, uid, gid)
+ except OSError:
+ return False
+
+ finally:
+ os.umask(um)
+ return True
+ else:
+ try:
+ if ((gid != -1 and gid != st.st_gid) or
+ (uid != -1 and uid != st.st_uid)):
+ os.chown(path, uid, gid)
+ if minimal:
+ if mode != (st.st_mode & mode):
+ os.chmod(path, st.st_mode | mode)
+ elif mode != (st.st_mode & 07777):
+ os.chmod(path, mode)
+ except OSError:
+ return False
+ return True
+
+
+def abssymlink(symlink):
+ """
+    Read a symlink, resolving it if it is relative, and return the absolute path.
+ If the path doesn't exist, OSError is thrown.
+
+ @param symlink: filepath to resolve
+    @return: resolved path.
+ """
+ mylink = os.readlink(symlink)
+ if mylink[0] != '/':
+ mydir = os.path.dirname(symlink)
+ mylink = mydir+"/"+mylink
+ return os.path.normpath(mylink)
+
+
+def abspath(path):
+ """
+ resolve a path absolutely, including symlink resolving.
+ Throws OSError if the path doesn't exist
+
+ Note that if it's a symlink and the target doesn't exist, it'll still
+ return the target.
+
+ @param path: filepath to resolve.
+    @return: resolved path
+ """
+ path = os.path.abspath(path)
+ try:
+ return abssymlink(path)
+ except OSError, e:
+ if e.errno == errno.EINVAL:
+ return path
+ raise
+
+
+def native_normpath(mypath):
+ """
+ normalize path- //usr/bin becomes /usr/bin
+ """
+ newpath = os.path.normpath(mypath)
+ if newpath.startswith('//'):
+ return newpath[1:]
+ return newpath
+
+native_join = os.path.join
+
+def native_readfile(mypath, none_on_missing=False):
+ """
+ read a file, returning the contents
+
+ @param mypath: fs path for the file to read
+ @param none_on_missing: whether to return None if the file is missing,
+        else let the exception propagate
+ """
+ try:
+ return open(mypath, "r").read()
+ except IOError, oe:
+ if none_on_missing and oe.errno == errno.ENOENT:
+ return None
+ raise
+
+
+class readlines_iter(object):
+ __slots__ = ("iterable", "mtime")
+ def __init__(self, iterable, mtime):
+ self.iterable = iterable
+ self.mtime = mtime
+
+ def __iter__(self):
+ return self.iterable
+
+
+def native_readlines(mypath, strip_newlines=True, swallow_missing=False,
+ none_on_missing=False):
+ """
+ read a file, yielding each line
+
+ @param mypath: fs path for the file to read
+ @param strip_newlines: strip trailing newlines?
+ @param swallow_missing: throw an IOError if missing, or swallow it?
+    @param none_on_missing: if the file is missing and swallow_missing is set,
+        return None instead of an empty iterable
+ """
+ try:
+ f = open(mypath, "r")
+ except IOError, ie:
+ if ie.errno != errno.ENOENT or not swallow_missing:
+ raise
+ if none_on_missing:
+ return None
+ return readlines_iter(iter([]), None)
+
+ if not strip_newlines:
+ return readlines_iter(f, os.fstat(f.fileno()).st_mtime)
+
+ return readlines_iter((x.strip("\n") for x in f), os.fstat(f.fileno()).st_mtime)
+
+
+try:
+ from snakeoil.osutils._posix import normpath, join, readfile, readlines
+except ImportError:
+ normpath = native_normpath
+ join = native_join
+ readfile = native_readfile
+ readlines = native_readlines
+
+# convenience. importing join into a namespace is ugly, pjoin less so
+pjoin = join
+
+class LockException(Exception):
+ """Base lock exception class"""
+ def __init__(self, path, reason):
+ Exception.__init__(self, path, reason)
+ self.path, self.reason = path, reason
+
+class NonExistant(LockException):
+ """Missing file/dir exception"""
+ def __init__(self, path, reason=None):
+ LockException.__init__(self, path, reason)
+ def __str__(self):
+ return (
+ "Lock action for '%s' failed due to not being a valid dir/file %s"
+ % (self.path, self.reason))
+
+class GenericFailed(LockException):
+ """The fallback lock exception class.
+
+ Covers perms, IOError's, and general whackyness.
+ """
+ def __str__(self):
+ return "Lock action for '%s' failed due to '%s'" % (
+ self.path, self.reason)
+
+
+# should the fd be left open indefinitely?
+# IMO, it shouldn't, but opening/closing it every time is expensive
+
+
+class FsLock(object):
+
+ """
+    fcntl based locks
+ """
+
+ __slots__ = ("path", "fd", "create")
+ def __init__(self, path, create=False):
+ """
+ @param path: fs path for the lock
+ @param create: controls whether the file will be created
+ if the file doesn't exist.
+ If true, the base dir must exist, and it will create a file.
+ If you want to lock via a dir, you have to ensure it exists
+ (create doesn't suffice).
+ @raise NonExistant: if no file/dir exists for that path,
+ and cannot be created
+ """
+ self.path = path
+ self.fd = None
+ self.create = create
+ if not create:
+ if not os.path.exists(path):
+ raise NonExistant(path)
+
+ def _acquire_fd(self):
+ if self.create:
+ try:
+                self.fd = os.open(self.path, os.O_RDONLY|os.O_CREAT)
+ except OSError, oe:
+ raise GenericFailed(self.path, oe)
+ else:
+ try:
+                self.fd = os.open(self.path, os.O_RDONLY)
+ except OSError, oe:
+ raise NonExistant(self.path, oe)
+
+ def _enact_change(self, flags, blocking):
+ if self.fd is None:
+ self._acquire_fd()
+        # we do it this way, due to the fact that try/except is a bit of a hit
+ if not blocking:
+ try:
+ fcntl.flock(self.fd, flags|fcntl.LOCK_NB)
+ except IOError, ie:
+ if ie.errno == errno.EAGAIN:
+ return False
+ raise GenericFailed(self.path, ie)
+ else:
+ fcntl.flock(self.fd, flags)
+ return True
+
+ def acquire_write_lock(self, blocking=True):
+ """
+ Acquire an exclusive lock
+
+ Note if you have a read lock, it implicitly upgrades atomically
+
+ @param blocking: if enabled, don't return until we have the lock
+ @return: True if lock is acquired, False if not.
+ """
+ return self._enact_change(fcntl.LOCK_EX, blocking)
+
+ def acquire_read_lock(self, blocking=True):
+ """
+ Acquire a shared lock
+
+ Note if you have a write lock, it implicitly downgrades atomically
+
+ @param blocking: if enabled, don't return until we have the lock
+ @return: True if lock is acquired, False if not.
+ """
+ return self._enact_change(fcntl.LOCK_SH, blocking)
+
+ def release_write_lock(self):
+ """Release an write/exclusive lock if held"""
+ self._enact_change(fcntl.LOCK_UN, False)
+
+ def release_read_lock(self):
+ """Release an shared/read lock if held"""
+ self._enact_change(fcntl.LOCK_UN, False)
+
+ def __del__(self):
+ # alright, it's 5:45am, yes this is weird code.
+ try:
+ if self.fd is not None:
+ self.release_read_lock()
+ finally:
+ if self.fd is not None:
+ os.close(self.fd)
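+
+# A rough usage sketch; the lock file path is hypothetical:
+#
+#   >>> lock = FsLock('/tmp/example.lock', create=True)
+#   >>> lock.acquire_write_lock()
+#   True
+#   >>> lock.release_write_lock()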
diff --git a/snakeoil/osutils/native_readdir.py b/snakeoil/osutils/native_readdir.py
new file mode 100644
index 0000000..c09eb1d
--- /dev/null
+++ b/snakeoil/osutils/native_readdir.py
@@ -0,0 +1,60 @@
+# Copyright: 2006-2007 Brian Harring <ferringb@gmail.com>
+# Copyright: 2006 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+"""Wrapper for readdir which grabs file type from d_type."""
+
+
+import os, errno
+from stat import (S_IFDIR, S_IFREG, S_IFCHR, S_IFBLK, S_IFIFO, S_IFLNK, S_IFSOCK,
+ S_IFMT, S_ISDIR, S_ISREG)
+
+listdir = os.listdir
+
+# we can still use the cpy pjoin here, just need to do something about the
+# import cycle.
+pjoin = os.path.join
+
+def stat_swallow_enoent(path, check, default=False, stat=os.stat):
+ try:
+ return check(stat(path).st_mode)
+ except OSError, oe:
+ if oe.errno == errno.ENOENT:
+ return default
+ raise
+
+def listdir_dirs(path, followSymlinks=True):
+    scheck = S_ISDIR
+    pjf = pjoin
+    if followSymlinks:
+        return [x for x in os.listdir(path) if
+            stat_swallow_enoent(pjf(path, x), scheck)]
+    lstat = os.lstat
+ return [x for x in os.listdir(path) if
+ scheck(lstat(pjf(path, x)).st_mode)]
+
+def listdir_files(path, followSymlinks=True):
+ scheck = S_ISREG
+ pjf = pjoin
+ if followSymlinks:
+ return [x for x in os.listdir(path) if
+ stat_swallow_enoent(pjf(path, x), scheck)]
+ lstat = os.lstat
+ return [x for x in os.listdir(path) if
+ scheck(lstat(pjf(path, x)).st_mode)]
+
+def readdir(path):
+ pjf = pjoin
+ assocs = {
+ S_IFREG: "file",
+ S_IFDIR: "directory",
+ S_IFLNK: "symlink",
+ S_IFCHR: "chardev",
+ S_IFBLK: "block",
+ S_IFSOCK: "socket",
+ S_IFIFO: "fifo",
+ }
+ things = listdir(path)
+ lstat = os.lstat
+ return [(name, assocs[S_IFMT(lstat(pjf(path, name)).st_mode)]) for name in things]
diff --git a/snakeoil/pickling.py b/snakeoil/pickling.py
new file mode 100644
index 0000000..fe1b39f
--- /dev/null
+++ b/snakeoil/pickling.py
@@ -0,0 +1,18 @@
+# Copyright: 2007 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+convenience module using cPickle if available, else failing back to pickle
+"""
+
+try:
+ from cPickle import *
+except ImportError:
+ from pickle import *
+
+def iter_stream(stream):
+ try:
+ while True:
+ yield load(stream)
+ except EOFError:
+ pass
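+
+# A rough usage sketch; C{dump} comes from the pickle star-import above:
+#
+#   >>> from StringIO import StringIO
+#   >>> s = StringIO()
+#   >>> dump(1, s); dump(2, s)
+#   >>> s.seek(0)
+#   >>> list(iter_stream(s))
+#   [1, 2]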
diff --git a/snakeoil/tar.py b/snakeoil/tar.py
new file mode 100644
index 0000000..8f581b1
--- /dev/null
+++ b/snakeoil/tar.py
@@ -0,0 +1,35 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+tar file access
+
+monkey patching of stdlib tarfile to reduce mem usage (33% reduction).
+
+note this is also racy; if N threads try the import and they're after
+the *original* tarfile, they may inadvertently get ours.
+"""
+
+import sys
+t = sys.modules.pop("tarfile", None)
+tarfile = __import__("tarfile")
+if t is not None:
+ sys.modules["tarfile"] = t
+else:
+ del sys.modules["tarfile"]
+del t
+# ok, we now have our own local copy to monkey patch
+
+class TarInfo(tarfile.TarInfo):
+ __slots__ = (
+ "name", "mode", "uid", "gid", "size", "mtime", "chksum", "type",
+ "linkname", "uname", "gname", "devmajor", "devminor", "prefix",
+ "offset", "offset_data", "buf", "sparse", "_link_target")
+
+tarfile.TarInfo = TarInfo
+# finished monkey patching. now to lift things out of our tarfile
+# module into this scope so from/import behaves properly.
+
+for x in tarfile.__all__:
+ locals()[x] = getattr(tarfile, x)
+del x
diff --git a/snakeoil/version.py b/snakeoil/version.py
new file mode 100644
index 0000000..4c48c5f
--- /dev/null
+++ b/snakeoil/version.py
@@ -0,0 +1,39 @@
+# Copyright: 2006 Marien Zwart <marienz@gentoo.org>
+# License: GPL2
+
+
+"""Version information (tied to bzr)."""
+
+import os
+
+__version__ = '0.1_rc2'
+
+_ver = None
+
+def get_version():
+ """@returns: a string describing the snakeoil version."""
+ global _ver
+ if _ver is not None:
+ return _ver
+
+ try:
+ from snakeoil.bzr_verinfo import version_info
+ except ImportError:
+ try:
+ from bzrlib import branch, errors
+ except ImportError:
+ ver = 'unknown (not from an sdist tarball, bzr unavailable)'
+ else:
+ try:
+ # Returns a (branch, relpath) tuple, ignore relpath.
+ b = branch.Branch.open_containing(os.path.realpath(__file__))[0]
+ except errors.NotBranchError:
+ ver = 'unknown (not from an sdist tarball, not a bzr branch)'
+ else:
+ ver = '%s:%s %s' % (b.nick, b.revno(), b.last_revision())
+ else:
+ ver = '%(branch_nick)s:%(revno)s %(revision_id)s' % version_info
+
+ _ver = 'snakeoil %s\n(bzr rev %s)' % (__version__, ver)
+
+ return _ver
diff --git a/snakeoil/weakrefs.py b/snakeoil/weakrefs.py
new file mode 100644
index 0000000..16336be
--- /dev/null
+++ b/snakeoil/weakrefs.py
@@ -0,0 +1,12 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+# Unused import
+# pylint: disable-msg=W0611
+
+try:
+ # No name in module
+ # pylint: disable-msg=E0611
+ from snakeoil._caching import WeakValCache
+except ImportError:
+ from weakref import WeakValueDictionary as WeakValCache
diff --git a/snakeoil/xml/__init__.py b/snakeoil/xml/__init__.py
new file mode 100644
index 0000000..4091f02
--- /dev/null
+++ b/snakeoil/xml/__init__.py
@@ -0,0 +1,46 @@
+# Copyright: 2006 Brian Harring <ferringb@gmail.com>
+# License: GPL2
+
+"""
+indirection to load ElementTree
+"""
+# essentially... prefer the standalone cElementTree, then the 2.5 bundled
+# cElementTree, then the standalone elementtree, then the 2.5 bundled
+# ElementTree, then our own bundled copy
+
+# "No name etree in module xml", "Reimport cElementTree"
+# pylint: disable-msg=E0611,W0404
+
+gotit = True
+try:
+ import cElementTree as etree
+except ImportError:
+ gotit = False
+if not gotit:
+ try:
+ from xml.etree import cElementTree as etree
+ gotit = True
+ except ImportError:
+ pass
+if not gotit:
+ try:
+ from elementtree import ElementTree as etree
+ gotit = True
+ except ImportError:
+ pass
+if not gotit:
+ try:
+ from xml.etree import ElementTree as etree
+ gotit = True
+ except ImportError:
+ pass
+
+if not gotit:
+ from snakeoil.xml import bundled_elementtree as etree
+del gotit
+
+def escape(string):
+ """
+ simple escaping of &, <, and >
+ """
+ return string.replace("&", "&amp;").replace("<", "&lt;").replace(">",
+ "&gt;")
diff --git a/snakeoil/xml/bundled_elementtree.py b/snakeoil/xml/bundled_elementtree.py
new file mode 100644
index 0000000..5d8b1d3
--- /dev/null
+++ b/snakeoil/xml/bundled_elementtree.py
@@ -0,0 +1,1254 @@
+#
+# ElementTree
+# $Id: ElementTree.py 2326 2005-03-17 07:45:21Z fredrik $
+#
+# light-weight XML support for Python 1.5.2 and later.
+#
+# history:
+# 2001-10-20 fl created (from various sources)
+# 2001-11-01 fl return root from parse method
+# 2002-02-16 fl sort attributes in lexical order
+# 2002-04-06 fl TreeBuilder refactoring, added PythonDoc markup
+# 2002-05-01 fl finished TreeBuilder refactoring
+# 2002-07-14 fl added basic namespace support to ElementTree.write
+# 2002-07-25 fl added QName attribute support
+# 2002-10-20 fl fixed encoding in write
+# 2002-11-24 fl changed default encoding to ascii; fixed attribute encoding
+# 2002-11-27 fl accept file objects or file names for parse/write
+# 2002-12-04 fl moved XMLTreeBuilder back to this module
+# 2003-01-11 fl fixed entity encoding glitch for us-ascii
+# 2003-02-13 fl added XML literal factory
+# 2003-02-21 fl added ProcessingInstruction/PI factory
+# 2003-05-11 fl added tostring/fromstring helpers
+# 2003-05-26 fl added ElementPath support
+# 2003-07-05 fl added makeelement factory method
+# 2003-07-28 fl added more well-known namespace prefixes
+# 2003-08-15 fl fixed typo in ElementTree.findtext (Thomas Dartsch)
+# 2003-09-04 fl fall back on emulator if ElementPath is not installed
+# 2003-10-31 fl markup updates
+# 2003-11-15 fl fixed nested namespace bug
+# 2004-03-28 fl added XMLID helper
+# 2004-06-02 fl added default support to findtext
+# 2004-06-08 fl fixed encoding of non-ascii element/attribute names
+# 2004-08-23 fl take advantage of post-2.1 expat features
+# 2005-02-01 fl added iterparse implementation
+# 2005-03-02 fl fixed iterparse support for pre-2.2 versions
+#
+# Copyright (c) 1999-2005 by Fredrik Lundh. All rights reserved.
+#
+# fredrik@pythonware.com
+# http://www.pythonware.com
+#
+# --------------------------------------------------------------------
+# The ElementTree toolkit is
+#
+# Copyright (c) 1999-2005 by Fredrik Lundh
+#
+# By obtaining, using, and/or copying this software and/or its
+# associated documentation, you agree that you have read, understood,
+# and will comply with the following terms and conditions:
+#
+# Permission to use, copy, modify, and distribute this software and
+# its associated documentation for any purpose and without fee is
+# hereby granted, provided that the above copyright notice appears in
+# all copies, and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of
+# Secret Labs AB or the author not be used in advertising or publicity
+# pertaining to distribution of the software without specific, written
+# prior permission.
+#
+# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
+# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
+# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
+# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+# --------------------------------------------------------------------
+
+__all__ = [
+ # public symbols
+ "Comment",
+ "dump",
+ "Element", "ElementTree",
+ "fromstring",
+ "iselement", "iterparse",
+ "parse",
+ "PI", "ProcessingInstruction",
+ "QName",
+ "SubElement",
+ "tostring",
+ "TreeBuilder",
+ "VERSION", "XML",
+ "XMLTreeBuilder",
+ ]
+
+##
+# The <b>Element</b> type is a flexible container object, designed to
+# store hierarchical data structures in memory. The type can be
+# described as a cross between a list and a dictionary.
+# <p>
+# Each element has a number of properties associated with it:
+# <ul>
+# <li>a <i>tag</i>. This is a string identifying what kind of data
+# this element represents (the element type, in other words).</li>
+# <li>a number of <i>attributes</i>, stored in a Python dictionary.</li>
+# <li>a <i>text</i> string.</li>
+# <li>an optional <i>tail</i> string.</li>
+# <li>a number of <i>child elements</i>, stored in a Python sequence</li>
+# </ul>
+#
+# To create an element instance, use the {@link #Element} or {@link
+# #SubElement} factory functions.
+# <p>
+# The {@link #ElementTree} class can be used to wrap an element
+# structure, and convert it from and to XML.
+##
+
+import string, sys, re
+
+class _SimpleElementPath:
+ # emulate pre-1.2 find/findtext/findall behaviour
+ def find(self, element, tag):
+ for elem in element:
+ if elem.tag == tag:
+ return elem
+ return None
+ def findtext(self, element, tag, default=None):
+ for elem in element:
+ if elem.tag == tag:
+ return elem.text or ""
+ return default
+ def findall(self, element, tag):
+ if tag[:3] == ".//":
+ return element.getiterator(tag[3:])
+ result = []
+ for elem in element:
+ if elem.tag == tag:
+ result.append(elem)
+ return result
+
+try:
+ import ElementPath
+except ImportError:
+ # FIXME: issue warning in this case?
+ ElementPath = _SimpleElementPath()
+
+# TODO: add support for custom namespace resolvers/default namespaces
+# TODO: add improved support for incremental parsing
+
+VERSION = "1.2.6"
+
+##
+# Internal element class. This class defines the Element interface,
+# and provides a reference implementation of this interface.
+# <p>
+# You should not create instances of this class directly. Use the
+# appropriate factory functions instead, such as {@link #Element}
+# and {@link #SubElement}.
+#
+# @see Element
+# @see SubElement
+# @see Comment
+# @see ProcessingInstruction
+
+class _ElementInterface:
+ # <tag attrib>text<child/>...</tag>tail
+
+ ##
+ # (Attribute) Element tag.
+
+ tag = None
+
+ ##
+ # (Attribute) Element attribute dictionary. Where possible, use
+ # {@link #_ElementInterface.get},
+ # {@link #_ElementInterface.set},
+ # {@link #_ElementInterface.keys}, and
+ # {@link #_ElementInterface.items} to access
+ # element attributes.
+
+ attrib = None
+
+ ##
+ # (Attribute) Text before first subelement. This is either a
+ # string or the value None, if there was no text.
+
+ text = None
+
+ ##
+ # (Attribute) Text after this element's end tag, but before the
+ # next sibling element's start tag. This is either a string or
+ # the value None, if there was no text.
+
+ tail = None # text after end tag, if any
+
+ def __init__(self, tag, attrib):
+ self.tag = tag
+ self.attrib = attrib
+ self._children = []
+
+ def __repr__(self):
+ return "<Element %s at %x>" % (self.tag, id(self))
+
+ ##
+ # Creates a new element object of the same type as this element.
+ #
+ # @param tag Element tag.
+ # @param attrib Element attributes, given as a dictionary.
+ # @return A new element instance.
+
+ def makeelement(self, tag, attrib):
+ return Element(tag, attrib)
+
+ ##
+ # Returns the number of subelements.
+ #
+ # @return The number of subelements.
+
+ def __len__(self):
+ return len(self._children)
+
+ ##
+ # Returns the given subelement.
+ #
+ # @param index What subelement to return.
+ # @return The given subelement.
+ # @exception IndexError If the given element does not exist.
+
+ def __getitem__(self, index):
+ return self._children[index]
+
+ ##
+ # Replaces the given subelement.
+ #
+ # @param index What subelement to replace.
+ # @param element The new element value.
+ # @exception IndexError If the given element does not exist.
+ # @exception AssertionError If element is not a valid object.
+
+ def __setitem__(self, index, element):
+ assert iselement(element)
+ self._children[index] = element
+
+ ##
+ # Deletes the given subelement.
+ #
+ # @param index What subelement to delete.
+ # @exception IndexError If the given element does not exist.
+
+ def __delitem__(self, index):
+ del self._children[index]
+
+ ##
+ # Returns a list containing subelements in the given range.
+ #
+ # @param start The first subelement to return.
+ # @param stop The first subelement that shouldn't be returned.
+ # @return A sequence object containing subelements.
+
+ def __getslice__(self, start, stop):
+ return self._children[start:stop]
+
+ ##
+ # Replaces a number of subelements with elements from a sequence.
+ #
+ # @param start The first subelement to replace.
+ # @param stop The first subelement that shouldn't be replaced.
+ # @param elements A sequence object with zero or more elements.
+ # @exception AssertionError If a sequence member is not a valid object.
+
+ def __setslice__(self, start, stop, elements):
+ for element in elements:
+ assert iselement(element)
+ self._children[start:stop] = list(elements)
+
+ ##
+ # Deletes a number of subelements.
+ #
+ # @param start The first subelement to delete.
+ # @param stop The first subelement to leave in there.
+
+ def __delslice__(self, start, stop):
+ del self._children[start:stop]
+
+ ##
+ # Adds a subelement to the end of this element.
+ #
+ # @param element The element to add.
+ # @exception AssertionError If a sequence member is not a valid object.
+
+ def append(self, element):
+ assert iselement(element)
+ self._children.append(element)
+
+ ##
+ # Inserts a subelement at the given position in this element.
+ #
+ # @param index Where to insert the new subelement.
+ # @exception AssertionError If the element is not a valid object.
+
+ def insert(self, index, element):
+ assert iselement(element)
+ self._children.insert(index, element)
+
+ ##
+ # Removes a matching subelement. Unlike the <b>find</b> methods,
+ # this method compares elements based on identity, not on tag
+ # value or contents.
+ #
+ # @param element What element to remove.
+ # @exception ValueError If a matching element could not be found.
+ # @exception AssertionError If the element is not a valid object.
+
+ def remove(self, element):
+ assert iselement(element)
+ self._children.remove(element)
+
+ ##
+ # Returns all subelements. The elements are returned in document
+ # order.
+ #
+ # @return A list of subelements.
+ # @defreturn list of Element instances
+
+ def getchildren(self):
+ return self._children
+
+ ##
+ # Finds the first matching subelement, by tag name or path.
+ #
+ # @param path What element to look for.
+ # @return The first matching element, or None if no element was found.
+ # @defreturn Element or None
+
+ def find(self, path):
+ return ElementPath.find(self, path)
+
+ ##
+ # Finds text for the first matching subelement, by tag name or path.
+ #
+ # @param path What element to look for.
+ # @param default What to return if the element was not found.
+ # @return The text content of the first matching element, or the
+ # default value if no element was found. Note that if the element
+ # is found, but has no text content, this method returns an
+ # empty string.
+ # @defreturn string
+
+ def findtext(self, path, default=None):
+ return ElementPath.findtext(self, path, default)
+
+ ##
+ # Finds all matching subelements, by tag name or path.
+ #
+ # @param path What element to look for.
+ # @return A list or iterator containing all matching elements,
+ # in document order.
+ # @defreturn list of Element instances
+
+ def findall(self, path):
+ return ElementPath.findall(self, path)
+
+ ##
+ # Resets an element. This function removes all subelements, clears
+ # all attributes, and sets the text and tail attributes to None.
+
+ def clear(self):
+ self.attrib.clear()
+ self._children = []
+ self.text = self.tail = None
+
+ ##
+ # Gets an element attribute.
+ #
+ # @param key What attribute to look for.
+ # @param default What to return if the attribute was not found.
+ # @return The attribute value, or the default value, if the
+ # attribute was not found.
+ # @defreturn string or None
+
+ def get(self, key, default=None):
+ return self.attrib.get(key, default)
+
+ ##
+ # Sets an element attribute.
+ #
+ # @param key What attribute to set.
+ # @param value The attribute value.
+
+ def set(self, key, value):
+ self.attrib[key] = value
+
+ ##
+ # Gets a list of attribute names. The names are returned in an
+ # arbitrary order (just like for an ordinary Python dictionary).
+ #
+ # @return A list of element attribute names.
+ # @defreturn list of strings
+
+ def keys(self):
+ return self.attrib.keys()
+
+ ##
+ # Gets element attributes, as a sequence. The attributes are
+ # returned in an arbitrary order.
+ #
+ # @return A list of (name, value) tuples for all attributes.
+ # @defreturn list of (string, string) tuples
+
+ def items(self):
+ return self.attrib.items()
+
+ ##
+ # Creates a tree iterator. The iterator loops over this element
+ # and all subelements, in document order, and returns all elements
+ # with a matching tag.
+ # <p>
+ # If the tree structure is modified during iteration, the result
+ # is undefined.
+ #
+ # @param tag What tags to look for (default is to return all elements).
+ # @return A list or iterator containing all the matching elements.
+ # @defreturn list or iterator
+
+ def getiterator(self, tag=None):
+ nodes = []
+ if tag == "*":
+ tag = None
+ if tag is None or self.tag == tag:
+ nodes.append(self)
+ for node in self._children:
+ nodes.extend(node.getiterator(tag))
+ return nodes
+
+# compatibility
+_Element = _ElementInterface
+
+##
+# Element factory. This function returns an object implementing the
+# standard Element interface. The exact class or type of that object
+# is implementation dependent, but it will always be compatible with
+# the {@link #_ElementInterface} class in this module.
+# <p>
+# The element name, attribute names, and attribute values can be
+# either 8-bit ASCII strings or Unicode strings.
+#
+# @param tag The element name.
+# @param attrib An optional dictionary, containing element attributes.
+# @param **extra Additional attributes, given as keyword arguments.
+# @return An element instance.
+# @defreturn Element
+
+def Element(tag, attrib={}, **extra):
+ attrib = attrib.copy()
+ attrib.update(extra)
+ return _ElementInterface(tag, attrib)
+
+##
+# Subelement factory. This function creates an element instance, and
+# appends it to an existing element.
+# <p>
+# The element name, attribute names, and attribute values can be
+# either 8-bit ASCII strings or Unicode strings.
+#
+# @param parent The parent element.
+# @param tag The subelement name.
+# @param attrib An optional dictionary, containing element attributes.
+# @param **extra Additional attributes, given as keyword arguments.
+# @return An element instance.
+# @defreturn Element
+
+def SubElement(parent, tag, attrib={}, **extra):
+ attrib = attrib.copy()
+ attrib.update(extra)
+ element = parent.makeelement(tag, attrib)
+ parent.append(element)
+ return element
+
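Element() and SubElement() are the public factories for building trees by hand; the resulting objects expose the list-like and dict-like accessors documented on _ElementInterface above. A small illustrative sketch (tag and attribute names are made up), usable from within this module or via snakeoil.xml.bundled_elementtree:

    root = Element("packages", repo="gentoo")
    pkg = SubElement(root, "package", name="pkgcore")
    pkg.text = "0.3"

    print len(root)                          # 1 child element
    print root.get("repo")                   # gentoo
    print root.find("package").text          # 0.3
    print root.findtext("missing", "n/a")    # n/a (default, tag absent)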
+##
+# Comment element factory. This factory function creates a special
+# element that will be serialized as an XML comment.
+# <p>
+# The comment string can be either an 8-bit ASCII string or a Unicode
+# string.
+#
+# @param text A string containing the comment string.
+# @return An element instance, representing a comment.
+# @defreturn Element
+
+def Comment(text=None):
+ element = Element(Comment)
+ element.text = text
+ return element
+
+##
+# PI element factory. This factory function creates a special element
+# that will be serialized as an XML processing instruction.
+#
+# @param target A string containing the PI target.
+# @param text A string containing the PI contents, if any.
+# @return An element instance, representing a PI.
+# @defreturn Element
+
+def ProcessingInstruction(target, text=None):
+ element = Element(ProcessingInstruction)
+ element.text = target
+ if text:
+ element.text = element.text + " " + text
+ return element
+
+PI = ProcessingInstruction
+
+##
+# QName wrapper. This can be used to wrap a QName attribute value, in
+# order to get proper namespace handling on output.
+#
+# @param text A string containing the QName value, in the form {uri}local,
+# or, if the tag argument is given, the URI part of a QName.
+# @param tag Optional tag. If given, the first argument is interpreted as
+# a URI, and this argument is interpreted as a local name.
+# @return An opaque object, representing the QName.
+
+class QName:
+ def __init__(self, text_or_uri, tag=None):
+ if tag:
+ text_or_uri = "{%s}%s" % (text_or_uri, tag)
+ self.text = text_or_uri
+ def __str__(self):
+ return self.text
+ def __hash__(self):
+ return hash(self.text)
+ def __cmp__(self, other):
+ if isinstance(other, QName):
+ return cmp(self.text, other.text)
+ return cmp(self.text, other)
+
+##
+# ElementTree wrapper class. This class represents an entire element
+# hierarchy, and adds some extra support for serialization to and from
+# standard XML.
+#
+# @param element Optional root element.
+# @keyparam file Optional file handle or name. If given, the
+# tree is initialized with the contents of this XML file.
+
+class ElementTree:
+
+ def __init__(self, element=None, file=None):
+ assert element is None or iselement(element)
+ self._root = element # first node
+ if file:
+ self.parse(file)
+
+ ##
+ # Gets the root element for this tree.
+ #
+ # @return An element instance.
+ # @defreturn Element
+
+ def getroot(self):
+ return self._root
+
+ ##
+ # Replaces the root element for this tree. This discards the
+ # current contents of the tree, and replaces it with the given
+ # element. Use with care.
+ #
+ # @param element An element instance.
+
+ def _setroot(self, element):
+ assert iselement(element)
+ self._root = element
+
+ ##
+ # Loads an external XML document into this element tree.
+ #
+ # @param source A file name or file object.
+ # @param parser An optional parser instance. If not given, the
+ # standard {@link XMLTreeBuilder} parser is used.
+ # @return The document root element.
+ # @defreturn Element
+
+ def parse(self, source, parser=None):
+ if not hasattr(source, "read"):
+ source = open(source, "rb")
+ if not parser:
+ parser = XMLTreeBuilder()
+ while 1:
+ data = source.read(32768)
+ if not data:
+ break
+ parser.feed(data)
+ self._root = parser.close()
+ return self._root
+
+ ##
+ # Creates a tree iterator for the root element. The iterator loops
+ # over all elements in this tree, in document order.
+ #
+ # @param tag What tags to look for (default is to return all elements)
+ # @return An iterator.
+ # @defreturn iterator
+
+ def getiterator(self, tag=None):
+ assert self._root is not None
+ return self._root.getiterator(tag)
+
+ ##
+ # Finds the first toplevel element with given tag.
+ # Same as getroot().find(path).
+ #
+ # @param path What element to look for.
+ # @return The first matching element, or None if no element was found.
+ # @defreturn Element or None
+
+ def find(self, path):
+ assert self._root is not None
+ if path[:1] == "/":
+ path = "." + path
+ return self._root.find(path)
+
+ ##
+ # Finds the element text for the first toplevel element with given
+ # tag. Same as getroot().findtext(path).
+ #
+ # @param path What toplevel element to look for.
+ # @param default What to return if the element was not found.
+ # @return The text content of the first matching element, or the
+ # default value if no element was found. Note that if the element
+ # is found, but has no text content, this method returns an
+ # empty string.
+ # @defreturn string
+
+ def findtext(self, path, default=None):
+ assert self._root is not None
+ if path[:1] == "/":
+ path = "." + path
+ return self._root.findtext(path, default)
+
+ ##
+ # Finds all toplevel elements with the given tag.
+ # Same as getroot().findall(path).
+ #
+ # @param path What element to look for.
+ # @return A list or iterator containing all matching elements,
+ # in document order.
+ # @defreturn list of Element instances
+
+ def findall(self, path):
+ assert self._root is not None
+ if path[:1] == "/":
+ path = "." + path
+ return self._root.findall(path)
+
+ ##
+ # Writes the element tree to a file, as XML.
+ #
+ # @param file A file name, or a file object opened for writing.
+ # @param encoding Optional output encoding (default is US-ASCII).
+
+ def write(self, file, encoding="us-ascii"):
+ assert self._root is not None
+ if not hasattr(file, "write"):
+ file = open(file, "wb")
+ if not encoding:
+ encoding = "us-ascii"
+ elif encoding != "utf-8" and encoding != "us-ascii":
+ file.write("<?xml version='1.0' encoding='%s'?>\n" % encoding)
+ self._write(file, self._root, encoding, {})
+
+ def _write(self, file, node, encoding, namespaces):
+ # write XML to file
+ tag = node.tag
+ if tag is Comment:
+ file.write("<!-- %s -->" % _escape_cdata(node.text, encoding))
+ elif tag is ProcessingInstruction:
+ file.write("<?%s?>" % _escape_cdata(node.text, encoding))
+ else:
+ items = node.items()
+ xmlns_items = [] # new namespaces in this scope
+ try:
+ if isinstance(tag, QName) or tag[:1] == "{":
+ tag, xmlns = fixtag(tag, namespaces)
+ if xmlns: xmlns_items.append(xmlns)
+ except TypeError:
+ _raise_serialization_error(tag)
+ file.write("<" + _encode(tag, encoding))
+ if items or xmlns_items:
+ items.sort() # lexical order
+ for k, v in items:
+ try:
+ if isinstance(k, QName) or k[:1] == "{":
+ k, xmlns = fixtag(k, namespaces)
+ if xmlns: xmlns_items.append(xmlns)
+ except TypeError:
+ _raise_serialization_error(k)
+ try:
+ if isinstance(v, QName):
+ v, xmlns = fixtag(v, namespaces)
+ if xmlns: xmlns_items.append(xmlns)
+ except TypeError:
+ _raise_serialization_error(v)
+ file.write(" %s=\"%s\"" % (_encode(k, encoding),
+ _escape_attrib(v, encoding)))
+ for k, v in xmlns_items:
+ file.write(" %s=\"%s\"" % (_encode(k, encoding),
+ _escape_attrib(v, encoding)))
+ if node.text or len(node):
+ file.write(">")
+ if node.text:
+ file.write(_escape_cdata(node.text, encoding))
+ for n in node:
+ self._write(file, n, encoding, namespaces)
+ file.write("</" + _encode(tag, encoding) + ">")
+ else:
+ file.write(" />")
+ for k, v in xmlns_items:
+ del namespaces[v]
+ if node.tail:
+ file.write(_escape_cdata(node.tail, encoding))
+
+# --------------------------------------------------------------------
+# helpers
+
+##
+# Checks if an object appears to be a valid element object.
+#
+# @param element An element instance.
+# @return A true value if this is an element object.
+# @defreturn flag
+
+def iselement(element):
+ # FIXME: not sure about this; might be a better idea to look
+ # for tag/attrib/text attributes
+ return isinstance(element, _ElementInterface) or hasattr(element, "tag")
+
+##
+# Writes an element tree or element structure to sys.stdout. This
+# function should be used for debugging only.
+# <p>
+# The exact output format is implementation dependent. In this
+# version, it's written as an ordinary XML file.
+#
+# @param elem An element tree or an individual element.
+
+def dump(elem):
+ # debugging
+ if not isinstance(elem, ElementTree):
+ elem = ElementTree(elem)
+ elem.write(sys.stdout)
+ tail = elem.getroot().tail
+ if not tail or tail[-1] != "\n":
+ sys.stdout.write("\n")
+
+def _encode(s, encoding):
+ try:
+ return s.encode(encoding)
+ except AttributeError:
+ return s # 1.5.2: assume the string uses the right encoding
+
+if sys.version[:3] == "1.5":
+ _escape = re.compile(r"[&<>\"\x80-\xff]+") # 1.5.2
+else:
+ _escape = re.compile(eval(r'u"[&<>\"\u0080-\uffff]+"'))
+
+_escape_map = {
+ "&": "&amp;",
+ "<": "&lt;",
+ ">": "&gt;",
+ '"': "&quot;",
+}
+
+_namespace_map = {
+ # "well-known" namespace prefixes
+ "http://www.w3.org/XML/1998/namespace": "xml",
+ "http://www.w3.org/1999/xhtml": "html",
+ "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
+ "http://schemas.xmlsoap.org/wsdl/": "wsdl",
+}
+
+def _raise_serialization_error(text):
+ raise TypeError(
+ "cannot serialize %r (type %s)" % (text, type(text).__name__)
+ )
+
+def _encode_entity(text, pattern=_escape):
+ # map reserved and non-ascii characters to numerical entities
+ def escape_entities(m, map=_escape_map):
+ out = []
+ append = out.append
+ for char in m.group():
+ text = map.get(char)
+ if text is None:
+ text = "&#%d;" % ord(char)
+ append(text)
+ return string.join(out, "")
+ try:
+ return _encode(pattern.sub(escape_entities, text), "ascii")
+ except TypeError:
+ _raise_serialization_error(text)
+
+#
+# the following functions assume an ascii-compatible encoding
+# (or "utf-16")
+
+def _escape_cdata(text, encoding=None, replace=string.replace):
+ # escape character data
+ try:
+ if encoding:
+ try:
+ text = _encode(text, encoding)
+ except UnicodeError:
+ return _encode_entity(text)
+ text = replace(text, "&", "&amp;")
+ text = replace(text, "<", "&lt;")
+ text = replace(text, ">", "&gt;")
+ return text
+ except (TypeError, AttributeError):
+ _raise_serialization_error(text)
+
+def _escape_attrib(text, encoding=None, replace=string.replace):
+ # escape attribute value
+ try:
+ if encoding:
+ try:
+ text = _encode(text, encoding)
+ except UnicodeError:
+ return _encode_entity(text)
+ text = replace(text, "&", "&amp;")
+ text = replace(text, "'", "&apos;") # FIXME: overkill
+ text = replace(text, "\"", "&quot;")
+ text = replace(text, "<", "&lt;")
+ text = replace(text, ">", "&gt;")
+ return text
+ except (TypeError, AttributeError):
+ _raise_serialization_error(text)
+
+def fixtag(tag, namespaces):
+ # given a decorated tag (of the form {uri}tag), return prefixed
+ # tag and namespace declaration, if any
+ if isinstance(tag, QName):
+ tag = tag.text
+ namespace_uri, tag = string.split(tag[1:], "}", 1)
+ prefix = namespaces.get(namespace_uri)
+ if prefix is None:
+ prefix = _namespace_map.get(namespace_uri)
+ if prefix is None:
+ prefix = "ns%d" % len(namespaces)
+ namespaces[namespace_uri] = prefix
+ if prefix == "xml":
+ xmlns = None
+ else:
+ xmlns = ("xmlns:%s" % prefix, namespace_uri)
+ else:
+ xmlns = None
+ return "%s:%s" % (prefix, tag), xmlns
+
+##
+# Parses an XML document into an element tree.
+#
+# @param source A filename or file object containing XML data.
+# @param parser An optional parser instance. If not given, the
+# standard {@link XMLTreeBuilder} parser is used.
+# @return An ElementTree instance
+
+def parse(source, parser=None):
+ tree = ElementTree()
+ tree.parse(source, parser)
+ return tree
+
+##
+# Parses an XML document into an element tree incrementally, and reports
+# what's going on to the user.
+#
+# @param source A filename or file object containing XML data.
+# @param events A list of events to report back. If omitted, only "end"
+# events are reported.
+# @return A (event, elem) iterator.
+
+class iterparse:
+
+ def __init__(self, source, events=None):
+ if not hasattr(source, "read"):
+ source = open(source, "rb")
+ self._file = source
+ self._events = []
+ self._index = 0
+ self.root = self._root = None
+ self._parser = XMLTreeBuilder()
+ # wire up the parser for event reporting
+ parser = self._parser._parser
+ append = self._events.append
+ if events is None:
+ events = ["end"]
+ for event in events:
+ if event == "start":
+ try:
+ parser.ordered_attributes = 1
+ parser.specified_attributes = 1
+ def handler(tag, attrib_in, event=event, append=append,
+ start=self._parser._start_list):
+ append((event, start(tag, attrib_in)))
+ parser.StartElementHandler = handler
+ except AttributeError:
+ def handler(tag, attrib_in, event=event, append=append,
+ start=self._parser._start):
+ append((event, start(tag, attrib_in)))
+ parser.StartElementHandler = handler
+ elif event == "end":
+ def handler(tag, event=event, append=append,
+ end=self._parser._end):
+ append((event, end(tag)))
+ parser.EndElementHandler = handler
+ elif event == "start-ns":
+ def handler(prefix, uri, event=event, append=append):
+ try:
+ uri = _encode(uri, "ascii")
+ except UnicodeError:
+ pass
+ append((event, (prefix or "", uri)))
+ parser.StartNamespaceDeclHandler = handler
+ elif event == "end-ns":
+ def handler(prefix, event=event, append=append):
+ append((event, None))
+ parser.EndNamespaceDeclHandler = handler
+
+ def next(self):
+ while 1:
+ try:
+ item = self._events[self._index]
+ except IndexError:
+ if self._parser is None:
+ self.root = self._root
+ try:
+ raise StopIteration
+ except NameError:
+ raise IndexError
+ # load event buffer
+ del self._events[:]
+ self._index = 0
+ data = self._file.read(16384)
+ if data:
+ self._parser.feed(data)
+ else:
+ self._root = self._parser.close()
+ self._parser = None
+ else:
+ self._index = self._index + 1
+ return item
+
+ try:
+ iter
+ def __iter__(self):
+ return self
+ except NameError:
+ def __getitem__(self, index):
+ return self.next()
+
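The iterparse class behaves as an iterator of (event, element) pairs, reading the source in 16 KB chunks and, by default, reporting only "end" events. A hedged usage sketch (the filename and tag names are hypothetical; clearing handled elements keeps memory use flat on large documents):

    for event, elem in iterparse("metadata.xml"):
        if elem.tag == "package":
            print elem.findtext("name", "")
            elem.clear()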
+##
+# Parses an XML document from a string constant. This function can
+# be used to embed "XML literals" in Python code.
+#
+# @param source A string containing XML data.
+# @return An Element instance.
+# @defreturn Element
+
+def XML(text):
+ parser = XMLTreeBuilder()
+ parser.feed(text)
+ return parser.close()
+
+##
+# Parses an XML document from a string constant, and also returns
+# a dictionary which maps from element id:s to elements.
+#
+# @param source A string containing XML data.
+# @return A tuple containing an Element instance and a dictionary.
+# @defreturn (Element, dictionary)
+
+def XMLID(text):
+ parser = XMLTreeBuilder()
+ parser.feed(text)
+ tree = parser.close()
+ ids = {}
+ for elem in tree.getiterator():
+ id = elem.get("id")
+ if id:
+ ids[id] = elem
+ return tree, ids
+
+##
+# Parses an XML document from a string constant. Same as {@link #XML}.
+#
+# @def fromstring(text)
+# @param source A string containing XML data.
+# @return An Element instance.
+# @defreturn Element
+
+fromstring = XML
+
+##
+# Generates a string representation of an XML element, including all
+# subelements.
+#
+# @param element An Element instance.
+# @return An encoded string containing the XML data.
+# @defreturn string
+
+def tostring(element, encoding=None):
+ class dummy:
+ pass
+ data = []
+ file = dummy()
+ file.write = data.append
+ ElementTree(element).write(file, encoding)
+ return string.join(data, "")
+
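XML()/fromstring() and tostring() provide a simple string round-trip on top of the machinery above. A minimal sketch (assumes the expat parser is available, as XMLTreeBuilder requires):

    elem = XML("<msg lang='en'>hi</msg>")
    print elem.get("lang")    # en
    print tostring(elem)      # <msg lang="en">hi</msg>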
+##
+# Generic element structure builder. This builder converts a sequence
+# of {@link #TreeBuilder.start}, {@link #TreeBuilder.data}, and {@link
+# #TreeBuilder.end} method calls to a well-formed element structure.
+# <p>
+# You can use this class to build an element structure using a custom XML
+# parser, or a parser for some other XML-like format.
+#
+# @param element_factory Optional element factory. This factory
+# is called to create new Element instances, as necessary.
+
+class TreeBuilder:
+
+ def __init__(self, element_factory=None):
+ self._data = [] # data collector
+ self._elem = [] # element stack
+ self._last = None # last element
+ self._tail = None # true if we're after an end tag
+ if element_factory is None:
+ element_factory = _ElementInterface
+ self._factory = element_factory
+
+ ##
+ # Flushes the parser buffers, and returns the toplevel document
+ # element.
+ #
+ # @return An Element instance.
+ # @defreturn Element
+
+ def close(self):
+ assert len(self._elem) == 0, "missing end tags"
+ assert self._last is not None, "missing toplevel element"
+ return self._last
+
+ def _flush(self):
+ if self._data:
+ if self._last is not None:
+ text = string.join(self._data, "")
+ if self._tail:
+ assert self._last.tail is None, "internal error (tail)"
+ self._last.tail = text
+ else:
+ assert self._last.text is None, "internal error (text)"
+ self._last.text = text
+ self._data = []
+
+ ##
+ # Adds text to the current element.
+ #
+ # @param data A string. This should be either an 8-bit string
+ # containing ASCII text, or a Unicode string.
+
+ def data(self, data):
+ self._data.append(data)
+
+ ##
+ # Opens a new element.
+ #
+ # @param tag The element name.
+ # @param attrib A dictionary containing element attributes.
+ # @return The opened element.
+ # @defreturn Element
+
+ def start(self, tag, attrs):
+ self._flush()
+ self._last = elem = self._factory(tag, attrs)
+ if self._elem:
+ self._elem[-1].append(elem)
+ self._elem.append(elem)
+ self._tail = 0
+ return elem
+
+ ##
+ # Closes the current element.
+ #
+ # @param tag The element name.
+ # @return The closed element.
+ # @defreturn Element
+
+ def end(self, tag):
+ self._flush()
+ self._last = self._elem.pop()
+ assert self._last.tag == tag,\
+ "end tag mismatch (expected %s, got %s)" % (
+ self._last.tag, tag)
+ self._tail = 1
+ return self._last
+
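TreeBuilder can also be driven directly by a custom (even non-XML) parser through its start()/data()/end()/close() protocol. A small sketch of that flow:

    tb = TreeBuilder()
    tb.start("root", {})
    tb.data("hello")
    tb.end("root")
    root = tb.close()
    print root.tag, root.text    # root hello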
+##
+# Element structure builder for XML source data, based on the
+# <b>expat</b> parser.
+#
+# @keyparam target Target object. If omitted, the builder uses an
+# instance of the standard {@link #TreeBuilder} class.
+# @keyparam html Predefine HTML entities. This flag is not supported
+# by the current implementation.
+# @see #ElementTree
+# @see #TreeBuilder
+
+class XMLTreeBuilder:
+
+ def __init__(self, html=0, target=None):
+ try:
+ from xml.parsers import expat
+ except ImportError:
+ raise ImportError(
+ "No module named expat; use SimpleXMLTreeBuilder instead"
+ )
+ self._parser = parser = expat.ParserCreate(None, "}")
+ if target is None:
+ target = TreeBuilder()
+ self._target = target
+ self._names = {} # name memo cache
+ # callbacks
+ parser.DefaultHandlerExpand = self._default
+ parser.StartElementHandler = self._start
+ parser.EndElementHandler = self._end
+ parser.CharacterDataHandler = self._data
+ # let expat do the buffering, if supported
+ try:
+ self._parser.buffer_text = 1
+ except AttributeError:
+ pass
+ # use new-style attribute handling, if supported
+ try:
+ self._parser.ordered_attributes = 1
+ self._parser.specified_attributes = 1
+ parser.StartElementHandler = self._start_list
+ except AttributeError:
+ pass
+ encoding = None
+ if not parser.returns_unicode:
+ encoding = "utf-8"
+ # target.xml(encoding, None)
+ self._doctype = None
+ self.entity = {}
+
+ def _fixtext(self, text):
+ # convert text string to ascii, if possible
+ try:
+ return _encode(text, "ascii")
+ except UnicodeError:
+ return text
+
+ def _fixname(self, key):
+ # expand qname, and convert name string to ascii, if possible
+ try:
+ name = self._names[key]
+ except KeyError:
+ name = key
+ if "}" in name:
+ name = "{" + name
+ self._names[key] = name = self._fixtext(name)
+ return name
+
+ def _start(self, tag, attrib_in):
+ fixname = self._fixname
+ tag = fixname(tag)
+ attrib = {}
+ for key, value in attrib_in.items():
+ attrib[fixname(key)] = self._fixtext(value)
+ return self._target.start(tag, attrib)
+
+ def _start_list(self, tag, attrib_in):
+ fixname = self._fixname
+ tag = fixname(tag)
+ attrib = {}
+ if attrib_in:
+ for i in xrange(0, len(attrib_in), 2):
+ attrib[fixname(attrib_in[i])] = self._fixtext(attrib_in[i+1])
+ return self._target.start(tag, attrib)
+
+ def _data(self, text):
+ return self._target.data(self._fixtext(text))
+
+ def _end(self, tag):
+ return self._target.end(self._fixname(tag))
+
+ def _default(self, text):
+ prefix = text[:1]
+ if prefix == "&":
+ # deal with undefined entities
+ try:
+ self._target.data(self.entity[text[1:-1]])
+ except KeyError:
+ from xml.parsers import expat
+ raise expat.error(
+ "undefined entity %s: line %d, column %d" %
+ (text, self._parser.ErrorLineNumber,
+ self._parser.ErrorColumnNumber)
+ )
+ elif prefix == "<" and text[:9] == "<!DOCTYPE":
+ self._doctype = [] # inside a doctype declaration
+ elif self._doctype is not None:
+ # parse doctype contents
+ if prefix == ">":
+ self._doctype = None
+ return
+ text = string.strip(text)
+ if not text:
+ return
+ self._doctype.append(text)
+ n = len(self._doctype)
+ if n > 2:
+ type = self._doctype[1]
+ if type == "PUBLIC" and n == 4:
+ name, type, pubid, system = self._doctype
+ elif type == "SYSTEM" and n == 3:
+ name, type, system = self._doctype
+ pubid = None
+ else:
+ return
+ if pubid:
+ pubid = pubid[1:-1]
+ self.doctype(name, pubid, system[1:-1])
+ self._doctype = None
+
+ ##
+ # Handles a doctype declaration.
+ #
+ # @param name Doctype name.
+ # @param pubid Public identifier.
+ # @param system System identifier.
+
+ def doctype(self, name, pubid, system):
+ pass
+
+ ##
+ # Feeds data to the parser.
+ #
+ # @param data Encoded data.
+
+ def feed(self, data):
+ self._parser.Parse(data, 0)
+
+ ##
+ # Finishes feeding data to the parser.
+ #
+ # @return An element structure.
+ # @defreturn Element
+
+ def close(self):
+ self._parser.Parse("", 1) # end of data
+ tree = self._target.close()
+ del self._target, self._parser # get rid of circular references
+ return tree