# Copyright 1998-2007 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Id: vartree.py 11392 2008-08-10 10:30:20Z zmedico $

__all__ = ["PreservedLibsRegistry", "LinkageMap",
	"vardbapi", "vartree", "dblink"] + \
	["write_contents", "tar_contents"]

from portage.checksum import perform_md5
from portage.const import CACHE_PATH, CONFIG_MEMORY_FILE, PORTAGE_BIN_PATH, \
	PRIVATE_PATH, VDB_PATH
from portage.data import portage_gid, portage_uid, secpass
from portage.dbapi import dbapi
from portage.dep import dep_getslot, use_reduce, paren_reduce, isvalidatom, \
	isjustname, dep_getkey, match_from_list
from portage.exception import InvalidAtom, InvalidData, InvalidPackageName, \
	FileNotFound, PermissionDenied, UnsupportedAPIException
from portage.locks import lockdir, unlockdir
from portage.output import bold, red, green
from portage.update import fixdbentries
from portage.util import apply_secpass_permissions, ConfigProtect, ensure_dirs, \
	writemsg, writemsg_stdout, writemsg_level, \
	write_atomic, atomic_ofstream, writedict, \
	grabfile, grabdict, normalize_path, new_protect_filename, getlibpaths
from portage.versions import pkgsplit, catpkgsplit, catsplit, best, pkgcmp

from portage import listdir, dep_expand, flatten, key_expand, \
	doebuild_environment, doebuild, env_update, prepare_build_dirs, \
	abssymlink, movefile, _movefile, bsd_chflags, cpv_getkey

from portage.elog import elog_process
from portage.elog.filtering import filter_mergephases, filter_unmergephases

import os, re, sys, stat, errno, commands, copy, time, subprocess

from itertools import izip

try:
	import cPickle
except ImportError:
	import pickle as cPickle

class PreservedLibsRegistry(object):
	""" This class handles the tracking of preserved library objects """
	def __init__(self, filename, autocommit=True):
		""" @param filename: absolute path for saving the preserved libs records
		    @type filename: String
		    @param autocommit: determines if the file is written after every update
		    @type autocommit: Boolean
		"""
		self._filename = filename
		self._autocommit = autocommit
		self.load()
		self.pruneNonExisting()

	def load(self):
		""" Reload the registry data from file """
		try:
			self._data = cPickle.load(open(self._filename, "r"))
		except (EOFError, IOError), e:
			if isinstance(e, EOFError) or e.errno == errno.ENOENT:
				self._data = {}
			elif e.errno == PermissionDenied.errno:
				raise PermissionDenied(self._filename)
			else:
				raise e

	def store(self):
		""" Store the registry data to file. No need to call this if autocommit
		    was enabled.
		"""
		if os.environ.get("SANDBOX_ON") == "1":
			return
		try:
			f = atomic_ofstream(self._filename)
			cPickle.dump(self._data, f)
			f.close()
		except EnvironmentError, e:
			if e.errno != PermissionDenied.errno:
				writemsg("!!! %s %s\n" % (e, self._filename), noiselevel=-1)

	def register(self, cpv, slot, counter, paths):
		""" Register new objects in the registry. If there is a record with the
			same packagename (internally derived from cpv) and slot it is
			overwritten with the new data.
			@param cpv: package instance that owns the objects
			@type cpv: CPV (as String)
			@param slot: the value of SLOT of the given package instance
			@type slot: String
			@param counter: vdb counter value for the package instance
			@type counter: Integer
			@param paths: absolute paths of objects that got preserved during an update
			@type paths: List
		"""
		cp = "/".join(catpkgsplit(cpv)[:2])
		cps = cp + ":" + slot
		if len(paths) == 0 and cps in self._data \
				and self._data[cps][0] == cpv and int(self._data[cps][1]) == int(counter):
			del self._data[cps]
		elif len(paths) > 0:
			self._data[cps] = (cpv, counter, paths)
		if self._autocommit:
			self.store()

	def unregister(self, cpv, slot, counter):
		""" Remove a previous registration of preserved objects for the given package.
			@param cpv: package instance whose records should be removed
			@type cpv: CPV (as String)
			@param slot: the value of SLOT of the given package instance
			@type slot: String
		"""
		self.register(cpv, slot, counter, [])

	def pruneNonExisting(self):
		""" Remove all records for objects that no longer exist on the filesystem. """
		for cps in self._data.keys():
			cpv, counter, paths = self._data[cps]
			paths = [f for f in paths if os.path.exists(f)]
			if len(paths) > 0:
				self._data[cps] = (cpv, counter, paths)
			else:
				del self._data[cps]
		if self._autocommit:
			self.store()

	def hasEntries(self):
		""" Check if this registry contains any records. """
		return len(self._data) > 0

	def getPreservedLibs(self):
		""" Return a mapping of packages->preserved objects.
			@returns mapping of package instances to preserved objects
			@rtype Dict cpv->list-of-paths
		"""
		rValue = {}
		for cps in self._data:
			rValue[self._data[cps][0]] = self._data[cps][2]
		return rValue

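# Illustrative sketch (not part of the original module): typical use of
# PreservedLibsRegistry.  The registry path and package data below are
# hypothetical.
#
#	registry = PreservedLibsRegistry("/var/lib/portage/preserved_libs_registry")
#	registry.register("x11-libs/libfoo-1.0", "0", 1234, ["/usr/lib/libfoo.so.0"])
#	registry.getPreservedLibs()
#	# -> {'x11-libs/libfoo-1.0': ['/usr/lib/libfoo.so.0']}
#	registry.unregister("x11-libs/libfoo-1.0", "0", 1234)
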
class LinkageMap(object):

	"""Models dynamic linker dependencies."""

	def __init__(self, vardbapi):
		self._dbapi = vardbapi
		self._libs = {}
		self._obj_properties = {}
		self._defpath = set(getlibpaths())
		self._obj_key_cache = {}

	class _ObjectKey(object):

		"""Helper class used as _obj_properties keys for objects."""

		def __init__(self, object):
			"""
			This takes a path to an object.

			@param object: path to a file
			@type object: string (example: '/usr/bin/bar')

			"""
			self._key = self._generate_object_key(object)

		def __hash__(self):
			return hash(self._key)

		def __eq__(self, other):
			return self._key == other._key

		def __ne__(self, other):
			return self._key != other._key

		def _generate_object_key(self, object):
			"""
			Generate object key for a given object.

			@param object: path to a file
			@type object: string (example: '/usr/bin/bar')
			@rtype: 2-tuple of types (long, int) if object exists. string if
				object does not exist.
			@return:
				1. 2-tuple of object's inode and device from a stat call, if object
					exists.
				2. realpath of object if object does not exist.

			"""
			try:
				object_stat = os.stat(object)
			except OSError:
				# Use the realpath as the key if the file does not exists on the
				# filesystem.
				return os.path.realpath(object)
			# Return a tuple of the device and inode.
			return (object_stat.st_dev, object_stat.st_ino)

		def file_exists(self):
			"""
			Determine if the file for this key exists on the filesystem.

			@rtype: Boolean
			@return:
				1. True if the file exists.
				2. False if the file does not exist or is a broken symlink.

			"""
			return isinstance(self._key, tuple)

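	# Illustrative sketch (not in the original source): two paths that stat to
	# the same file compare equal, because the key is the (st_dev, st_ino)
	# pair rather than the path string.  The library names are hypothetical.
	#
	#	key1 = LinkageMap._ObjectKey("/usr/lib/libfoo.so.1")
	#	key2 = LinkageMap._ObjectKey("/usr/lib/libfoo.so")  # symlink to it
	#	key1 == key2  # -> True when both resolve to the same inode
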
	def rebuild(self, include_file=None):
		libs = {}
		obj_key_cache = {}
		obj_properties = {}
		lines = []
		for cpv in self._dbapi.cpv_all():
			lines += self._dbapi.aux_get(cpv, ["NEEDED.ELF.2"])[0].split('\n')
		# Cache NEEDED.* files to avoid doing excessive IO for every rebuild.
		self._dbapi.flush_cache()

		if include_file:
			lines += grabfile(include_file)

		# have to call scanelf for preserved libs here as they aren't
		# registered in NEEDED.ELF.2 files
		if self._dbapi.plib_registry and self._dbapi.plib_registry.getPreservedLibs():
			args = ["/usr/bin/scanelf", "-qF", "%a;%F;%S;%r;%n"]
			for items in self._dbapi.plib_registry.getPreservedLibs().values():
				args += [x.lstrip(".") for x in items]
			proc = subprocess.Popen(args, stdout=subprocess.PIPE)
			output = [l[3:] for l in proc.communicate()[0].split("\n")]
			lines += output

		for l in lines:
			if l.strip() == "":
				continue
			fields = l.strip("\n").split(";")
			if len(fields) < 5:
				print "Error", fields
				# insufficient field length
				continue
			arch = fields[0]
			obj = fields[1]
			obj_key = self._ObjectKey(obj)
			soname = fields[2]
			path = set([normalize_path(x)
				for x in filter(None, fields[3].replace(
				"${ORIGIN}", os.path.dirname(obj)).replace(
				"$ORIGIN", os.path.dirname(obj)).split(":"))])
			needed = filter(None, fields[4].split(","))
			if soname:
				libs.setdefault(soname, \
					{arch: {"providers": set(), "consumers": set()}})
				libs[soname].setdefault(arch, \
					{"providers": set(), "consumers": set()})
				libs[soname][arch]["providers"].add(obj_key)
			for x in needed:
				libs.setdefault(x, \
					{arch: {"providers": set(), "consumers": set()}})
				libs[x].setdefault(arch, {"providers": set(), "consumers": set()})
				libs[x][arch]["consumers"].add(obj_key)
			obj_key_cache.setdefault(obj, obj_key)
			# All object paths are added into the obj_properties tuple
			obj_properties.setdefault(obj_key, \
				(arch, needed, path, soname, set()))[4].add(obj)

		self._libs = libs
		self._obj_properties = obj_properties
		self._obj_key_cache = obj_key_cache

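	# Illustrative sketch (not in the original source): rebuild() consumes
	# NEEDED.ELF.2 records of the form "arch;obj;soname;rpath;needed", e.g.
	#
	#	x86_64;/usr/bin/foo;;/usr/lib/foo:${ORIGIN};libbar.so.1,libc.so.6
	#
	# which registers /usr/bin/foo as a consumer of libbar.so.1 and
	# libc.so.6 for the x86_64 arch.  The binary and sonames here are
	# hypothetical.
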
	def listBrokenBinaries(self, debug=False):
		"""
		Find binaries and their needed sonames, which have no providers.

		@param debug: Boolean to enable debug output
		@type debug: Boolean
		@rtype: dict (example: {'/usr/bin/foo': set(['libbar.so'])})
		@return: The return value is an object -> set-of-sonames mapping, where
			object is a broken binary and the set consists of sonames needed by
			object that have no corresponding libraries to fulfill the dependency.

		"""
		class _LibraryCache(object):

			"""
			Caches properties associated with paths.

			The purpose of this class is to prevent multiple instances of
			_ObjectKey for the same paths.

			"""

			def __init__(cache_self):
				cache_self.cache = {}

			def get(cache_self, obj):
				"""
				Caches and returns properties associated with an object.

				@param obj: absolute path (can be symlink)
				@type obj: string (example: '/usr/lib/libfoo.so')
				@rtype: 4-tuple with types
					(string or None, string or None, 2-tuple, Boolean)
				@return: 4-tuple with the following components:
					1. arch as a string or None if it does not exist,
					2. soname as a string or None if it does not exist,
					3. obj_key as 2-tuple,
					4. Boolean representing whether the object exists.
					(example: ('libfoo.so.1', (123L, 456L), True))

				"""
				if obj in cache_self.cache:
					return cache_self.cache[obj]
				else:
					if obj in self._obj_key_cache:
						obj_key = self._obj_key_cache.get(obj)
					else:
						obj_key = self._ObjectKey(obj)
					# Check that the library exists on the filesystem.
					if obj_key.file_exists():
						# Get the arch and soname from LinkageMap._obj_properties if
						# it exists. Otherwise, None.
						arch, _, _, soname, _ = \
							self._obj_properties.get(obj_key, (None,)*5)
						return cache_self.cache.setdefault(obj, \
							(arch, soname, obj_key, True))
					else:
						return cache_self.cache.setdefault(obj, \
							(None, None, obj_key, False))

		rValue = {}
		cache = _LibraryCache()
		providers = self.listProviders()

		# Iterate over all obj_keys and their providers.
		for obj_key, sonames in providers.items():
			arch, _, path, _, objs = self._obj_properties[obj_key]
			path = path.union(self._defpath)
			# Iterate over each needed soname and the set of library paths that
			# fulfill the soname to determine if the dependency is broken.
			for soname, libraries in sonames.items():
				# validLibraries is used to store libraries, which satisfy soname,
				# so if no valid libraries are found, the soname is not satisfied
				# for obj_key.  If unsatisfied, objects associated with obj_key
				# must be emerged.
				validLibraries = set()
				# It could be the case that the library to satisfy the soname is
				# not in the obj's runpath, but a symlink to the library is (eg
				# libnvidia-tls.so.1 in nvidia-drivers).  Also, since LinkageMap
				# does not catalog symlinks, broken or missing symlinks may go
				# unnoticed.  As a result of these cases, check that a file with
				# the same name as the soname exists in obj's runpath.
				# XXX If we catalog symlinks in LinkageMap, this could be improved.
				for directory in path:
					cachedArch, cachedSoname, cachedKey, cachedExists = \
						cache.get(os.path.join(directory, soname))
					# Check that this library provides the needed soname.  Doing
					# this, however, will cause consumers of libraries missing
					# sonames to be unnecessarily emerged. (eg libmix.so)
					if cachedSoname == soname and cachedArch == arch:
						validLibraries.add(cachedKey)
						if debug and cachedKey not in \
								set(map(self._obj_key_cache.get, libraries)):
							# XXX This is most often due to soname symlinks not in
							# a library's directory.  We could catalog symlinks in
							# LinkageMap to avoid checking for this edge case here.
							print "Found provider outside of findProviders:", \
									os.path.join(directory, soname), "->", \
									self._obj_properties[cachedKey][4], libraries
						# A valid library has been found, so there is no need to
						# continue.
						break
					if debug and cachedArch == arch and \
							cachedKey in self._obj_properties:
						print "Broken symlink or missing/bad soname:", \
								os.path.join(directory, soname), '->', \
								self._obj_properties[cachedKey], "with soname", \
								cachedSoname, "but expecting", soname
				# This conditional checks if there are no libraries to satisfy the
				# soname (empty set).
				if not validLibraries:
					for obj in objs:
						rValue.setdefault(obj, set()).add(soname)
					# If no valid libraries have been found by this point, then
					# there are no files named with the soname within obj's runpath,
					# but if there are libraries (from the providers mapping), it is
					# likely that soname symlinks or the actual libraries are
					# missing or broken.  Thus those libraries are added to rValue
					# in order to emerge corrupt library packages.
					for lib in libraries:
						rValue.setdefault(lib, set()).add(soname)
						if debug:
							if not os.path.isfile(lib):
								print "Missing library:", lib
							else:
								print "Possibly missing symlink:", \
										os.path.join(os.path.dirname(lib), soname)

		return rValue

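	# Illustrative sketch (not in the original source): how a caller might
	# consume listBrokenBinaries().  The paths are hypothetical.
	#
	#	linkmap = vardb.linkmap
	#	linkmap.rebuild()
	#	for obj, missing in linkmap.listBrokenBinaries().items():
	#		print obj, "is missing", ", ".join(missing)
	#	# e.g. "/usr/bin/foo is missing libbar.so.1"
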
	def listProviders(self):
		"""
		Find the providers for all object keys in LinkageMap.

		@rtype: dict (example:
			{(123L, 456L): {'libbar.so': set(['/lib/libbar.so.1.5'])}})
		@return: The return value is an object key -> providers mapping, where
			providers is a mapping of soname -> set-of-library-paths returned
			from the findProviders method.

		"""
		rValue = {}
		if not self._libs:
			self.rebuild()
		# Iterate over all object keys within LinkageMap.
		for obj_key in self._obj_properties:
			rValue.setdefault(obj_key, self.findProviders(obj_key))
		return rValue

	def isMasterLink(self, obj):
		"""
		Determine whether an object is a master link.

		@param obj: absolute path to an object
		@type obj: string (example: '/usr/bin/foo')
		@rtype: Boolean
		@return:
			1. True if obj is a master link
			2. False if obj is not a master link

		"""
		basename = os.path.basename(obj)
		obj_key = self._ObjectKey(obj)
		if obj_key not in self._obj_properties:
			raise KeyError("%s (%s) not in object list" % (obj_key, obj))
		soname = self._obj_properties[obj_key][3]
		return (len(basename) < len(soname))

	def listLibraryObjects(self):
		"""
		Return a list of library objects.

		Known limitation: library objects lacking an soname are not included.

		@rtype: list of strings
		@return: list of paths to all providers

		"""
		rValue = []
		if not self._libs:
			self.rebuild()
		for soname in self._libs:
			for arch in self._libs[soname]:
				for obj_key in self._libs[soname][arch]["providers"]:
					rValue.extend(self._obj_properties[obj_key][4])
		return rValue

	def getSoname(self, obj):
		"""
		Return the soname associated with an object.

		@param obj: absolute path to an object
		@type obj: string (example: '/usr/bin/bar')
		@rtype: string
		@return: soname as a string

		"""
		if not self._libs:
			self.rebuild()
		if obj not in self._obj_key_cache:
			raise KeyError("%s not in object list" % obj)
		return self._obj_properties[self._obj_key_cache[obj]][3]

	def findProviders(self, obj):
		"""
		Find providers for an object or object key.

		This method may be called with a key from _obj_properties.

		In some cases, not all valid libraries are returned.  This may occur when
		an soname symlink referencing a library is in an object's runpath while
		the actual library is not.  We should consider cataloging symlinks within
		LinkageMap as this would avoid those cases and would be a better model of
		library dependencies (since the dynamic linker actually searches for
		files named with the soname in the runpaths).

		@param obj: absolute path to an object or a key from _obj_properties
		@type obj: string (example: '/usr/bin/bar') or _ObjectKey
		@rtype: dict (example: {'libbar.so': set(['/lib/libbar.so.1.5'])})
		@return: The return value is a soname -> set-of-library-paths, where
			set-of-library-paths satisfy soname.

		"""
		rValue = {}

		if not self._libs:
			self.rebuild()

		# Determine the obj_key from the arguments.
		if isinstance(obj, self._ObjectKey):
			obj_key = obj
			# Validate obj_key.
			if obj_key not in self._obj_properties:
				raise KeyError("%s not in object list" % obj_key)
		else:
			obj_key = self._obj_key_cache.get(obj)
			if obj_key not in self._obj_properties:
				obj_key = self._ObjectKey(obj)
				if obj_key not in self._obj_properties:
					raise KeyError("%s (%s) not in object list" % (obj_key, obj))

		arch, needed, path, _, _ = self._obj_properties[obj_key]
		path = path.union(self._defpath)
		for soname in needed:
			rValue[soname] = set()
			if soname not in self._libs or arch not in self._libs[soname]:
				continue
			# For each potential provider of the soname, add it to rValue if it
			# resides in the obj's runpath.
			for provider_key in self._libs[soname][arch]["providers"]:
				providers = self._obj_properties[provider_key][4]
				for provider in providers:
					if os.path.dirname(provider) in path:
						rValue[soname].add(provider)
		return rValue

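	# Illustrative sketch (not in the original source): findProviders() maps
	# each needed soname to the providers visible from the object's runpath,
	# e.g. with hypothetical paths:
	#
	#	linkmap.findProviders("/usr/bin/foo")
	#	# -> {'libbar.so.1': set(['/usr/lib/libbar.so.1.5']),
	#	#     'libmissing.so': set()}   # empty set: no provider in runpath
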
	def findConsumers(self, obj):
		"""
		Find consumers of an object or object key.

		This method may be called with a key from _obj_properties.

		In some cases, not all consumers are returned.  This may occur when
		an soname symlink referencing a library is in an object's runpath while
		the actual library is not.

		@param obj: absolute path to an object or a key from _obj_properties
		@type obj: string (example: '/usr/bin/bar') or _ObjectKey
		@rtype: set of strings (example: set(['/bin/foo', '/usr/bin/bar']))
		@return: the set of paths of objects that consume the given object

		"""
		rValue = set()

		if not self._libs:
			self.rebuild()

		# Determine the obj_key and the set of objects matching the arguments.
		if isinstance(obj, self._ObjectKey):
			obj_key = obj
			# Validate obj_key.
			if obj_key not in self._obj_properties:
				raise KeyError("%s not in object list" % obj_key)
			objs = self._obj_properties[obj_key][4]
		else:
			objs = set([obj])
			obj_key = self._obj_key_cache.get(obj)
			if obj_key not in self._obj_properties:
				obj_key = self._ObjectKey(obj)
				if obj_key not in self._obj_properties:
					raise KeyError("%s (%s) not in object list" % (obj_key, obj))

		# Determine the directory(ies) from the set of objects.
		objs_dirs = set([os.path.dirname(x) for x in objs])

		# If there is another version of this lib with the
		# same soname and the master link points to that
		# other version, this lib will be shadowed and won't
		# have any consumers.
		soname = self._obj_properties[obj_key][3]
		shadowed_library = True
		for obj_dir in objs_dirs:
			master_link = os.path.join(obj_dir, soname)
			master_link_obj_key = self._ObjectKey(master_link)
			if obj_key == master_link_obj_key:
				shadowed_library = False
				break
		if shadowed_library:
			return set()

		arch, _, _, soname, _ = self._obj_properties[obj_key]
		if soname in self._libs and arch in self._libs[soname]:
			# For each potential consumer, add it to rValue if an object from the
			# arguments resides in the consumer's runpath.
			for consumer_key in self._libs[soname][arch]["consumers"]:
				_, _, path, _, consumer_objs = \
					self._obj_properties[consumer_key]
				path = path.union(self._defpath)
				if objs_dirs.intersection(path):
					rValue.update(consumer_objs)
		return rValue

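	# Illustrative sketch (not in the original source): findConsumers() is the
	# inverse of findProviders(), answering "which installed objects still
	# link against this library?" (hypothetical paths):
	#
	#	linkmap.findConsumers("/usr/lib/libbar.so.1.5")
	#	# -> set(['/usr/bin/foo', '/usr/bin/baz'])
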
class vardbapi(dbapi):

	_excluded_dirs = ["CVS", "lost+found"]
	_excluded_dirs = [re.escape(x) for x in _excluded_dirs]
	_excluded_dirs = re.compile(r'^(\..*|-MERGING-.*|' + \
		"|".join(_excluded_dirs) + r')$')

	_aux_cache_version = "1"
	_owners_cache_version = "1"

	# Number of uncached packages to trigger cache update, since
	# it's wasteful to update it for every vdb change.
	_aux_cache_threshold = 5

	_aux_cache_keys_re = re.compile(r'^NEEDED\..*$')
	_aux_multi_line_re = re.compile(r'^(CONTENTS|NEEDED\..*)$')

	def __init__(self, root, categories=None, settings=None, vartree=None):
		"""
		The categories parameter is unused since the dbapi class
		now has a categories property that is generated from the
		available packages.
		"""
		self.root = root[:]

		#cache for category directory mtimes
		self.mtdircache = {}

		#cache for dependency checks
		self.matchcache = {}

		#cache for cp_list results
		self.cpcache = {}

		self.blockers = None
		if settings is None:
			from portage import settings
		self.settings = settings
		if vartree is None:
			from portage import db
			vartree = db[root]["vartree"]
		self.vartree = vartree
		self._aux_cache_keys = set(
			["CHOST", "COUNTER", "DEPEND", "DESCRIPTION",
			"EAPI", "HOMEPAGE", "IUSE", "KEYWORDS",
			"LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
			"repository", "RESTRICT" , "SLOT", "USE"])
		self._aux_cache_obj = None
		self._aux_cache_filename = os.path.join(self.root,
			CACHE_PATH.lstrip(os.path.sep), "vdb_metadata.pickle")
		self._counter_path = os.path.join(root,
			CACHE_PATH.lstrip(os.path.sep), "counter")

		try:
			self.plib_registry = PreservedLibsRegistry(
				os.path.join(self.root, PRIVATE_PATH, "preserved_libs_registry"))
		except PermissionDenied:
			# apparently this user isn't allowed to access PRIVATE_PATH
			self.plib_registry = None

		self.linkmap = LinkageMap(self)
		self._owners = self._owners_db(self)

	def getpath(self, mykey, filename=None):
		rValue = os.path.join(self.root, VDB_PATH, mykey)
		if filename is not None:
			rValue = os.path.join(rValue, filename)
		return rValue

	def cpv_exists(self, mykey):
		"Tells us whether an actual ebuild exists on disk (no masking)"
		return os.path.exists(self.getpath(mykey))

	def cpv_counter(self, mycpv):
		"This method will grab the COUNTER. Returns a counter value."
		try:
			return long(self.aux_get(mycpv, ["COUNTER"])[0])
		except (KeyError, ValueError):
			pass
		cdir = self.getpath(mycpv)
		cpath = self.getpath(mycpv, filename="COUNTER")

		# We write our new counter value to a new file that gets moved into
		# place to avoid filesystem corruption on XFS (unexpected reboot.)
		corrupted = 0
		if os.path.exists(cpath):
			cfile = open(cpath, "r")
			try:
				counter = long(cfile.readline())
			except ValueError:
				print "portage: COUNTER for", mycpv, "was corrupted; resetting to value of 0"
				counter = long(0)
				corrupted = 1
			cfile.close()
		elif os.path.exists(cdir):
			mys = pkgsplit(mycpv)
			myl = self.match(mys[0], use_cache=0)
			if len(myl) == 1:
				try:
					# Only one package... Counter doesn't matter.
					write_atomic(cpath, "1")
					counter = 1
				except SystemExit, e:
					raise
				except Exception, e:
					writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
						noiselevel=-1)
					writemsg("!!! Please run %s/fix-db.py or\n" % PORTAGE_BIN_PATH,
						noiselevel=-1)
					writemsg("!!! unmerge this exact version.\n", noiselevel=-1)
					writemsg("!!! %s\n" % e, noiselevel=-1)
					sys.exit(1)
			else:
				writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
					noiselevel=-1)
				writemsg("!!! Please run %s/fix-db.py or\n" % PORTAGE_BIN_PATH,
					noiselevel=-1)
				writemsg("!!! remerge the package.\n", noiselevel=-1)
				sys.exit(1)
		else:
			counter = long(0)
		if corrupted:
			# update new global counter file
			write_atomic(cpath, str(counter))
		return counter

	def cpv_inject(self, mycpv):
		"injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
		os.makedirs(self.getpath(mycpv))
		counter = self.counter_tick(self.root, mycpv=mycpv)
		# write local package counter so that emerge clean does the right thing
		write_atomic(self.getpath(mycpv, filename="COUNTER"), str(counter))

	def isInjected(self, mycpv):
		if self.cpv_exists(mycpv):
			if os.path.exists(self.getpath(mycpv, filename="INJECTED")):
				return True
			if not os.path.exists(self.getpath(mycpv, filename="CONTENTS")):
				return True
		return False

	def move_ent(self, mylist):
		origcp = mylist[1]
		newcp = mylist[2]

		# sanity check
		for cp in [origcp, newcp]:
			if not (isvalidatom(cp) and isjustname(cp)):
				raise InvalidPackageName(cp)
		origmatches = self.match(origcp, use_cache=0)
		moves = 0
		if not origmatches:
			return moves
		for mycpv in origmatches:
			mycpsplit = catpkgsplit(mycpv)
			mynewcpv = newcp + "-" + mycpsplit[2]
			mynewcat = newcp.split("/")[0]
			if mycpsplit[3] != "r0":
				mynewcpv += "-" + mycpsplit[3]
			mycpsplit_new = catpkgsplit(mynewcpv)
			origpath = self.getpath(mycpv)
			if not os.path.exists(origpath):
				continue
			moves += 1
			if not os.path.exists(self.getpath(mynewcat)):
				#create the directory
				os.makedirs(self.getpath(mynewcat))
			newpath = self.getpath(mynewcpv)
			if os.path.exists(newpath):
				#dest already exists; keep this puppy where it is.
				continue
			_movefile(origpath, newpath, mysettings=self.settings)

			# We need to rename the ebuild now.
			old_pf = catsplit(mycpv)[1]
			new_pf = catsplit(mynewcpv)[1]
			if old_pf != new_pf:
				try:
					os.rename(os.path.join(newpath, old_pf + ".ebuild"),
						os.path.join(newpath, new_pf + ".ebuild"))
				except EnvironmentError, e:
					if e.errno != errno.ENOENT:
						raise
					del e
			write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
			write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
			fixdbentries([mylist], newpath)
		return moves

	def cp_list(self, mycp, use_cache=1):
		mysplit = catsplit(mycp)
		if mysplit[0] == '*':
			mysplit[0] = mysplit[0][1:]
		try:
			mystat = os.stat(self.getpath(mysplit[0]))[stat.ST_MTIME]
		except OSError:
			mystat = 0
		if use_cache and mycp in self.cpcache:
			cpc = self.cpcache[mycp]
			if cpc[0] == mystat:
				return cpc[1][:]
		cat_dir = self.getpath(mysplit[0])
		try:
			dir_list = os.listdir(cat_dir)
		except EnvironmentError, e:
			if e.errno == PermissionDenied.errno:
				raise PermissionDenied(cat_dir)
			del e
			return []

		returnme = []
		for x in dir_list:
			if self._excluded_dirs.match(x) is not None:
				continue
			ps = pkgsplit(x)
			if not ps:
				self.invalidentry(os.path.join(self.getpath(mysplit[0]), x))
				continue
			if len(mysplit) > 1:
				if ps[0] == mysplit[1]:
					returnme.append(mysplit[0]+"/"+x)
		self._cpv_sort_ascending(returnme)
		if use_cache:
			self.cpcache[mycp] = [mystat, returnme[:]]
		elif mycp in self.cpcache:
			del self.cpcache[mycp]
		return returnme

	def cpv_all(self, use_cache=1):
		"""
		Set use_cache=0 to bypass the portage.cachedir() cache in cases
		when the accuracy of mtime staleness checks should not be trusted
		(generally this is only necessary in critical sections that
		involve merge or unmerge of packages).
		"""
		returnme = []
		basepath = os.path.join(self.root, VDB_PATH) + os.path.sep

		if use_cache:
			from portage import listdir
		else:
			def listdir(p, **kwargs):
				try:
					return [x for x in os.listdir(p) \
						if os.path.isdir(os.path.join(p, x))]
				except EnvironmentError, e:
					if e.errno == PermissionDenied.errno:
						raise PermissionDenied(p)
					del e
					return []

		for x in listdir(basepath, EmptyOnError=1, ignorecvs=1, dirsonly=1):
			if self._excluded_dirs.match(x) is not None:
				continue
			if not self._category_re.match(x):
				continue
			for y in listdir(basepath + x, EmptyOnError=1, dirsonly=1):
				if self._excluded_dirs.match(y) is not None:
					continue
				subpath = x + "/" + y
				# -MERGING- should never be a cpv, nor should files.
				try:
					if catpkgsplit(subpath) is None:
						self.invalidentry(os.path.join(self.root, subpath))
						continue
				except InvalidData:
					self.invalidentry(os.path.join(self.root, subpath))
					continue
				returnme.append(subpath)
		return returnme

	def cp_all(self, use_cache=1):
		mylist = self.cpv_all(use_cache=use_cache)
		d = {}
		for y in mylist:
			if y[0] == '*':
				y = y[1:]
			try:
				mysplit = catpkgsplit(y)
			except InvalidData:
				self.invalidentry(self.getpath(y))
				continue
			if not mysplit:
				self.invalidentry(self.getpath(y))
				continue
			d[mysplit[0]+"/"+mysplit[1]] = None
		return d.keys()

	def checkblockers(self, origdep):
		pass

	def _clear_cache(self):
		self.mtdircache.clear()
		self.matchcache.clear()
		self.cpcache.clear()
		self._aux_cache_obj = None

	def _add(self, pkg_dblink):
		self._clear_pkg_cache(pkg_dblink)

	def _remove(self, pkg_dblink):
		self._clear_pkg_cache(pkg_dblink)

	def _clear_pkg_cache(self, pkg_dblink):
		# Due to 1 second mtime granularity in <python-2.5, mtime checks
		# are not always sufficient to invalidate vardbapi caches. Therefore,
		# the caches need to be actively invalidated here.
		self.mtdircache.pop(pkg_dblink.cat, None)
		self.matchcache.pop(pkg_dblink.cat, None)
		self.cpcache.pop(pkg_dblink.mysplit[0], None)
		from portage import dircache
		dircache.pop(pkg_dblink.dbcatdir, None)

	def match(self, origdep, use_cache=1):
		"caching match function"
		mydep = dep_expand(
			origdep, mydb=self, use_cache=use_cache, settings=self.settings)
		mykey = dep_getkey(mydep)
		mycat = catsplit(mykey)[0]
		if not use_cache:
			if mycat in self.matchcache:
				del self.mtdircache[mycat]
				del self.matchcache[mycat]
			return list(self._iter_match(mydep,
				self.cp_list(mydep.cp, use_cache=use_cache)))
		try:
			curmtime = os.stat(self.root+VDB_PATH+"/"+mycat).st_mtime
		except (IOError, OSError):
			curmtime = 0

		if mycat not in self.matchcache or \
			self.mtdircache[mycat] != curmtime:
			# clear cache entry
			self.mtdircache[mycat] = curmtime
			self.matchcache[mycat] = {}
		if mydep not in self.matchcache[mycat]:
			mymatch = list(self._iter_match(mydep,
				self.cp_list(mydep.cp, use_cache=use_cache)))
			self.matchcache[mycat][mydep] = mymatch
		return self.matchcache[mycat][mydep][:]

	def findname(self, mycpv):
		return self.getpath(str(mycpv), filename=catsplit(mycpv)[1]+".ebuild")

	def flush_cache(self):
		"""If the current user has permission and the internal aux_get cache has
		been updated, save it to disk and mark it unmodified.  This is called
		by emerge after it has loaded the full vdb for use in dependency
		calculations.  Currently, the cache is only written if the user has
		superuser privileges (since that's required to obtain a lock), but all
		users have read access and benefit from faster metadata lookups (as
		long as at least part of the cache is still valid)."""
		if self._aux_cache is not None and \
			len(self._aux_cache["modified"]) >= self._aux_cache_threshold and \
			secpass >= 2:
			self._owners.populate() # index any unindexed contents
			valid_nodes = set(self.cpv_all())
			for cpv in self._aux_cache["packages"].keys():
				if cpv not in valid_nodes:
					del self._aux_cache["packages"][cpv]
			del self._aux_cache["modified"]
			try:
				f = atomic_ofstream(self._aux_cache_filename)
				cPickle.dump(self._aux_cache, f, -1)
				f.close()
				apply_secpass_permissions(
					self._aux_cache_filename, gid=portage_gid, mode=0644)
			except (IOError, OSError), e:
				pass
			self._aux_cache["modified"] = set()

	@property
	def _aux_cache(self):
		if self._aux_cache_obj is None:
			self._aux_cache_init()
		return self._aux_cache_obj

	def _aux_cache_init(self):
		aux_cache = None
		try:
			f = open(self._aux_cache_filename)
			mypickle = cPickle.Unpickler(f)
			mypickle.find_global = None
			aux_cache = mypickle.load()
			f.close()
			del f
		except (IOError, OSError, EOFError, cPickle.UnpicklingError), e:
			if isinstance(e, cPickle.UnpicklingError):
				writemsg("!!! Error loading '%s': %s\n" % \
					(self._aux_cache_filename, str(e)), noiselevel=-1)
			del e

		if not aux_cache or \
			not isinstance(aux_cache, dict) or \
			aux_cache.get("version") != self._aux_cache_version or \
			not aux_cache.get("packages"):
			aux_cache = {"version": self._aux_cache_version}
			aux_cache["packages"] = {}

		owners = aux_cache.get("owners")
		if owners is not None:
			if not isinstance(owners, dict):
				owners = None
			elif "version" not in owners:
				owners = None
			elif owners["version"] != self._owners_cache_version:
				owners = None
			elif "base_names" not in owners:
				owners = None
			elif not isinstance(owners["base_names"], dict):
				owners = None

		if owners is None:
			owners = {
				"base_names" : {},
				"version" : self._owners_cache_version
			}
			aux_cache["owners"] = owners

		aux_cache["modified"] = set()
		self._aux_cache_obj = aux_cache

	def aux_get(self, mycpv, wants):
		"""This automatically caches selected keys that are frequently needed
		by emerge for dependency calculations.  The cached metadata is
		considered valid if the mtime of the package directory has not changed
		since the data was cached.  The cache is stored in a pickled dict
		object with the following format:

		{version:"1", "packages":{cpv1:(mtime,{k1,v1, k2,v2, ...}), cpv2...}}

		If an error occurs while loading the cache pickle or the version is
		unrecognized, the cache will simply be recreated from scratch (it is
		completely disposable).
		"""
		cache_these_wants = self._aux_cache_keys.intersection(wants)
		for x in wants:
			if self._aux_cache_keys_re.match(x) is not None:
				cache_these_wants.add(x)

		if not cache_these_wants:
			return self._aux_get(mycpv, wants)

		cache_these = set(self._aux_cache_keys)
		cache_these.update(cache_these_wants)

		mydir = self.getpath(mycpv)
		mydir_stat = None
		try:
			mydir_stat = os.stat(mydir)
		except OSError, e:
			if e.errno != errno.ENOENT:
				raise
			raise KeyError(mycpv)
		mydir_mtime = long(mydir_stat.st_mtime)
		pkg_data = self._aux_cache["packages"].get(mycpv)
		pull_me = cache_these.union(wants)
		mydata = {"_mtime_" : mydir_mtime}
		cache_valid = False
		cache_incomplete = False
		cache_mtime = None
		metadata = None
		if pkg_data is not None:
			if not isinstance(pkg_data, tuple) or len(pkg_data) != 2:
				pkg_data = None
			else:
				cache_mtime, metadata = pkg_data
				if not isinstance(cache_mtime, (long, int)) or \
					not isinstance(metadata, dict):
					pkg_data = None

		if pkg_data:
			cache_mtime, metadata = pkg_data
			cache_valid = cache_mtime == mydir_mtime
		if cache_valid:
			mydata.update(metadata)
			pull_me.difference_update(mydata)

		if pull_me:
			# pull any needed data and cache it
			aux_keys = list(pull_me)
			for k, v in izip(aux_keys,
				self._aux_get(mycpv, aux_keys, st=mydir_stat)):
				mydata[k] = v
			if not cache_valid or cache_these.difference(metadata):
				cache_data = {}
				if cache_valid and metadata:
					cache_data.update(metadata)
				for aux_key in cache_these:
					cache_data[aux_key] = mydata[aux_key]
				self._aux_cache["packages"][mycpv] = (mydir_mtime, cache_data)
				self._aux_cache["modified"].add(mycpv)
		return [mydata[x] for x in wants]

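	# Illustrative sketch (not in the original source): the pickled cache
	# described above, populated with hypothetical data:
	#
	#	{"version": "1",
	#	 "packages": {"sys-apps/foo-1.0": (1218355200L,
	#		{"SLOT": "0", "COUNTER": "42", "RDEPEND": ">=sys-libs/bar-1"})},
	#	 "owners": {"version": "1", "base_names": {}},
	#	 "modified": set()}
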
	def _aux_get(self, mycpv, wants, st=None):
		mydir = self.getpath(mycpv)
		if st is None:
			try:
				st = os.stat(mydir)
			except OSError, e:
				if e.errno == errno.ENOENT:
					raise KeyError(mycpv)
				elif e.errno == PermissionDenied.errno:
					raise PermissionDenied(mydir)
				else:
					raise
		if not stat.S_ISDIR(st.st_mode):
			raise KeyError(mycpv)
		results = []
		for x in wants:
			if x == "_mtime_":
				results.append(st.st_mtime)
				continue
			try:
				myf = open(os.path.join(mydir, x), "r")
				try:
					myd = myf.read()
				finally:
					myf.close()
				# Preserve \n for metadata that is known to
				# contain multiple lines.
				if self._aux_multi_line_re.match(x) is None:
					myd = " ".join(myd.split())
			except IOError:
				myd = ""
			if x == "EAPI" and not myd:
				results.append("0")
			else:
				results.append(myd)
		return results

, cpv
, values
):
1123 cat
, pkg
= catsplit(cpv
)
1124 mylink
= dblink(cat
, pkg
, self
.root
, self
.settings
,
1125 treetype
="vartree", vartree
=self
.vartree
)
1126 if not mylink
.exists():
1128 for k
, v
in values
.iteritems():
1130 mylink
.setfile(k
, v
)
1133 os
.unlink(os
.path
.join(self
.getpath(cpv
), k
))
1134 except EnvironmentError:
	def counter_tick(self, myroot, mycpv=None):
		return self.counter_tick_core(myroot, incrementing=1, mycpv=mycpv)

	def get_counter_tick_core(self, myroot, mycpv=None):
		"""
		Use this method to retrieve the counter instead
		of having to trust the value of a global counter
		file that can lead to invalid COUNTER
		generation. When cache is valid, the package COUNTER
		files are not read and we rely on the timestamp of
		the package directory to validate cache. The stat
		calls should only take a short time, so performance
		is sufficient without having to rely on a potentially
		corrupt global counter file.

		The global counter file located at
		$CACHE_PATH/counter serves to record the
		counter of the last installed package and
		it also corresponds to the total number of
		installation actions that have occurred in
		the history of this package database.
		"""
		cp_list = self.cp_list
		max_counter = 0
		for cp in self.cp_all():
			for cpv in cp_list(cp):
				try:
					counter = int(self.aux_get(cpv, ["COUNTER"])[0])
				except (KeyError, OverflowError, ValueError):
					continue
				if counter > max_counter:
					max_counter = counter

		new_vdb = False
		counter = -1
		try:
			cfile = open(self._counter_path, "r")
		except EnvironmentError, e:
			new_vdb = not bool(self.cpv_all())
			if not new_vdb:
				writemsg("!!! Unable to read COUNTER file: '%s'\n" % \
					self._counter_path, noiselevel=-1)
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
			del e
		else:
			try:
				try:
					counter = long(cfile.readline().strip())
				finally:
					cfile.close()
			except (OverflowError, ValueError), e:
				writemsg("!!! COUNTER file is corrupt: '%s'\n" % \
					self._counter_path, noiselevel=-1)
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				del e

		# We must ensure that we return a counter
		# value that is at least as large as the
		# highest one from the installed packages,
		# since having a corrupt value that is too low
		# can trigger incorrect AUTOCLEAN behavior due
		# to newly installed packages having lower
		# COUNTERs than the previous version in the
		# same slot.
		if counter > max_counter:
			max_counter = counter

		if counter < 0 and not new_vdb:
			writemsg("!!! Initializing COUNTER to " + \
				"value of %d\n" % max_counter, noiselevel=-1)

		return max_counter + 1

	def counter_tick_core(self, myroot, incrementing=1, mycpv=None):
		"This method will grab the next COUNTER value and record it back to the global file.  Returns new counter value."
		counter = self.get_counter_tick_core(myroot, mycpv=mycpv) - 1
		if incrementing:
			#increment counter
			counter += 1
			# update new global counter file
			write_atomic(self._counter_path, str(counter))
		return counter

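	# Illustrative sketch (not in the original source): the counter handshake.
	# get_counter_tick_core() returns max(installed COUNTERs, global file) + 1,
	# so with installed COUNTERs {40, 41, 42} and a corrupt global file that
	# reads 7, it still returns 43; counter_tick_core() then writes 43 back
	# to $CACHE_PATH/counter.
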
	def _dblink(self, cpv):
		category, pf = catsplit(cpv)
		return dblink(category, pf, self.root,
			self.settings, vartree=self.vartree, treetype="vartree")

	def removeFromContents(self, pkg, paths, relative_paths=True):
		"""
		@param pkg: cpv for an installed package
		@type pkg: string
		@param paths: paths of files to remove from contents
		@type paths: iterable
		"""
		if not hasattr(pkg, "getcontents"):
			pkg = self._dblink(pkg)
		root = self.root
		root_len = len(root) - 1
		new_contents = pkg.getcontents().copy()
		removed = 0

		for filename in paths:
			filename = normalize_path(filename)
			if relative_paths:
				relative_filename = filename
			else:
				relative_filename = filename[root_len:]
			contents_key = pkg._match_contents(relative_filename, root)
			if contents_key:
				del new_contents[contents_key]
				removed += 1

		if removed:
			f = atomic_ofstream(os.path.join(pkg.dbdir, "CONTENTS"))
			write_contents(new_contents, root, f)
			f.close()

	class _owners_cache(object):
		"""
		This class maintains a hash table that serves to index package
		contents by mapping the basename of a file to a list of possible
		packages that own it.  This is used to optimize owner lookups
		by narrowing the search down to a smaller number of packages.
		"""
		try:
			from hashlib import md5 as _new_hash
		except ImportError:
			from md5 import new as _new_hash

		_hash_bits = 16
		_hex_chars = _hash_bits / 4

		def __init__(self, vardb):
			self._vardb = vardb

		def add(self, cpv):
			root_len = len(self._vardb.root)
			contents = self._vardb._dblink(cpv).getcontents()
			pkg_hash = self._hash_pkg(cpv)
			if not contents:
				# Empty path is a code used to represent empty contents.
				self._add_path("", pkg_hash)
			for x in contents:
				self._add_path(x[root_len:], pkg_hash)
			self._vardb._aux_cache["modified"].add(cpv)

		def _add_path(self, path, pkg_hash):
			"""
			Empty path is a code that represents empty contents.
			"""
			if path:
				name = os.path.basename(path.rstrip(os.path.sep))
				if not name:
					return
			else:
				name = path
			name_hash = self._hash_str(name)
			base_names = self._vardb._aux_cache["owners"]["base_names"]
			pkgs = base_names.get(name_hash)
			if pkgs is None:
				pkgs = {}
				base_names[name_hash] = pkgs
			pkgs[pkg_hash] = None

		def _hash_str(self, s):
			h = self._new_hash()
			h.update(s)
			h = h.hexdigest()
			h = h[-self._hex_chars:]
			h = int(h, 16)
			return h

		def _hash_pkg(self, cpv):
			counter, mtime = self._vardb.aux_get(
				cpv, ["COUNTER", "_mtime_"])
			try:
				counter = int(counter)
			except ValueError:
				counter = 0
			return (cpv, counter, mtime)

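	# Illustrative sketch (not in the original source): _hash_pkg() keys a
	# package by (cpv, COUNTER, mtime), so reinstalling or touching a package
	# changes its hash and its stale index entries get discarded during
	# repopulation.  Hypothetical value:
	#
	#	("sys-apps/foo-1.0", 42, 1218355200L)
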
	class _owners_db(object):

		def __init__(self, vardb):
			self._vardb = vardb

		def populate(self):
			self._populate()

		def _populate(self):
			owners_cache = vardbapi._owners_cache(self._vardb)
			cached_hashes = set()
			base_names = self._vardb._aux_cache["owners"]["base_names"]

			# Take inventory of all cached package hashes.
			for name, hash_values in base_names.items():
				if not isinstance(hash_values, dict):
					del base_names[name]
					continue
				cached_hashes.update(hash_values)

			# Create sets of valid package hashes and uncached packages.
			uncached_pkgs = set()
			hash_pkg = owners_cache._hash_pkg
			valid_pkg_hashes = set()
			for cpv in self._vardb.cpv_all():
				hash_value = hash_pkg(cpv)
				valid_pkg_hashes.add(hash_value)
				if hash_value not in cached_hashes:
					uncached_pkgs.add(cpv)

			# Cache any missing packages.
			for cpv in uncached_pkgs:
				owners_cache.add(cpv)

			# Delete any stale cache.
			stale_hashes = cached_hashes.difference(valid_pkg_hashes)
			if stale_hashes:
				for base_name_hash, bucket in base_names.items():
					for hash_value in stale_hashes.intersection(bucket):
						del bucket[hash_value]
					if not bucket:
						del base_names[base_name_hash]

			return owners_cache

, path_iter
):
1366 @return the owners as a dblink -> set(files) mapping.
1369 for owner
, f
in self
.iter_owners(path_iter
):
1370 owned_files
= owners
.get(owner
)
1371 if owned_files
is None:
1373 owners
[owner
] = owned_files
1377 def getFileOwnerMap(self
, path_iter
):
1378 owners
= self
.get_owners(path_iter
)
1380 for pkg_dblink
, files
in owners
.iteritems():
1382 owner_set
= file_owners
.get(f
)
1383 if owner_set
is None:
1385 file_owners
[f
] = owner_set
1386 owner_set
.add(pkg_dblink
)
		def iter_owners(self, path_iter):
			"""
			Iterate over tuples of (dblink, path).  In order to avoid
			consuming too many resources for too much time, resources
			are only allocated for the duration of a given iter_owners()
			call.  Therefore, to maximize reuse of resources when searching
			for multiple files, it's best to search for them all in a single
			call.
			"""
			owners_cache = self._populate()

			root = self._vardb.root
			hash_pkg = owners_cache._hash_pkg
			hash_str = owners_cache._hash_str
			base_names = self._vardb._aux_cache["owners"]["base_names"]

			dblink_cache = {}

			def dblink(cpv):
				x = dblink_cache.get(cpv)
				if x is None:
					x = self._vardb._dblink(cpv)
					dblink_cache[cpv] = x
				return x

			for path in path_iter:
				name = os.path.basename(path.rstrip(os.path.sep))
				if not name:
					continue

				name_hash = hash_str(name)
				pkgs = base_names.get(name_hash)
				if pkgs is not None:
					for hash_value in pkgs:
						if not isinstance(hash_value, tuple) or \
							len(hash_value) != 3:
							continue
						cpv, counter, mtime = hash_value
						if not isinstance(cpv, basestring):
							continue
						try:
							current_hash = hash_pkg(cpv)
						except KeyError:
							continue

						if current_hash != hash_value:
							continue
						if dblink(cpv).isowner(path, root):
							yield dblink(cpv), path

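		# Illustrative sketch (not in the original source): batching lookups
		# into one iter_owners() call, as the docstring above recommends
		# (hypothetical paths):
		#
		#	owners = vardb._owners
		#	for pkg, path in owners.iter_owners(
		#		["/usr/bin/foo", "/usr/lib/libbar.so.1"]):
		#		print pkg.mycpv, "owns", path
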
class vartree(object):
	"this tree will scan a var/db/pkg database located at root (passed to init)"
	def __init__(self, root="/", virtual=None, clone=None, categories=None,
		settings=None):
		if clone:
			writemsg("vartree.__init__(): deprecated " + \
				"use of clone parameter\n", noiselevel=-1)
			self.root = clone.root[:]
			self.dbapi = copy.deepcopy(clone.dbapi)
			self.populated = 1
			from portage import config
			self.settings = config(clone=clone.settings)
		else:
			self.root = root[:]
			if settings is None:
				from portage import settings
			self.settings = settings # for key_expand calls
			if categories is None:
				categories = settings.categories
			self.dbapi = vardbapi(self.root, categories=categories,
				settings=settings, vartree=self)
			self.populated = 1

	def getpath(self, mykey, filename=None):
		return self.dbapi.getpath(mykey, filename=filename)

	def zap(self, mycpv):
		return

	def inject(self, mycpv):
		return

, mycpv
):
1477 mylines
, myuse
= self
.dbapi
.aux_get(mycpv
, ["PROVIDE", "USE"])
1479 myuse
= myuse
.split()
1480 mylines
= flatten(use_reduce(paren_reduce(mylines
), uselist
=myuse
))
1481 for myprovide
in mylines
:
1482 mys
= catpkgsplit(myprovide
)
1484 mys
= myprovide
.split("/")
1485 myprovides
+= [mys
[0] + "/" + mys
[1]]
1487 except SystemExit, e
:
1489 except Exception, e
:
1490 mydir
= os
.path
.join(self
.root
, VDB_PATH
, mycpv
)
1491 writemsg("\nParse Error reading PROVIDE and USE in '%s'\n" % mydir
,
1494 writemsg("Possibly Invalid: '%s'\n" % str(mylines
),
1496 writemsg("Exception: %s\n\n" % str(e
), noiselevel
=-1)
1499 def get_all_provides(self
):
1501 for node
in self
.getallcpv():
1502 for mykey
in self
.get_provide(node
):
1503 if mykey
in myprovides
:
1504 myprovides
[mykey
] += [node
]
1506 myprovides
[mykey
] = [node
]
	def dep_bestmatch(self, mydep, use_cache=1):
		"compatibility method -- all matches, not just visible ones"
		#mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
		mymatch = best(self.dbapi.match(
			dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
			use_cache=use_cache))
		if mymatch is None:
			return ""
		else:
			return mymatch

	def dep_match(self, mydep, use_cache=1):
		"compatibility method -- we want to see all matches, not just visible ones"
		#mymatch = match(mydep,self.dbapi)
		mymatch = self.dbapi.match(mydep, use_cache=use_cache)
		if mymatch is None:
			return []
		else:
			return mymatch

	def exists_specific(self, cpv):
		return self.dbapi.cpv_exists(cpv)

	def getallcpv(self):
		"""temporary function, probably to be renamed --- Gets a list of all
		category/package-versions installed on the system."""
		return self.dbapi.cpv_all()

	def getallnodes(self):
		"""new behavior: these are all *unmasked* nodes. There may or may not be available
		masked package for nodes in this nodes list."""
		return self.dbapi.cp_all()

	def exists_specific_cat(self, cpv, use_cache=1):
		cpv = key_expand(cpv, mydb=self.dbapi, use_cache=use_cache,
			settings=self.settings)
		a = catpkgsplit(cpv)
		if not a:
			return 0
		mylist = listdir(self.getpath(a[0]), EmptyOnError=1)
		for x in mylist:
			b = pkgsplit(x)
			if not b:
				self.dbapi.invalidentry(self.getpath(a[0], filename=x))
				continue
			if a[1] == b[0]:
				return 1
		return 0

	def getebuildpath(self, fullpackage):
		cat, package = catsplit(fullpackage)
		return self.getpath(fullpackage, filename=package+".ebuild")

	def getnode(self, mykey, use_cache=1):
		mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
			settings=self.settings)
		if not mykey:
			return []
		mysplit = catsplit(mykey)
		mydirlist = listdir(self.getpath(mysplit[0]), EmptyOnError=1)
		returnme = []
		for x in mydirlist:
			mypsplit = pkgsplit(x)
			if not mypsplit:
				self.dbapi.invalidentry(self.getpath(mysplit[0], filename=x))
				continue
			if mypsplit[0] == mysplit[1]:
				appendme = [mysplit[0]+"/"+x, [mysplit[0], mypsplit[0], mypsplit[1], mypsplit[2]]]
				returnme.append(appendme)
		return returnme

	def getslot(self, mycatpkg):
		"Get a slot for a catpkg; assume it exists."
		try:
			return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
		except KeyError:
			return ""

	def hasnode(self, mykey, use_cache):
		"""Does the particular node (cat/pkg key) exist?"""
		mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
			settings=self.settings)
		mysplit = catsplit(mykey)
		mydirlist = listdir(self.getpath(mysplit[0]), EmptyOnError=1)
		for x in mydirlist:
			mypsplit = pkgsplit(x)
			if not mypsplit:
				self.dbapi.invalidentry(self.getpath(mysplit[0], filename=x))
				continue
			if mypsplit[0] == mysplit[1]:
				return 1
		return 0

class dblink(object):
	"""
	This class provides an interface to the installed package database
	At present this is implemented as a text backend in /var/db/pkg.
	"""

	_normalize_needed = re.compile(r'.*//.*|^[^/]|.+/$|(^|.*/)\.\.?(/.*|$)')
	_contents_split_counts = {
		"dev": 2,
		"dir": 2,
		"fif": 2,
		"obj": 4,
		"sym": 5
	}

	# When looping over files for merge/unmerge, temporarily yield to the
	# scheduler each time this many files are processed.
	_file_merge_yield_interval = 20

	def __init__(self, cat, pkg, myroot, mysettings, treetype=None,
		vartree=None, blockers=None, scheduler=None):
		"""
		Creates a DBlink object for a given CPV.
		The given CPV may not be present in the database already.

		@param cat: Category
		@type cat: String
		@param pkg: Package (PV)
		@type pkg: String
		@param myroot: Typically ${ROOT}
		@type myroot: String (Path)
		@param mysettings: Typically portage.config
		@type mysettings: An instance of portage.config
		@param treetype: one of ['porttree','bintree','vartree']
		@type treetype: String
		@param vartree: an instance of vartree corresponding to myroot.
		@type vartree: vartree
		"""

		self.cat = cat
		self.pkg = pkg
		self.mycpv = self.cat + "/" + self.pkg
		self.mysplit = list(catpkgsplit(self.mycpv)[1:])
		self.mysplit[0] = "%s/%s" % (self.cat, self.mysplit[0])
		self.treetype = treetype
		if vartree is None:
			from portage import db
			vartree = db[myroot]["vartree"]
		self.vartree = vartree
		self._blockers = blockers
		self._scheduler = scheduler

		self.dbroot = normalize_path(os.path.join(myroot, VDB_PATH))
		self.dbcatdir = self.dbroot+"/"+cat
		self.dbpkgdir = self.dbcatdir+"/"+pkg
		self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
		self.dbdir = self.dbpkgdir

		self._lock_vdb = None

		self.settings = mysettings
		if self.settings == 1:
			raise ValueError

		self.myroot = myroot
		protect_obj = ConfigProtect(myroot,
			mysettings.get("CONFIG_PROTECT","").split(),
			mysettings.get("CONFIG_PROTECT_MASK","").split())
		self.updateprotect = protect_obj.updateprotect
		self.isprotected = protect_obj.isprotected
		self._installed_instance = None
		self.contentscache = None
		self._contents_inodes = None
		self._contents_basenames = None

1685 # At least the parent needs to exist for the lock file.
1686 ensure_dirs(self
.dbroot
)
1687 self
._lock
_vdb
= lockdir(self
.dbroot
)
1691 unlockdir(self
._lock
_vdb
)
1692 self
._lock
_vdb
= None
1695 "return path to location of db information (for >>> informational display)"
1699 "does the db entry exist? boolean."
1700 return os
.path
.exists(self
.dbdir
)
	def delete(self):
		"""
		Remove this entry from the database
		"""
		if not os.path.exists(self.dbdir):
			return

		# Check validity of self.dbdir before attempting to remove it.
		if not self.dbdir.startswith(self.dbroot):
			writemsg("portage.dblink.delete(): invalid dbdir: %s\n" % \
				self.dbdir, noiselevel=-1)
			return
		import shutil
		shutil.rmtree(self.dbdir)
		self.vartree.dbapi._remove(self)

	def clearcontents(self):
		"""
		For a given db entry (self), erase the CONTENTS values.
		"""
		if os.path.exists(self.dbdir+"/CONTENTS"):
			os.unlink(self.dbdir+"/CONTENTS")

	def _clear_contents_cache(self):
		self.contentscache = None
		self._contents_inodes = None
		self._contents_basenames = None

	def getcontents(self):
		"""
		Get the installed files of a given package (aka what that package installed)
		"""
		contents_file = os.path.join(self.dbdir, "CONTENTS")
		if self.contentscache is not None:
			return self.contentscache
		pkgfiles = {}
		try:
			myc = open(contents_file, "r")
		except EnvironmentError, e:
			if e.errno != errno.ENOENT:
				raise
			del e
			self.contentscache = pkgfiles
			return pkgfiles
		mylines = myc.readlines()
		myc.close()
		null_byte = "\0"
		normalize_needed = self._normalize_needed
		contents_split_counts = self._contents_split_counts
		myroot = self.myroot
		if myroot == os.path.sep:
			myroot = None
		errors = []
		for pos, line in enumerate(mylines):
			if null_byte in line:
				# Null bytes are a common indication of corruption.
				errors.append((pos + 1, "Null byte found in CONTENTS entry"))
				continue
			line = line.rstrip("\n")
			# Split on " " so that even file paths that
			# end with spaces can be handled.
			mydat = line.split(" ")
			entry_type = mydat[0] # empty string if line is empty
			correct_split_count = contents_split_counts.get(entry_type)
			if correct_split_count and len(mydat) > correct_split_count:
				# Apparently file paths contain spaces, so reassemble
				# the split so that it has the correct_split_count.
				newsplit = [entry_type]
				spaces_total = len(mydat) - correct_split_count
				if entry_type == "sym":
					try:
						splitter = mydat.index("->", 2, len(mydat) - 2)
					except ValueError:
						errors.append((pos + 1, "Unrecognized CONTENTS entry"))
						continue
					spaces_in_path = splitter - 2
					spaces_in_target = spaces_total - spaces_in_path
					newsplit.append(" ".join(mydat[1:splitter]))
					newsplit.append("->")
					target_end = splitter + spaces_in_target + 2
					newsplit.append(" ".join(mydat[splitter + 1:target_end]))
					newsplit.extend(mydat[target_end:])
				else:
					path_end = spaces_total + 2
					newsplit.append(" ".join(mydat[1:path_end]))
					newsplit.extend(mydat[path_end:])
				mydat = newsplit

			# we do this so we can remove from non-root filesystems
			# (use the ROOT var to allow maintenance on other partitions)
			try:
				if normalize_needed.match(mydat[1]):
					mydat[1] = normalize_path(mydat[1])
					if not mydat[1].startswith(os.path.sep):
						mydat[1] = os.path.sep + mydat[1]
				if myroot:
					mydat[1] = os.path.join(myroot, mydat[1].lstrip(os.path.sep))
				if mydat[0] == "obj":
					#format: type, mtime, md5sum
					pkgfiles[mydat[1]] = [mydat[0], mydat[3], mydat[2]]
				elif mydat[0] == "dir":
					#format: type
					pkgfiles[mydat[1]] = [mydat[0]]
				elif mydat[0] == "sym":
					#format: type, mtime, dest
					pkgfiles[mydat[1]] = [mydat[0], mydat[4], mydat[3]]
				elif mydat[0] == "dev":
					#format: type
					pkgfiles[mydat[1]] = [mydat[0]]
				elif mydat[0] == "fif":
					#format: type
					pkgfiles[mydat[1]] = [mydat[0]]
				else:
					errors.append((pos + 1, "Unrecognized CONTENTS entry"))
			except (KeyError, IndexError):
				errors.append((pos + 1, "Unrecognized CONTENTS entry"))
		if errors:
			writemsg("!!! Parse error in '%s'\n" % contents_file, noiselevel=-1)
			for pos, e in errors:
				writemsg("!!!   line %d: %s\n" % (pos, e), noiselevel=-1)
		self.contentscache = pkgfiles
		return pkgfiles

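	# Illustrative sketch (not in the original source): CONTENTS lines and the
	# pkgfiles entries getcontents() builds from them (hypothetical paths and
	# checksums):
	#
	#	dir /usr/bin                          -> {'/usr/bin': ['dir']}
	#	obj /usr/bin/foo d41d8c... 1218355200 -> ['obj', '1218355200', 'd41d8c...']
	#	sym /usr/bin/f -> foo 1218355200      -> ['sym', '1218355200', 'foo']
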
	def unmerge(self, pkgfiles=None, trimworld=1, cleanup=1,
		ldpath_mtimes=None, others_in_slot=None):
		"""
		Unmerges a given package (CPV)

		@param pkgfiles: files to unmerge (generally self.getcontents() )
		@type pkgfiles: Dictionary
		@param trimworld: Remove CPV from world file if True, not if False
		@type trimworld: Boolean
		@param cleanup: cleanup to pass to doebuild (see doebuild)
		@type cleanup: Boolean
		@param ldpath_mtimes: mtimes to pass to env_update (see env_update)
		@type ldpath_mtimes: Dictionary
		@param others_in_slot: all dblink instances in this slot, excluding self
		@type others_in_slot: list
		@rtype: Integer
		@returns:
		1. os.EX_OK if everything went well.
		2. return code of the failed phase (for prerm, postrm, cleanrm)

		Notes:
		The caller must ensure that lockdb() and unlockdb() are called
		before and after this method.
		"""
		showMessage = self._display_merge
		if self.vartree.dbapi._categories is not None:
			self.vartree.dbapi._categories = None
		# When others_in_slot is supplied, the security check has already been
		# done for this slot, so it shouldn't be repeated until the next
		# replacement or unmerge operation.
		if others_in_slot is None:
			slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
			slot_matches = self.vartree.dbapi.match(
				"%s:%s" % (dep_getkey(self.mycpv), slot))
			others_in_slot = []
			for cur_cpv in slot_matches:
				if cur_cpv == self.mycpv:
					continue
				others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
					self.vartree.root, self.settings, vartree=self.vartree,
					treetype="vartree"))

		retval = self._security_check([self] + others_in_slot)
		if retval:
			return retval

		contents = self.getcontents()
		# Now, don't assume that the name of the ebuild is the same as the
		# name of the dir; the package may have been moved.
		myebuildpath = None
		ebuild_phase = "prerm"
		mystuff = os.listdir(self.dbdir)
		for x in mystuff:
			if x.endswith(".ebuild"):
				myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
				if x[:-7] != self.pkg:
					# Clean up after vardbapi.move_ent() breakage in
					# portage versions before 2.1.2
					os.rename(os.path.join(self.dbdir, x), myebuildpath)
					write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
				break

		self.settings.setcpv(self.mycpv, mydb=self.vartree.dbapi)
		if myebuildpath:
			try:
				doebuild_environment(myebuildpath, "prerm", self.myroot,
					self.settings, 0, 0, self.vartree.dbapi)
			except UnsupportedAPIException, e:
				# Sometimes this happens due to corruption of the EAPI file.
				writemsg("!!! FAILED prerm: %s\n" % \
					os.path.join(self.dbdir, "EAPI"), noiselevel=-1)
				writemsg("%s\n" % str(e), noiselevel=-1)
				myebuildpath = None
			else:
				catdir = os.path.dirname(self.settings["PORTAGE_BUILDDIR"])
				ensure_dirs(os.path.dirname(catdir), uid=portage_uid,
					gid=portage_gid, mode=070, mask=0)

		builddir_lock = None
		catdir_lock = None
		scheduler = self._scheduler
		retval = -1
		try:
			if myebuildpath:
				catdir_lock = lockdir(catdir)
				ensure_dirs(catdir,
					uid=portage_uid, gid=portage_gid,
					mode=070, mask=0)
				builddir_lock = lockdir(
					self.settings["PORTAGE_BUILDDIR"])
				try:
					unlockdir(catdir_lock)
				finally:
					catdir_lock = None

				prepare_build_dirs(self.myroot, self.settings, 1)
				log_path = self.settings.get("PORTAGE_LOG_FILE")

				if scheduler is None:
					retval = doebuild(myebuildpath, ebuild_phase, self.myroot,
						self.settings, cleanup=cleanup, use_cache=0,
						mydbapi=self.vartree.dbapi, tree=self.treetype,
						vartree=self.vartree)
				else:
					retval = scheduler.dblinkEbuildPhase(
						self, self.vartree.dbapi, myebuildpath, ebuild_phase)

				# XXX: Decide how to handle failures here.
				if retval != os.EX_OK:
					writemsg("!!! FAILED prerm: %s\n" % retval, noiselevel=-1)

			self._unmerge_pkgfiles(pkgfiles, others_in_slot)

			# Remove the registration of preserved libs for this pkg instance
			plib_registry = self.vartree.dbapi.plib_registry
			plib_registry.unregister(self.mycpv, self.settings["SLOT"],
				self.vartree.dbapi.cpv_counter(self.mycpv))

			if myebuildpath:
				ebuild_phase = "postrm"
				if scheduler is None:
					retval = doebuild(myebuildpath, ebuild_phase, self.myroot,
						self.settings, use_cache=0, tree=self.treetype,
						mydbapi=self.vartree.dbapi, vartree=self.vartree)
				else:
					retval = scheduler.dblinkEbuildPhase(
						self, self.vartree.dbapi, myebuildpath, ebuild_phase)

				# XXX: Decide how to handle failures here.
				if retval != os.EX_OK:
					writemsg("!!! FAILED postrm: %s\n" % retval, noiselevel=-1)

			# regenerate reverse NEEDED map
			self.vartree.dbapi.linkmap.rebuild()

			# remove preserved libraries that don't have any consumers left
			# FIXME: this code is quite ugly and can likely be optimized in several ways
			plib_dict = plib_registry.getPreservedLibs()
			for cpv in plib_dict:
				plib_dict[cpv].sort()
				# for the loop below to work correctly, we need all
				# symlinks to come before the actual files, such that
				# the recorded symlinks (sonames) will be resolved into
				# their real target before the object is found not to be
				# in the reverse NEEDED map
				def symlink_compare(x, y):
					if os.path.islink(x):
						if os.path.islink(y):
							return 0
						else:
							return -1
					elif os.path.islink(y):
						return 1
					else:
						return 0

				plib_dict[cpv].sort(symlink_compare)
				for f in plib_dict[cpv]:
					if not os.path.exists(f):
						continue
					unlink_list = []
					consumers = self.vartree.dbapi.linkmap.findConsumers(f)
					if not consumers:
						unlink_list.append(f)
					else:
						keep = False
						for c in consumers:
							if c not in self.getcontents():
								keep = True
								break
						if not keep:
							unlink_list.append(f)
					for obj in unlink_list:
						try:
							if os.path.islink(obj):
								obj_type = "sym"
							else:
								obj_type = "obj"
							os.unlink(obj)
							showMessage("<<< !needed  %s %s\n" % (obj_type, obj))
						except OSError, e:
							if e.errno == errno.ENOENT:
								pass
							else:
								raise e
			plib_registry.pruneNonExisting()

		finally:
			if builddir_lock:
				unlockdir(builddir_lock)
			if catdir_lock:
				unlockdir(catdir_lock)
			if retval != os.EX_OK:
				msg_lines = []
				msg = ("The '%s' " % ebuild_phase) + \
					("phase of the '%s' package " % self.mycpv) + \
					("has failed with exit value %s." % retval)
				from textwrap import wrap
				msg_lines.extend(wrap(msg, 72))
				msg_lines.append("")

				if myebuildpath:
					ebuild_name = os.path.basename(myebuildpath)
					ebuild_dir = os.path.dirname(myebuildpath)
					msg = "The problem occurred while executing " + \
						("the ebuild file named '%s' " % ebuild_name) + \
						("located in the '%s' directory. " \
						% ebuild_dir) + \
						"If necessary, manually remove " + \
						"the environment.bz2 file and/or the " + \
						"ebuild file located in that directory."
					msg_lines.extend(wrap(msg, 72))
					msg_lines.append("")

					msg = "Removal " + \
						"of the environment.bz2 file is " + \
						"preferred since it may allow the " + \
						"removal phases to execute successfully. " + \
						"The ebuild will be " + \
						"sourced and the eclasses " + \
						"from the current portage tree will be used " + \
						"when necessary. Removal of " + \
						"the ebuild file will cause the " + \
						"pkg_prerm() and pkg_postrm() removal " + \
						"phases to be skipped entirely."
					msg_lines.extend(wrap(msg, 72))

				self._eerror(ebuild_phase, msg_lines)

		# process logs created during pre/postrm
		elog_process(self.mycpv, self.settings, phasefilter=filter_unmergephases)
		if retval == os.EX_OK:
			doebuild(myebuildpath
1967 # FIXME: this code is quite ugly and can likely be optimized in several ways
1968 plib_dict
= plib_registry
.getPreservedLibs()
1969 for cpv
in plib_dict
:
1970 plib_dict
[cpv
].sort()
1971 # for the loop below to work correctly, we need all
1972 # symlinks to come before the actual files, such that
1973 # the recorded symlinks (sonames) will be resolved into
1974 # their real target before the object is found not to be
1975 # in the reverse NEEDED map
1976 def symlink_compare(x
, y
):
1977 if os
.path
.islink(x
):
1978 if os
.path
.islink(y
):
1982 elif os
.path
.islink(y
):
1987 plib_dict
[cpv
].sort(symlink_compare
)
1988 for f
in plib_dict
[cpv
]:
1989 if not os
.path
.exists(f
):
1992 consumers
= self
.vartree
.dbapi
.linkmap
.findConsumers(f
)
1994 unlink_list
.append(f
)
1998 if c
not in self
.getcontents():
2002 unlink_list
.append(f
)
2003 for obj
in unlink_list
:
2005 if os
.path
.islink(obj
):
2010 showMessage("<<< !needed %s %s\n" % (obj_type
, obj
))
2012 if e
.errno
== errno
.ENOENT
:
2016 plib_registry
.pruneNonExisting()
2022 if retval
!= os
.EX_OK
:
2024 msg
= ("The '%s' " % ebuild_phase
) + \
2025 ("phase of the '%s' package " % self
.mycpv
) + \
2026 ("has failed with exit value %s." % retval
)
2027 from textwrap
import wrap
2028 msg_lines
.extend(wrap(msg
, 72))
2029 msg_lines
.append("")
2031 ebuild_name
= os
.path
.basename(myebuildpath
)
2032 ebuild_dir
= os
.path
.dirname(myebuildpath
)
2033 msg
= "The problem occurred while executing " + \
2034 ("the ebuild file named '%s' " % ebuild_name
) + \
2035 ("located in the '%s' directory. " \
2037 "If necessary, manually remove " + \
2038 "the environment.bz2 file and/or the " + \
2039 "ebuild file located in that directory."
2040 msg_lines
.extend(wrap(msg
, 72))
2041 msg_lines
.append("")
2043 msg
= "Removal " + \
2044 "of the environment.bz2 file is " + \
2045 "preferred since it may allow the " + \
2046 "removal phases to execute successfully. " + \
2047 "The ebuild will be " + \
2048 "sourced and the eclasses " + \
2049 "from the current portage tree will be used " + \
2050 "when necessary. Removal of " + \
2051 "the ebuild file will cause the " + \
2052 "pkg_prerm() and pkg_postrm() removal " + \
2053 "phases to be skipped entirely."
2054 msg_lines
.extend(wrap(msg
, 72))
2056 self
._eerror
(ebuild_phase
, msg_lines
)
2058 # process logs created during pre/postrm
2059 elog_process(self
.mycpv
, self
.settings
, phasefilter
=filter_unmergephases
)
2060 if retval
== os
.EX_OK
:
2061 doebuild(myebuildpath
, "cleanrm", self
.myroot
,
2062 self
.settings
, tree
="vartree",
2063 mydbapi
=self
.vartree
.dbapi
,
2064 vartree
=self
.vartree
)
2066 unlockdir(builddir_lock
)
2068 if myebuildpath
and not catdir_lock
:
2069 # Lock catdir for removal if empty.
2070 catdir_lock
= lockdir(catdir
)
2076 if e
.errno
not in (errno
.ENOENT
,
2077 errno
.ENOTEMPTY
, errno
.EEXIST
):
2080 unlockdir(catdir_lock
)
2082 if log_path
is not None and os
.path
.exists(log_path
):
2083 # Restore this since it gets lost somewhere above and it
2084 # needs to be set for _display_merge() to be able to log.
2085 # Note that the log isn't necessarily supposed to exist
2086 # since if PORT_LOGDIR is unset then it's a temp file
2087 # so it gets cleaned above.
2088 self
.settings
["PORTAGE_LOG_FILE"] = log_path
2090 self
.settings
.pop("PORTAGE_LOG_FILE", None)
2092 env_update(target_root
=self
.myroot
, prev_mtimes
=ldpath_mtimes
,
2093 contents
=contents
, env
=self
.settings
.environ(),
2094 writemsg_level
=self
._display
_merge
)
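
	# Illustrative usage sketch (not from the original source): per the
	# docstring above, callers must hold the vdb lock around unmerge().
	# The category/package values below are hypothetical example data.
	#
	#	mylink = dblink("app-misc", "foo-1.0", "/", mysettings,
	#		vartree=myvartree, treetype="vartree")
	#	mylink.lockdb()
	#	try:
	#		retval = mylink.unmerge(ldpath_mtimes=prev_mtimes)
	#	finally:
	#		mylink.unlockdb()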
	def _display_merge(self, msg, level=0, noiselevel=0):
		if self._scheduler is not None:
			self._scheduler.dblinkDisplayMerge(self, msg,
				level=level, noiselevel=noiselevel)
			return
		writemsg_level(msg, level=level, noiselevel=noiselevel)
	def _unmerge_pkgfiles(self, pkgfiles, others_in_slot):
		"""

		Unmerges the contents of a package from the liveFS
		Removes the VDB entry for self

		@param pkgfiles: typically self.getcontents()
		@type pkgfiles: Dictionary { filename: [ 'type', '?', 'md5sum' ] }
		@param others_in_slot: all dblink instances in this slot, excluding self
		@type others_in_slot: list
		"""
		showMessage = self._display_merge
		scheduler = self._scheduler

		if not pkgfiles:
			showMessage("No package files given... Grabbing a set.\n")
			pkgfiles = self.getcontents()

		if others_in_slot is None:
			others_in_slot = []
			slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
			slot_matches = self.vartree.dbapi.match(
				"%s:%s" % (dep_getkey(self.mycpv), slot))
			for cur_cpv in slot_matches:
				if cur_cpv == self.mycpv:
					continue
				others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
					self.vartree.root, self.settings,
					vartree=self.vartree, treetype="vartree"))

		dest_root = normalize_path(self.vartree.root).rstrip(os.path.sep) + \
			os.path.sep
		dest_root_len = len(dest_root) - 1

		conf_mem_file = os.path.join(dest_root, CONFIG_MEMORY_FILE)
		cfgfiledict = grabdict(conf_mem_file)
		stale_confmem = []

		unmerge_orphans = "unmerge-orphans" in self.settings.features

		if pkgfiles:
			self.updateprotect()
			mykeys = pkgfiles.keys()
			mykeys.sort()
			mykeys.reverse()

			#process symlinks second-to-last, directories last.
			mydirs = []
			ignored_unlink_errnos = (
				errno.EBUSY, errno.ENOENT,
				errno.ENOTDIR, errno.EISDIR)
			ignored_rmdir_errnos = (
				errno.EEXIST, errno.ENOTEMPTY,
				errno.EBUSY, errno.ENOENT,
				errno.ENOTDIR, errno.EISDIR)
			modprotect = os.path.join(self.vartree.root, "lib/modules/")

			def unlink(file_name, lstatobj):
				if bsd_chflags:
					if lstatobj.st_flags != 0:
						bsd_chflags.lchflags(file_name, 0)
					parent_name = os.path.dirname(file_name)
					# Use normal stat/chflags for the parent since we want to
					# follow any symlinks to the real parent directory.
					pflags = os.stat(parent_name).st_flags
					if pflags != 0:
						bsd_chflags.chflags(parent_name, 0)
				try:
					if not stat.S_ISLNK(lstatobj.st_mode):
						# Remove permissions to ensure that any hardlinks to
						# suid/sgid files are rendered harmless.
						os.chmod(file_name, 0)
					os.unlink(file_name)
				finally:
					if bsd_chflags and pflags != 0:
						# Restore the parent flags we saved before unlinking
						bsd_chflags.chflags(parent_name, pflags)

			def show_unmerge(zing, desc, file_type, file_name):
				showMessage("%s %s %s %s\n" % \
					(zing, desc.ljust(8), file_type, file_name))
			for i, objkey in enumerate(mykeys):

				if scheduler is not None and \
					0 == i % self._file_merge_yield_interval:
					scheduler.scheduleYield()

				obj = normalize_path(objkey)
				file_data = pkgfiles[objkey]
				file_type = file_data[0]
				statobj = None
				try:
					statobj = os.stat(obj)
				except OSError:
					pass
				lstatobj = None
				try:
					lstatobj = os.lstat(obj)
				except (OSError, AttributeError):
					pass
				islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
				if lstatobj is None:
					show_unmerge("---", "!found", file_type, obj)
					continue
				if obj.startswith(dest_root):
					relative_path = obj[dest_root_len:]
					is_owned = False
					for dblnk in others_in_slot:
						if dblnk.isowner(relative_path, dest_root):
							is_owned = True
							break
					if is_owned:
						# A new instance of this package claims the file, so
						# don't unmerge it.
						show_unmerge("---", "replaced", file_type, obj)
						continue
					elif relative_path in cfgfiledict:
						stale_confmem.append(relative_path)
				# next line includes a tweak to protect modules from being unmerged,
				# but we don't protect modules from being overwritten if they are
				# upgraded. We effectively only want one half of the config protection
				# functionality for /lib/modules. For portage-ng both capabilities
				# should be able to be independently specified.
				if obj.startswith(modprotect):
					show_unmerge("---", "cfgpro", file_type, obj)
					continue

				# Don't unlink symlinks to directories here since that can
				# remove /lib and /usr/lib symlinks.
				if unmerge_orphans and \
					lstatobj and not stat.S_ISDIR(lstatobj.st_mode) and \
					not (islink and statobj and stat.S_ISDIR(statobj.st_mode)) and \
					not self.isprotected(obj):
					try:
						unlink(obj, lstatobj)
					except EnvironmentError, e:
						if e.errno not in ignored_unlink_errnos:
							raise
						del e
					show_unmerge("<<<", "", file_type, obj)
					continue

				lmtime = str(lstatobj[stat.ST_MTIME])
				if (pkgfiles[objkey][0] not in ("dir", "fif", "dev")) and (lmtime != pkgfiles[objkey][1]):
					show_unmerge("---", "!mtime", file_type, obj)
					continue

				if pkgfiles[objkey][0] == "dir":
					if statobj is None or not stat.S_ISDIR(statobj.st_mode):
						show_unmerge("---", "!dir", file_type, obj)
						continue
					mydirs.append(obj)
				elif pkgfiles[objkey][0] == "sym":
					if not islink:
						show_unmerge("---", "!sym", file_type, obj)
						continue
					# Go ahead and unlink symlinks to directories here when
					# they're actually recorded as symlinks in the contents.
					# Normally, symlinks such as /lib -> lib64 are not recorded
					# as symlinks in the contents of a package. If a package
					# installs something into ${D}/lib/, it is recorded in the
					# contents as a directory even if it happens to correspond
					# to a symlink when it's merged to the live filesystem.
					try:
						unlink(obj, lstatobj)
						show_unmerge("<<<", "", file_type, obj)
					except (OSError, IOError), e:
						if e.errno not in ignored_unlink_errnos:
							raise
						del e
						show_unmerge("!!!", "", file_type, obj)
				elif pkgfiles[objkey][0] == "obj":
					if statobj is None or not stat.S_ISREG(statobj.st_mode):
						show_unmerge("---", "!obj", file_type, obj)
						continue
					mymd5 = None
					try:
						mymd5 = perform_md5(obj, calc_prelink=1)
					except FileNotFound, e:
						# the file has disappeared between now and our stat call
						show_unmerge("---", "!obj", file_type, obj)
						continue

					# string.lower is needed because db entries used to be in upper-case. The
					# string.lower allows for backwards compatibility.
					if mymd5 != pkgfiles[objkey][2].lower():
						show_unmerge("---", "!md5", file_type, obj)
						continue
					try:
						unlink(obj, lstatobj)
					except (OSError, IOError), e:
						if e.errno not in ignored_unlink_errnos:
							raise
						del e
					show_unmerge("<<<", "", file_type, obj)
				elif pkgfiles[objkey][0] == "fif":
					if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
						show_unmerge("---", "!fif", file_type, obj)
						continue
					show_unmerge("---", "", file_type, obj)
				elif pkgfiles[objkey][0] == "dev":
					show_unmerge("---", "", file_type, obj)

			mydirs.sort()
			mydirs.reverse()

			for obj in mydirs:
				try:
					if bsd_chflags:
						lstatobj = os.lstat(obj)
						if lstatobj.st_flags != 0:
							bsd_chflags.lchflags(obj, 0)
						parent_name = os.path.dirname(obj)
						# Use normal stat/chflags for the parent since we want to
						# follow any symlinks to the real parent directory.
						pflags = os.stat(parent_name).st_flags
						if pflags != 0:
							bsd_chflags.chflags(parent_name, 0)
					try:
						os.rmdir(obj)
					finally:
						if bsd_chflags and pflags != 0:
							# Restore the parent flags we saved before unlinking
							bsd_chflags.chflags(parent_name, pflags)
					show_unmerge("<<<", "", "dir", obj)
				except EnvironmentError, e:
					if e.errno not in ignored_rmdir_errnos:
						raise
					if e.errno != errno.ENOENT:
						show_unmerge("---", "!empty", "dir", obj)
					del e

		# Remove stale entries from config memory.
		if stale_confmem:
			for filename in stale_confmem:
				del cfgfiledict[filename]
			writedict(cfgfiledict, conf_mem_file)

		#remove self from vartree database so that our own virtual gets zapped if we're the last node
		self.vartree.zap(self.mycpv)
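
	# For reference, the pkgfiles mapping consumed above mirrors CONTENTS
	# entries; a sketch of the value shapes the dispatch expects (all paths,
	# mtimes and md5 digests below are made-up example data):
	#
	#	pkgfiles = {
	#		"/usr/bin/foo":       ["obj", "1218357936", "d41d8cd98f00b204e9800998ecf8427e"],
	#		"/usr/lib/libfoo.so": ["sym", "1218357936", "libfoo.so.1"],
	#		"/usr/share/foo":     ["dir"],
	#	}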
	def isowner(self, filename, destroot):
		"""
		Check if a file belongs to this package. This may
		result in a stat call for the parent directory of
		every installed file, since the inode numbers are
		used to work around the problem of ambiguous paths
		caused by symlinked directories. The results of
		stat calls are cached to optimize multiple calls
		in the same process.

		@param filename: path of the file to check, relative to destroot
		@type filename: String
		@param destroot: ${ROOT} prefix that filename is joined with
		@type destroot: String (Path)
		@rtype: Boolean
		@returns:
		1. True if this package owns the file.
		2. False if this package does not own the file.
		"""
		return bool(self._match_contents(filename, destroot))
	def _match_contents(self, filename, destroot):
		"""
		The matching contents entry is returned, which is useful
		since the path may differ from the one given by the caller,
		due to symlinked directories.

		@return: the contents entry corresponding to the given path, or False
			if the file is not owned by this package.
		"""

		destfile = normalize_path(
			os.path.join(destroot, filename.lstrip(os.path.sep)))

		pkgfiles = self.getcontents()
		if pkgfiles and destfile in pkgfiles:
			return destfile
		if pkgfiles:
			basename = os.path.basename(destfile)
			if self._contents_basenames is None:
				self._contents_basenames = set(
					os.path.basename(x) for x in pkgfiles)
			if basename not in self._contents_basenames:
				# This is a shortcut that, in most cases, allows us to
				# eliminate this package as an owner without the need
				# to examine inode numbers of parent directories.
				return False

			# Use stat rather than lstat since we want to follow
			# any symlinks to the real parent directory.
			parent_path = os.path.dirname(destfile)
			try:
				parent_stat = os.stat(parent_path)
			except EnvironmentError, e:
				if e.errno != errno.ENOENT:
					raise
				del e
				return False
			if self._contents_inodes is None:
				self._contents_inodes = {}
				parent_paths = set()
				for x in pkgfiles:
					p_path = os.path.dirname(x)
					if p_path in parent_paths:
						continue
					parent_paths.add(p_path)
					try:
						s = os.stat(p_path)
					except OSError:
						pass
					else:
						inode_key = (s.st_dev, s.st_ino)
						# Use lists of paths in case multiple
						# paths reference the same inode.
						p_path_list = self._contents_inodes.get(inode_key)
						if p_path_list is None:
							p_path_list = []
							self._contents_inodes[inode_key] = p_path_list
						if p_path not in p_path_list:
							p_path_list.append(p_path)
			p_path_list = self._contents_inodes.get(
				(parent_stat.st_dev, parent_stat.st_ino))
			if p_path_list:
				for p_path in p_path_list:
					x = os.path.join(p_path, basename)
					if x in pkgfiles:
						return x

		return False
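
	# A small sketch (illustrative only) of why the inode map above matters:
	# with a /lib64 -> lib symlink on disk, "/lib64/libfoo.so" and
	# "/lib/libfoo.so" name the same file, so a plain dictionary lookup in
	# CONTENTS can miss. Roughly (hypothetical instance and paths):
	#
	#	dblnk.isowner("lib64/libfoo.so", "/")
	#	# -> True when CONTENTS records /lib/libfoo.so and both parent
	#	#    directories resolve to the same (st_dev, st_ino) key.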
	def _preserve_libs(self, srcroot, destroot, mycontents, counter, inforoot):
		showMessage = self._display_merge
		# read global reverse NEEDED map
		linkmap = self.vartree.dbapi.linkmap
		linkmap.rebuild(include_file=os.path.join(inforoot, "NEEDED.ELF.2"))
		liblist = linkmap.listLibraryObjects()

		# get list of libraries from old package instance
		root_len = len(self.myroot) - 1
		old_contents = set(p[root_len:] \
			for p in self._installed_instance.getcontents())
		old_libs = old_contents.intersection(liblist)

		# get list of libraries from new package instance
		mylibs = set([os.path.join(os.sep, x) for x in mycontents]).intersection(liblist)

		# check which libs are present in the old, but not the new package instance
		candidates = old_libs.difference(mylibs)

		for x in old_contents:
			if os.path.islink(x) and os.path.realpath(x) in candidates and x not in mycontents:
				candidates.add(x)

		# ignore any libs that are only internally used by the package
		def has_external_consumers(lib, contents, otherlibs):
			consumers = consumer_cache.get(lib)
			if consumers is None:
				consumers = linkmap.findConsumers(lib)
				consumer_cache[lib] = consumers
			contents_without_libs = [x for x in contents if x not in otherlibs]

			# just used by objects that will be autocleaned
			if len(consumers.difference(contents_without_libs)) == 0:
				return False
			# used by objects that are referenced as well, need to check those
			# recursively to break any reference cycles
			elif len(consumers.difference(contents)) == 0:
				otherlibs = set(otherlibs)
				for ol in otherlibs.intersection(consumers):
					if has_external_consumers(ol, contents, otherlibs.difference([lib])):
						return True
				return False
			# used by external objects directly
			else:
				return True

		consumer_cache = {}
		provider_cache = {}
		for lib in list(candidates):
			if not has_external_consumers(lib, old_contents, candidates):
				candidates.remove(lib)
				continue
			if linkmap.isMasterLink(lib):
				candidates.remove(lib)
				continue
			# only preserve the lib if there is no other copy to use for each consumer
			keep = False

			lib_consumers = consumer_cache.get(lib)
			if lib_consumers is None:
				lib_consumers = linkmap.findConsumers(lib)
				consumer_cache[lib] = lib_consumers

			for c in lib_consumers:

				providers = provider_cache.get(c)
				if providers is None:
					providers = linkmap.findProviders(c)
					provider_cache[c] = providers

				has_replacement = False
				for soname in providers:
					if lib in providers[soname]:
						for p in providers[soname]:
							if p not in candidates or os.path.exists(os.path.join(srcroot, p.lstrip(os.sep))):
								# this consumer still has a provider that
								# is not scheduled for removal
								has_replacement = True
								break
						break
				if not has_replacement:
					keep = True
					break
			if not keep:
				candidates.remove(lib)
				continue

		del mylibs, mycontents, old_contents, liblist

		# inject files that should be preserved into our image dir
		import shutil
		preserve_paths = []
		candidates_stack = list(candidates)
		while candidates_stack:
			x = candidates_stack.pop()
			# skip existing files so the 'new' libs aren't overwritten
			if os.path.exists(os.path.join(srcroot, x.lstrip(os.sep))):
				continue
			showMessage("injecting %s into %s\n" % (x, srcroot),
				noiselevel=-1)
			if not os.path.exists(os.path.join(destroot, x.lstrip(os.sep))):
				showMessage("%s does not exist so can't be preserved\n" % x,
					noiselevel=-1)
				continue
			mydir = os.path.join(srcroot, os.path.dirname(x).lstrip(os.sep))
			if not os.path.exists(mydir):
				os.makedirs(mydir)

			# resolve symlinks and extend preserve list
			# NOTE: we're extending the list in the loop to emulate recursion to
			#       also get indirect symlinks
			if os.path.islink(x):
				linktarget = os.readlink(x)
				os.symlink(linktarget, os.path.join(srcroot, x.lstrip(os.sep)))
				if linktarget[0] != os.sep:
					linktarget = os.path.join(os.path.dirname(x), linktarget)
				if linktarget not in candidates:
					candidates.add(linktarget)
					candidates_stack.append(linktarget)
			else:
				shutil.copy2(os.path.join(destroot, x.lstrip(os.sep)),
					os.path.join(srcroot, x.lstrip(os.sep)))
			preserve_paths.append(x)

		del candidates

		# keep track of the libs we preserved
		self.vartree.dbapi.plib_registry.register(self.mycpv, self.settings["SLOT"], counter, preserve_paths)
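
	# The registration above is what unmerge() later consults when it decides
	# whether a preserved object still has consumers; a sketch of the recorded
	# data (cpv, slot and paths are example values only):
	#
	#	plib_registry.register("app-misc/foo-1.0", "0", counter,
	#		["/usr/lib/libfoo.so.0", "/usr/lib/libfoo.so"])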
	def _collision_protect(self, srcroot, destroot, mypkglist, mycontents):
		import shlex
		collision_ignore = set([normalize_path(myignore) for myignore in \
			shlex.split(self.settings.get("COLLISION_IGNORE", ""))])

		showMessage = self._display_merge
		scheduler = self._scheduler

		collisions = []
		destroot = normalize_path(destroot).rstrip(os.path.sep) + \
			os.path.sep
		showMessage("%s checking %d files for package collisions\n" % \
			(green("*"), len(mycontents)))
		for i, f in enumerate(mycontents):
			if i % 1000 == 0 and i != 0:
				showMessage("%d files checked ...\n" % i)

			if scheduler is not None and \
				0 == i % self._file_merge_yield_interval:
				scheduler.scheduleYield()

			dest_path = normalize_path(
				os.path.join(destroot, f.lstrip(os.path.sep)))
			try:
				dest_lstat = os.lstat(dest_path)
			except EnvironmentError, e:
				if e.errno == errno.ENOENT:
					del e
					continue
				elif e.errno == errno.ENOTDIR:
					del e
					# A non-directory is in a location where this package
					# expects to have a directory.
					dest_lstat = None
					parent_path = dest_path
					while len(parent_path) > len(destroot):
						parent_path = os.path.dirname(parent_path)
						try:
							dest_lstat = os.lstat(parent_path)
							break
						except EnvironmentError, e:
							if e.errno != errno.ENOTDIR:
								raise
							del e
					if not dest_lstat:
						raise AssertionError(
							"unable to find non-directory " + \
							"parent for '%s'" % dest_path)
					dest_path = parent_path
					f = os.path.sep + dest_path[len(destroot):]
					if f in collisions:
						continue
				else:
					raise
			if f[0] != os.path.sep:
				f = os.path.sep + f
			isowned = False
			for ver in [self] + mypkglist:
				if (ver.isowner(f, destroot) or ver.isprotected(f)):
					isowned = True
					break
			if not isowned:
				stopmerge = True
				if collision_ignore:
					if f in collision_ignore:
						stopmerge = False
					else:
						for myignore in collision_ignore:
							if f.startswith(myignore + os.path.sep):
								stopmerge = False
								break
				if stopmerge:
					collisions.append(f)
		return collisions
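
	# Usage sketch (illustrative): treewalk() below calls this with the
	# merged file list and every other dblink that could legitimately own
	# a path, then decides whether FEATURES=collision-protect aborts:
	#
	#	collisions = self._collision_protect(srcroot, destroot,
	#		others_in_slot + blockers, myfilelist + mylinklist)
	#	if collisions and "collision-protect" in self.settings.features:
	#		# the merge is refused and the colliding paths are reported
	#		return 1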
	def _security_check(self, installed_instances):
		if not installed_instances:
			return 0

		showMessage = self._display_merge
		scheduler = self._scheduler

		file_paths = set()
		for dblnk in installed_instances:
			file_paths.update(dblnk.getcontents())
		inode_map = {}
		real_paths = set()
		for i, path in enumerate(file_paths):

			if scheduler is not None and \
				0 == i % self._file_merge_yield_interval:
				scheduler.scheduleYield()

			try:
				s = os.lstat(path)
			except OSError, e:
				if e.errno not in (errno.ENOENT, errno.ENOTDIR):
					raise
				del e
				continue
			if not stat.S_ISREG(s.st_mode):
				continue
			path = os.path.realpath(path)
			if path in real_paths:
				continue
			real_paths.add(path)
			if s.st_nlink > 1 and \
				s.st_mode & (stat.S_ISUID | stat.S_ISGID):
				k = (s.st_dev, s.st_ino)
				inode_map.setdefault(k, []).append((path, s))
		suspicious_hardlinks = []
		for path_list in inode_map.itervalues():
			path, s = path_list[0]
			if len(path_list) == s.st_nlink:
				# All hardlinks seem to be owned by this package.
				continue
			suspicious_hardlinks.append(path_list)
		if not suspicious_hardlinks:
			return 0

		msg = []
		msg.append("suid/sgid file(s) " + \
			"with suspicious hardlink(s):")
		msg.append("")
		for path_list in suspicious_hardlinks:
			for path, s in path_list:
				msg.append("\t%s" % path)
		msg.append("")
		msg.append("See the Gentoo Security Handbook " + \
			"guide for advice on how to proceed.")

		self._eerror("preinst", msg)

		return 1
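
	# The heuristic above flags a suid/sgid inode whose on-disk link count
	# exceeds the number of paths the package set accounts for; sketch with
	# example data:
	#
	#	st.st_nlink == 3, but the CONTENTS entries explain only
	#	["/usr/bin/foo", "/usr/bin/foo-alias"]  =>  suspicious, because an
	#	unknown third hardlink could keep the old suid binary alive after
	#	the package is replaced.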
	def _eerror(self, phase, lines):
		from portage.elog.messages import eerror as _eerror
		if self._scheduler is None:
			for l in lines:
				_eerror(l, phase=phase, key=self.settings.mycpv)
		else:
			self._scheduler.dblinkElog(self,
				phase, _eerror, lines)
	def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
		mydbapi=None, prev_mtimes=None):
		"""

		This function does the following:

		calls self._preserve_libs if FEATURES=preserve-libs
		calls self._collision_protect if FEATURES=collision-protect
		calls doebuild(mydo=pkg_preinst)
		Merges the package to the livefs
		unmerges old version (if required)
		calls doebuild(mydo=pkg_postinst)
		calls env_update

		@param srcroot: Typically this is ${D}
		@type srcroot: String (Path)
		@param destroot: Path to merge to (usually ${ROOT})
		@type destroot: String (Path)
		@param inforoot: root of the vardb entry ?
		@type inforoot: String (Path)
		@param myebuild: path to the ebuild that we are processing
		@type myebuild: String (Path)
		@param mydbapi: dbapi which is handed to doebuild.
		@type mydbapi: portdbapi instance
		@param prev_mtimes: { Filename:mtime } mapping for env_update
		@type prev_mtimes: Dictionary
		@rtype: Boolean
		@returns:
		1. 0 on success
		2. 1 on failure

		secondhand is a list of symlinks that have been skipped due to their target
		not existing; we will merge these symlinks at a later time.
		"""
		showMessage = self._display_merge
		scheduler = self._scheduler

		srcroot = normalize_path(srcroot).rstrip(os.path.sep) + os.path.sep
		destroot = normalize_path(destroot).rstrip(os.path.sep) + os.path.sep

		if not os.path.isdir(srcroot):
			showMessage("!!! Directory Not Found: D='%s'\n" % srcroot,
				level=logging.ERROR, noiselevel=-1)
			return 1

		inforoot_slot_file = os.path.join(inforoot, "SLOT")
		slot = None
		try:
			f = open(inforoot_slot_file)
			try:
				slot = f.read().strip()
			finally:
				f.close()
		except EnvironmentError, e:
			if e.errno != errno.ENOENT:
				raise
			del e

		if slot is None:
			slot = ""

		def eerror(lines):
			self._eerror("preinst", lines)

		if slot != self.settings["SLOT"]:
			showMessage("!!! WARNING: Expected SLOT='%s', got '%s'\n" % \
				(self.settings["SLOT"], slot), level=logging.WARN)

		if not os.path.exists(self.dbcatdir):
			os.makedirs(self.dbcatdir)

		otherversions = []
		for v in self.vartree.dbapi.cp_list(self.mysplit[0]):
			otherversions.append(v.split("/")[1])

		# filter any old-style virtual matches
		slot_matches = [cpv for cpv in self.vartree.dbapi.match(
			"%s:%s" % (cpv_getkey(self.mycpv), slot)) \
			if cpv_getkey(cpv) == cpv_getkey(self.mycpv)]

		if self.mycpv not in slot_matches and \
			self.vartree.dbapi.cpv_exists(self.mycpv):
			# handle multislot or unapplied slotmove
			slot_matches.append(self.mycpv)

		others_in_slot = []
		from portage import config
		for cur_cpv in slot_matches:
			# Clone the config in case one of these has to be unmerged since
			# we need it to have private ${T} etc... for things like elog.
			others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
				self.vartree.root, config(clone=self.settings),
				vartree=self.vartree, treetype="vartree",
				scheduler=self._scheduler))

		retval = self._security_check(others_in_slot)
		if retval:
			return retval

		if slot_matches:
			# Used by self.isprotected().
			max_dblnk = None
			max_counter = -1
			for dblnk in others_in_slot:
				cur_counter = self.vartree.dbapi.cpv_counter(dblnk.mycpv)
				if cur_counter > max_counter:
					max_counter = cur_counter
					max_dblnk = dblnk
			self._installed_instance = max_dblnk

		# get current counter value (counter_tick also takes care of incrementing it)
		# XXX Need to make this destroot, but it needs to be initialized first. XXX
		# XXX bis: leads to some invalidentry() call through cp_all().
		# Note: The counter is generated here but written later because preserve_libs
		#       needs the counter value but has to be before dbtmpdir is made (which
		#       has to be before the counter is written) - genone
		counter = self.vartree.dbapi.counter_tick(self.myroot, mycpv=self.mycpv)

		# Save this for unregistering preserved-libs if the merge fails.
		self.settings["COUNTER"] = str(counter)
		self.settings.backup_changes("COUNTER")

		myfilelist = []
		mylinklist = []
		def onerror(e):
			raise
		for parent, dirs, files in os.walk(srcroot, onerror=onerror):
			for f in files:
				file_path = os.path.join(parent, f)
				file_mode = os.lstat(file_path).st_mode
				if stat.S_ISREG(file_mode):
					myfilelist.append(file_path[len(srcroot):])
				elif stat.S_ISLNK(file_mode):
					# Note: os.walk puts symlinks to directories in the "dirs"
					# list and it does not traverse them since that could lead
					# to an infinite recursion loop.
					mylinklist.append(file_path[len(srcroot):])

		# If there are no files to merge, and an installed package in the same
		# slot has files, it probably means that something went wrong.
		if self.settings.get("PORTAGE_PACKAGE_EMPTY_ABORT") == "1" and \
			not myfilelist and not mylinklist and others_in_slot:
			installed_files = None
			for other_dblink in others_in_slot:
				installed_files = other_dblink.getcontents()
				if not installed_files:
					continue
				from textwrap import wrap
				wrap_width = 72
				msg = []
				d = (
					self.mycpv,
					other_dblink.mycpv
				)
				msg.extend(wrap(("The '%s' package will not install " + \
					"any files, but the currently installed '%s'" + \
					" package has the following files: ") % d, wrap_width))
				msg.append("")
				msg.extend(sorted(installed_files))
				msg.append("")
				msg.append("package %s NOT merged" % self.mycpv)
				msg.append("")
				msg.extend(wrap(
					("Manually run `emerge --unmerge =%s` " % \
					other_dblink.mycpv) + "if you really want to " + \
					"remove the above files. Set " + \
					"PORTAGE_PACKAGE_EMPTY_ABORT=\"0\" in " + \
					"/etc/make.conf if you do not want to " + \
					"abort in cases like this.",
					wrap_width))
				eerror(msg)
			if installed_files:
				return 1

		# Preserve old libs if they are still in use
		if slot_matches and "preserve-libs" in self.settings.features:
			self._preserve_libs(srcroot, destroot, myfilelist+mylinklist, counter, inforoot)

		# check for package collisions
		blockers = None
		if self._blockers is not None:
			# This is only supposed to be called when
			# the vdb is locked, like it is here.
			blockers = self._blockers()
		if blockers is None:
			blockers = []
		collisions = self._collision_protect(srcroot, destroot,
			others_in_slot + blockers, myfilelist + mylinklist)

		# Make sure the ebuild environment is initialized and that ${T}/elog
		# exists for logging of collision-protect eerror messages.
		if myebuild is None:
			myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
		doebuild_environment(myebuild, "preinst", destroot,
			self.settings, 0, 0, mydbapi)
		prepare_build_dirs(destroot, self.settings, cleanup)

		if collisions:
			collision_protect = "collision-protect" in self.settings.features
			msg = "This package will overwrite one or more files that" + \
			" may belong to other packages (see list below)."
			if not collision_protect:
				msg += " Add \"collision-protect\" to FEATURES in" + \
				" make.conf if you would like the merge to abort" + \
				" in cases like this."
			if self.settings.get("PORTAGE_QUIET") != "1":
				msg += " You can use a command such as" + \
				" `portageq owners / <filename>` to identify the" + \
				" installed package that owns a file. If portageq" + \
				" reports that only one package owns a file then do NOT" + \
				" file a bug report. A bug report is only useful if it" + \
				" identifies at least two or more packages that are known" + \
				" to install the same file(s)." + \
				" If a collision occurs and you" + \
				" can not explain where the file came from then you" + \
				" should simply ignore the collision since there is not" + \
				" enough information to determine if a real problem" + \
				" exists. Please do NOT file a bug report at" + \
				" http://bugs.gentoo.org unless you report exactly which" + \
				" two packages install the same file(s). Once again," + \
				" please do NOT file a bug report unless you have" + \
				" completely understood the above message."

			self.settings["EBUILD_PHASE"] = "preinst"
			from textwrap import wrap
			msg = wrap(msg, 70)
			if collision_protect:
				msg.append("")
				msg.append("package %s NOT merged" % self.settings.mycpv)
			msg.append("")
			msg.append("Detected file collision(s):")
			msg.append("")

			for f in collisions:
				msg.append("\t%s" % \
					os.path.join(destroot, f.lstrip(os.path.sep)))

			eerror(msg)

			if collision_protect:
				msg = []
				msg.append("Searching all installed" + \
					" packages for file collisions...")
				msg.append("")
				msg.append("Press Ctrl-C to Stop")
				msg.append("")
				eerror(msg)

				owners = self.vartree.dbapi._owners.get_owners(collisions)
				self.vartree.dbapi.flush_cache()

				for pkg, owned_files in owners.iteritems():
					cpv = pkg.mycpv
					msg = []
					msg.append("%s" % cpv)
					for f in sorted(owned_files):
						msg.append("\t%s" % os.path.join(destroot,
							f.lstrip(os.path.sep)))
					msg.append("")
					eerror(msg)

				if not owners:
					eerror(["None of the installed" + \
						" packages claim the file(s).", ""])

			# The explanation about the collision and how to solve
			# it may not be visible via a scrollback buffer, especially
			# if the number of file collisions is large. Therefore,
			# show a summary at the end.
			if collision_protect:
				msg = "Package '%s' NOT merged due to file collisions." % \
					self.settings.mycpv
			else:
				msg = "Package '%s' merged despite file collisions." % \
					self.settings.mycpv
			msg += " If necessary, refer to your elog " + \
				"messages for the whole content of the above message."
			eerror(wrap(msg, 70))

			if collision_protect:
				return 1

		# The merge process may move files out of the image directory,
		# which causes invalidation of the .installed flag.
		try:
			os.unlink(os.path.join(
				os.path.dirname(normalize_path(srcroot)), ".installed"))
		except OSError, e:
			if e.errno != errno.ENOENT:
				raise
			del e

		self.dbdir = self.dbtmpdir
		self.delete()
		ensure_dirs(self.dbtmpdir)

		# run preinst script
		if scheduler is None:
			showMessage(">>> Merging %s to %s\n" % (self.mycpv, destroot))
			a = doebuild(myebuild, "preinst", destroot, self.settings,
				use_cache=0, tree=self.treetype, mydbapi=mydbapi,
				vartree=self.vartree)
		else:
			a = scheduler.dblinkEbuildPhase(
				self, mydbapi, myebuild, "preinst")

		# XXX: Decide how to handle failures here.
		if a != os.EX_OK:
			showMessage("!!! FAILED preinst: "+str(a)+"\n",
				level=logging.ERROR, noiselevel=-1)
			return a

		# copy "info" files (like SLOT, CFLAGS, etc.) into the database
		for x in os.listdir(inforoot):
			self.copyfile(inforoot+"/"+x)

		# write local package counter for recording
		lcfile = open(os.path.join(self.dbtmpdir, "COUNTER"),"w")
		lcfile.write(str(counter))
		lcfile.close()

		# open CONTENTS file (possibly overwriting old one) for recording
		outfile = open(os.path.join(self.dbtmpdir, "CONTENTS"),"w")

		self.updateprotect()

		#if we have a file containing previously-merged config file md5sums, grab it.
		conf_mem_file = os.path.join(destroot, CONFIG_MEMORY_FILE)
		cfgfiledict = grabdict(conf_mem_file)
		if "NOCONFMEM" in self.settings:
			cfgfiledict["IGNORE"]=1
		else:
			cfgfiledict["IGNORE"]=0

		# Always behave like --noconfmem is enabled for downgrades
		# so that people who don't know about this option are less
		# likely to get confused when doing upgrade/downgrade cycles.
		pv_split = catpkgsplit(self.mycpv)[1:]
		for other in others_in_slot:
			if pkgcmp(pv_split, catpkgsplit(other.mycpv)[1:]) < 0:
				cfgfiledict["IGNORE"] = 1
				break

		# Don't bump mtimes on merge since some application require
		# preservation of timestamps. This means that the unmerge phase must
		# check to see if file belongs to an installed instance in the same
		# slot.
		mymtime = None

		# set umask to 0 for merging; back up umask, save old one in prevmask (since this is a global change)
		prevmask = os.umask(0)
		secondhand = []

		# we do a first merge; this will recurse through all files in our srcroot but also build up a
		# "second hand" of symlinks to merge later
		if self.mergeme(srcroot, destroot, outfile, secondhand, "", cfgfiledict, mymtime):
			return 1

		# now, it's time for dealing our second hand; we'll loop until we can't merge anymore. The rest are
		# broken symlinks. We'll merge them too.
		lastlen = 0
		while len(secondhand) and len(secondhand)!=lastlen:
			# clear the thirdhand. Anything from our second hand that
			# couldn't get merged will be added to thirdhand.

			thirdhand = []
			self.mergeme(srcroot, destroot, outfile, thirdhand, secondhand, cfgfiledict, mymtime)

			#swap hands
			lastlen = len(secondhand)

			# our thirdhand now becomes our secondhand. It's ok to throw
			# away secondhand since thirdhand contains all the stuff that
			# couldn't be merged.
			secondhand = thirdhand

		if len(secondhand):
			# force merge of remaining symlinks (broken or circular; oh well)
			self.mergeme(srcroot, destroot, outfile, None, secondhand, cfgfiledict, mymtime)

		#restore umask
		os.umask(prevmask)

		#if we opened it, close it
		outfile.flush()
		outfile.close()

		# write out our collection of md5sums
		cfgfiledict.pop("IGNORE", None)
		ensure_dirs(os.path.dirname(conf_mem_file),
			gid=portage_gid, mode=02750, mask=02)
		writedict(cfgfiledict, conf_mem_file)

		# These caches are populated during collision-protect and the data
		# they contain is now invalid. It's very important to invalidate
		# the contents_inodes cache so that FEATURES=unmerge-orphans
		# doesn't unmerge anything that belongs to this package that has
		# just been merged.
		others_in_slot.append(self)  # self has just been merged
		for dblnk in others_in_slot:
			dblnk.contentscache = None
			dblnk._contents_inodes = None
			dblnk._contents_basenames = None

		# If portage is reinstalling itself, remove the old
		# version now since we want to use the temporary
		# PORTAGE_BIN_PATH that will be removed when we return.
		reinstall_self = False
		if self.myroot == "/" and \
			"sys-apps" == self.cat and \
			"portage" == pkgsplit(self.pkg)[0]:
			reinstall_self = True

		autoclean = self.settings.get("AUTOCLEAN", "yes") == "yes"
		for dblnk in list(others_in_slot):
			if dblnk is self:
				continue
			if not (autoclean or dblnk.mycpv == self.mycpv or reinstall_self):
				continue
			showMessage(">>> Safely unmerging already-installed instance...\n")
			others_in_slot.remove(dblnk) # dblnk will unmerge itself now
			dblnk.unmerge(trimworld=0, ldpath_mtimes=prev_mtimes,
				others_in_slot=others_in_slot)
			# TODO: Check status and abort if necessary.
			dblnk.delete()
			showMessage(">>> Original instance of package unmerged safely.\n")

		if len(others_in_slot) > 1:
			from portage.output import colorize
			showMessage(colorize("WARN", "WARNING:")
				+ " AUTOCLEAN is disabled. This can cause serious"
				+ " problems due to overlapping packages.\n",
				level=logging.WARN, noiselevel=-1)

		# We hold both directory locks.
		self.dbdir = self.dbpkgdir
		self.delete()
		_movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)

		# Check for file collisions with blocking packages
		# and remove any colliding files from their CONTENTS
		# since they now belong to this package.
		self._clear_contents_cache()
		contents = self.getcontents()
		destroot_len = len(destroot) - 1
		for blocker in blockers:
			self.vartree.dbapi.removeFromContents(blocker, iter(contents),
				relative_paths=False)

		self.vartree.dbapi._add(self)
		contents = self.getcontents()

		# regenerate reverse NEEDED map
		self.vartree.dbapi.linkmap.rebuild()

		#do postinst script
		self.settings["PORTAGE_UPDATE_ENV"] = \
			os.path.join(self.dbpkgdir, "environment.bz2")
		self.settings.backup_changes("PORTAGE_UPDATE_ENV")
		try:
			if scheduler is None:
				a = doebuild(myebuild, "postinst", destroot, self.settings,
					use_cache=0, tree=self.treetype, mydbapi=mydbapi,
					vartree=self.vartree)
				if a == os.EX_OK:
					showMessage(">>> %s %s\n" % (self.mycpv, "merged."))
			else:
				a = scheduler.dblinkEbuildPhase(
					self, mydbapi, myebuild, "postinst")
		finally:
			self.settings.pop("PORTAGE_UPDATE_ENV", None)

		# XXX: Decide how to handle failures here.
		if a != os.EX_OK:
			showMessage("!!! FAILED postinst: "+str(a)+"\n",
				level=logging.ERROR, noiselevel=-1)
			return a

		downgrade = False
		for v in otherversions:
			if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
				downgrade = True

		#update environment settings, library paths. DO NOT change symlinks.
		env_update(makelinks=(not downgrade),
			target_root=self.settings["ROOT"], prev_mtimes=prev_mtimes,
			contents=contents, env=self.settings.environ(),
			writemsg_level=self._display_merge)

		return os.EX_OK
	def mergeme(self, srcroot, destroot, outfile, secondhand, stufftomerge, cfgfiledict, thismtime):
		"""

		This function handles actual merging of the package contents to the livefs.
		It also handles config protection.

		@param srcroot: Where are we copying files from (usually ${D})
		@type srcroot: String (Path)
		@param destroot: Typically ${ROOT}
		@type destroot: String (Path)
		@param outfile: File to log operations to
		@type outfile: File Object
		@param secondhand: A set of items to merge in pass two (usually
		symlinks that point to non-existing files and may get merged later)
		@type secondhand: List
		@param stufftomerge: Either a directory to merge, or a list of items.
		@type stufftomerge: String or List
		@param cfgfiledict: { File:mtime } mapping for config_protected files
		@type cfgfiledict: Dictionary
		@param thismtime: The current time (typically long(time.time())
		@type thismtime: Long
		@rtype: None or Boolean
		@returns:
		1. True on failure
		2. None otherwise
		"""
		showMessage = self._display_merge
		scheduler = self._scheduler

		from os.path import sep, join
		srcroot = normalize_path(srcroot).rstrip(sep) + sep
		destroot = normalize_path(destroot).rstrip(sep) + sep

		# this is supposed to merge a list of files. There will be 2 forms of argument passing.
		if isinstance(stufftomerge, basestring):
			#A directory is specified. Figure out protection paths, listdir() it and process it.
			mergelist = os.listdir(join(srcroot, stufftomerge))
			offset = stufftomerge
		else:
			mergelist = stufftomerge
			offset = ""

		for i, x in enumerate(mergelist):

			if scheduler is not None and \
				0 == i % self._file_merge_yield_interval:
				scheduler.scheduleYield()

			mysrc = join(srcroot, offset, x)
			mydest = join(destroot, offset, x)
			# myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
			myrealdest = join(sep, offset, x)
			# stat file once, test using S_* macros many times (faster that way)
			try:
				mystat = os.lstat(mysrc)
			except OSError, e:
				writemsg("\n")
				writemsg(red("!!! ERROR: There appears to be ")+bold("FILE SYSTEM CORRUPTION.")+red(" A file that is listed\n"))
				writemsg(red("!!!        as existing is not capable of being stat'd. If you are using an\n"))
				writemsg(red("!!!        experimental kernel, please boot into a stable one, force an fsck,\n"))
				writemsg(red("!!!        and ensure your filesystem is in a sane state. ")+bold("'shutdown -Fr now'\n"))
				writemsg(red("!!!        File:  ")+str(mysrc)+"\n", noiselevel=-1)
				writemsg(red("!!!        Error: ")+str(e)+"\n", noiselevel=-1)
				sys.exit(1)
			except Exception, e:
				writemsg("\n")
				writemsg(red("!!! ERROR: An unknown error has occurred during the merge process.\n"))
				writemsg(red("!!! A stat call returned the following error for the following file:"))
				writemsg( "!!! Please ensure that your filesystem is intact, otherwise report\n")
				writemsg( "!!! this as a portage bug at bugs.gentoo.org. Append 'emerge info'.\n")
				writemsg( "!!! File:  "+str(mysrc)+"\n", noiselevel=-1)
				writemsg( "!!! Error: "+str(e)+"\n", noiselevel=-1)
				sys.exit(1)

			mymode = mystat[stat.ST_MODE]
			# handy variables; mydest is the target object on the live filesystems;
			# mysrc is the source object in the temporary install dir
			try:
				mydstat = os.lstat(mydest)
				mydmode = mydstat.st_mode
			except OSError, e:
				if e.errno != errno.ENOENT:
					raise
				del e
				#dest file doesn't exist
				mydstat = None
				mydmode = None

			if stat.S_ISLNK(mymode):
				# we are merging a symbolic link
				myabsto = abssymlink(mysrc)
				if myabsto.startswith(srcroot):
					myabsto = myabsto[len(srcroot):]
				myabsto = myabsto.lstrip(sep)
				myto = os.readlink(mysrc)
				if self.settings and self.settings["D"]:
					if myto.startswith(self.settings["D"]):
						myto = myto[len(self.settings["D"]):]
				# myrealto contains the path of the real file to which this symlink points.
				# we can simply test for existence of this file to see if the target has been merged yet
				myrealto = normalize_path(os.path.join(destroot, myabsto))
				if mydmode != None:
					#destination exists
					if not stat.S_ISLNK(mydmode):
						if stat.S_ISDIR(mydmode):
							# directory in the way: we can't merge a symlink over a directory
							# we won't merge this, continue with next file...
							continue

						if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
							# Kill file blocking installation of symlink to dir #71787
							pass
						elif self.isprotected(mydest):
							# Use md5 of the target in ${D} if it exists...
							try:
								newmd5 = perform_md5(join(srcroot, myabsto))
							except FileNotFound:
								# Maybe the target is merged already.
								try:
									newmd5 = perform_md5(myrealto)
								except FileNotFound:
									newmd5 = None
							mydest = new_protect_filename(mydest, newmd5=newmd5)

				# if secondhand is None it means we're operating in "force" mode and should not create a second hand.
				if (secondhand != None) and (not os.path.exists(myrealto)):
					# either the target directory doesn't exist yet or the target file doesn't exist -- or
					# the target is a broken symlink. We will add this file to our "second hand" and merge
					# it later.
					secondhand.append(mysrc[len(srcroot):])
					continue
				# unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
				mymtime = movefile(mysrc, mydest, newmtime=thismtime, sstat=mystat, mysettings=self.settings)
				if mymtime != None:
					showMessage(">>> %s -> %s\n" % (mydest, myto))
					outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
				else:
					print "!!! Failed to move file."
					print "!!!", mydest, "->", myto
					sys.exit(1)
			elif stat.S_ISDIR(mymode):
				# we are merging a directory
				if mydmode != None:
					# destination exists

					if bsd_chflags:
						# Save then clear flags on dest.
						dflags = mydstat.st_flags
						if dflags != 0:
							bsd_chflags.lchflags(mydest, 0)

					if not os.access(mydest, os.W_OK):
						pkgstuff = pkgsplit(self.pkg)
						writemsg("\n!!! Cannot write to '"+mydest+"'.\n", noiselevel=-1)
						writemsg("!!! Please check permissions and directories for broken symlinks.\n")
						writemsg("!!! You may start the merge process again by using ebuild:\n")
						writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
						writemsg("!!! And finish by running this: env-update\n\n")
						return 1

					if stat.S_ISLNK(mydmode) or stat.S_ISDIR(mydmode):
						# a symlink to an existing directory will work for us; keep it:
						showMessage("--- %s/\n" % mydest)
						if bsd_chflags:
							bsd_chflags.lchflags(mydest, dflags)
					else:
						# a non-directory and non-symlink-to-directory. Won't work for us. Move out of the way.
						if movefile(mydest, mydest+".backup", mysettings=self.settings) is None:
							sys.exit(1)
						print "bak", mydest, mydest+".backup"
						#now create our directory
						if self.settings.selinux_enabled():
							import selinux
							sid = selinux.get_sid(mysrc)
							selinux.secure_mkdir(mydest,sid)
						else:
							os.mkdir(mydest)
						if bsd_chflags:
							bsd_chflags.lchflags(mydest, dflags)
						os.chmod(mydest, mystat[0])
						os.chown(mydest, mystat[4], mystat[5])
						showMessage(">>> %s/\n" % mydest)
				else:
					#destination doesn't exist
					if self.settings.selinux_enabled():
						import selinux
						sid = selinux.get_sid(mysrc)
						selinux.secure_mkdir(mydest, sid)
					else:
						os.mkdir(mydest)
					os.chmod(mydest, mystat[0])
					os.chown(mydest, mystat[4], mystat[5])
					showMessage(">>> %s/\n" % mydest)
				outfile.write("dir "+myrealdest+"\n")
				# recurse and merge this directory
				if self.mergeme(srcroot, destroot, outfile, secondhand,
					join(offset, x), cfgfiledict, thismtime):
					return 1
			elif stat.S_ISREG(mymode):
				# we are merging a regular file
				mymd5 = perform_md5(mysrc, calc_prelink=1)
				# calculate config file protection stuff
				mydestdir = os.path.dirname(mydest)
				moveme = 1
				zing = "!!!"
				mymtime = None
				if mydmode != None:
					# destination file exists
					if stat.S_ISDIR(mydmode):
						# install of destination is blocked by an existing directory with the same name
						moveme = 0
						showMessage("!!! %s\n" % mydest,
							level=logging.ERROR, noiselevel=-1)
					elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
						cfgprot = 0
						# install of destination is blocked by an existing regular file,
						# or by a symlink to an existing regular file;
						# now, config file management may come into play.
						# we only need to tweak mydest if cfg file management is in play.
						if self.isprotected(mydest):
							# we have a protection path; enable config file management.
							destmd5 = perform_md5(mydest, calc_prelink=1)
							if mymd5 == destmd5:
								#file already in place; simply update mtimes of destination
								moveme = 1
							else:
								if mymd5 == cfgfiledict.get(myrealdest, [None])[0]:
									""" An identical update has previously been
									merged. Skip it unless the user has chosen
									--noconfmem."""
									moveme = cfgfiledict["IGNORE"]
									cfgprot = cfgfiledict["IGNORE"]
									if not moveme:
										zing = "---"
										mymtime = long(mystat.st_mtime)
								else:
									moveme = 1
									cfgprot = 1
							if moveme:
								# Merging a new file, so update confmem.
								cfgfiledict[myrealdest] = [mymd5]
							elif destmd5 == cfgfiledict.get(myrealdest, [None])[0]:
								"""A previously remembered update has been
								accepted, so it is removed from confmem."""
								del cfgfiledict[myrealdest]
						if cfgprot:
							mydest = new_protect_filename(mydest, newmd5=mymd5)

				# whether config protection or not, we merge the new file the
				# same way. Unless moveme=0 (blocking directory)
				if moveme:
					mymtime = movefile(mysrc, mydest, newmtime=thismtime, sstat=mystat, mysettings=self.settings)
					if mymtime is None:
						sys.exit(1)
					zing = ">>>"

				if mymtime != None:
					outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
				showMessage("%s %s\n" % (zing,mydest))
			else:
				# we are merging a fifo or device node
				zing = "!!!"
				if mydmode is None:
					# destination doesn't exist
					if movefile(mysrc, mydest, newmtime=thismtime, sstat=mystat, mysettings=self.settings) != None:
						zing = ">>>"
					else:
						sys.exit(1)
				if stat.S_ISFIFO(mymode):
					outfile.write("fif %s\n" % myrealdest)
				else:
					outfile.write("dev %s\n" % myrealdest)
				showMessage(zing + " " + mydest + "\n")
, mergeroot
, inforoot
, myroot
, myebuild
=None, cleanup
=0,
3480 mydbapi
=None, prev_mtimes
=None):
3482 If portage is reinstalling itself, create temporary
3483 copies of PORTAGE_BIN_PATH and PORTAGE_PYM_PATH in order
3484 to avoid relying on the new versions which may be
3485 incompatible. Register an atexit hook to clean up the
3486 temporary directories. Pre-load elog modules here since
3487 we won't be able to later if they get unmerged (happens
3488 when namespace changes).
3490 if self
.vartree
.dbapi
._categories
is not None:
3491 self
.vartree
.dbapi
._categories
= None
3492 if self
.myroot
== "/" and \
3493 "sys-apps" == self
.cat
and \
3494 "portage" == pkgsplit(self
.pkg
)[0]:
3495 settings
= self
.settings
3496 base_path_orig
= os
.path
.dirname(settings
["PORTAGE_BIN_PATH"])
3497 from tempfile
import mkdtemp
3499 # Make the temp directory inside PORTAGE_TMPDIR since, unlike
3500 # /tmp, it can't be mounted with the "noexec" option.
3501 base_path_tmp
= mkdtemp("", "._portage_reinstall_.",
3502 settings
["PORTAGE_TMPDIR"])
3503 from portage
.process
import atexit_register
3504 atexit_register(shutil
.rmtree
, base_path_tmp
)
3506 for subdir
in "bin", "pym":
3507 var_name
= "PORTAGE_%s_PATH" % subdir
.upper()
3508 var_orig
= settings
[var_name
]
3509 var_new
= os
.path
.join(base_path_tmp
, subdir
)
3510 settings
[var_name
] = var_new
3511 settings
.backup_changes(var_name
)
3512 shutil
.copytree(var_orig
, var_new
, symlinks
=True)
3513 os
.chmod(var_new
, dir_perms
)
3514 os
.chmod(base_path_tmp
, dir_perms
)
3515 # This serves so pre-load the modules.
3516 elog_process(self
.mycpv
, self
.settings
,
3517 phasefilter
=filter_mergephases
)
3519 return self
._merge
(mergeroot
, inforoot
,
3520 myroot
, myebuild
=myebuild
, cleanup
=cleanup
,
3521 mydbapi
=mydbapi
, prev_mtimes
=prev_mtimes
)
	def _merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0,
		mydbapi=None, prev_mtimes=None):
		retval = -1
		self.lockdb()
		try:
			retval = self.treewalk(mergeroot, myroot, inforoot, myebuild,
				cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes)
			# undo registrations of preserved libraries, bug #210501
			if retval != os.EX_OK:
				self.vartree.dbapi.plib_registry.unregister(self.mycpv, self.settings["SLOT"], self.settings["COUNTER"])
			# Process ebuild logfiles
			elog_process(self.mycpv, self.settings, phasefilter=filter_mergephases)
			if retval == os.EX_OK and "noclean" not in self.settings.features:
				if myebuild is None:
					myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
				doebuild(myebuild, "clean", myroot, self.settings,
					tree=self.treetype, mydbapi=mydbapi, vartree=self.vartree)
		finally:
			self.unlockdb()
		return retval
	def getstring(self,name):
		"returns contents of a file with whitespace converted to spaces"
		if not os.path.exists(self.dbdir+"/"+name):
			return ""
		myfile = open(self.dbdir+"/"+name,"r")
		mydata = myfile.read().split()
		myfile.close()
		return " ".join(mydata)
	def copyfile(self,fname):
		import shutil
		shutil.copyfile(fname,self.dbdir+"/"+os.path.basename(fname))
	def getfile(self,fname):
		if not os.path.exists(self.dbdir+"/"+fname):
			return ""
		myfile = open(self.dbdir+"/"+fname,"r")
		mydata = myfile.read()
		myfile.close()
		return mydata
	def setfile(self,fname,data):
		write_atomic(os.path.join(self.dbdir, fname), data)
	def getelements(self,ename):
		if not os.path.exists(self.dbdir+"/"+ename):
			return []
		myelement = open(self.dbdir+"/"+ename,"r")
		mylines = myelement.readlines()
		myreturn = []
		for x in mylines:
			for y in x[:-1].split():
				myreturn.append(y)
		myelement.close()
		return myreturn
	def setelements(self,mylist,ename):
		myelement = open(self.dbdir+"/"+ename,"w")
		for x in mylist:
			myelement.write(x+"\n")
		myelement.close()
	def isregular(self):
		"Is this a regular package (does it have a CATEGORY file? A dblink can be virtual *and* regular)"
		return os.path.exists(os.path.join(self.dbdir, "CATEGORY"))
def write_contents(contents, root, f):
	"""
	Write contents to any file like object. The file will be left open.
	"""
	root_len = len(root) - 1
	for filename in sorted(contents):
		entry_data = contents[filename]
		entry_type = entry_data[0]
		relative_filename = filename[root_len:]
		if entry_type == "obj":
			entry_type, mtime, md5sum = entry_data
			line = "%s %s %s %s\n" % \
				(entry_type, relative_filename, md5sum, mtime)
		elif entry_type == "sym":
			entry_type, mtime, link = entry_data
			line = "%s %s -> %s %s\n" % \
				(entry_type, relative_filename, link, mtime)
		else: # dir, dev, fif
			line = "%s %s\n" % (entry_type, relative_filename)
		f.write(line)
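
# Example of the lines produced above (md5 digests and mtimes are made-up
# sample data):
#
#	obj /usr/bin/foo d41d8cd98f00b204e9800998ecf8427e 1218357936
#	sym /usr/lib/libfoo.so -> libfoo.so.1 1218357936
#	dir /usr/share/foo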
def tar_contents(contents, root, tar, protect=None, onProgress=None):
	from portage.util import normalize_path
	import tarfile
	root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
	id_strings = {}
	maxval = len(contents)
	curval = 0
	if onProgress:
		onProgress(maxval, 0)
	paths = contents.keys()
	paths.sort()
	for path in paths:
		curval += 1
		try:
			lst = os.lstat(path)
		except OSError, e:
			if e.errno != errno.ENOENT:
				raise
			del e
			if onProgress:
				onProgress(maxval, curval)
			continue
		contents_type = contents[path][0]
		if path.startswith(root):
			arcname = path[len(root):]
		else:
			raise ValueError("invalid root argument: '%s'" % root)
		live_path = path
		if 'dir' == contents_type and \
			not stat.S_ISDIR(lst.st_mode) and \
			os.path.isdir(live_path):
			# Even though this was a directory in the original ${D}, it exists
			# as a symlink to a directory in the live filesystem. It must be
			# recorded as a real directory in the tar file to ensure that tar
			# can properly extract its children.
			live_path = os.path.realpath(live_path)
		tarinfo = tar.gettarinfo(live_path, arcname)
		# store numbers instead of real names like tar's --numeric-owner
		tarinfo.uname = id_strings.setdefault(tarinfo.uid, str(tarinfo.uid))
		tarinfo.gname = id_strings.setdefault(tarinfo.gid, str(tarinfo.gid))

		if stat.S_ISREG(lst.st_mode):
			# break hardlinks due to bug #185305
			tarinfo.type = tarfile.REGTYPE
			if protect and protect(path):
				# Create an empty file as a place holder in order to avoid
				# potential collision-protect issues.
				tarinfo.size = 0
				tar.addfile(tarinfo)
			else:
				f = open(path)
				try:
					tar.addfile(tarinfo, f)
				finally:
					f.close()
		else:
			tar.addfile(tarinfo)
		if onProgress:
			onProgress(maxval, curval)
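
# Usage sketch (illustrative only; quickpkg-style callers do roughly this,
# where mydblink is a hypothetical dblink instance):
#
#	import tarfile
#	tar = tarfile.open("/tmp/foo-1.0.tar.bz2", "w:bz2")
#	contents = mydblink.getcontents()
#	tar_contents(contents, "/", tar,
#		protect=lambda path: False)  # or a real config-protect predicate
#	tar.close()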