# Copyright 1998-2007 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Id: vartree.py 10696 2008-06-17 18:13:56Z genone $

from portage.checksum import perform_md5
from portage.const import CACHE_PATH, CONFIG_MEMORY_FILE, PORTAGE_BIN_PATH, \
	PRIVATE_PATH, VDB_PATH
from portage.data import portage_gid, portage_uid, secpass
from portage.dbapi import dbapi
from portage.dep import dep_getslot, use_reduce, paren_reduce, isvalidatom, \
	isjustname, dep_getkey, match_from_list
from portage.exception import InvalidAtom, InvalidData, InvalidPackageName, \
	FileNotFound, PermissionDenied, UnsupportedAPIException
from portage.locks import lockdir, unlockdir
from portage.output import bold, red, green
from portage.update import fixdbentries
from portage.util import apply_secpass_permissions, ConfigProtect, ensure_dirs, \
	writemsg, writemsg_stdout, write_atomic, atomic_ofstream, writedict, \
	grabfile, grabdict, normalize_path, new_protect_filename, getlibpaths
from portage.versions import pkgsplit, catpkgsplit, catsplit, best, pkgcmp

from portage import listdir, dep_expand, flatten, key_expand, \
	doebuild_environment, doebuild, env_update, prepare_build_dirs, \
	abssymlink, movefile, _movefile, bsd_chflags, cpv_getkey

from portage.elog import elog_process
from portage.elog.messages import ewarn
from portage.elog.filtering import filter_mergephases, filter_unmergephases

import os, re, sys, stat, errno, commands, copy, time, subprocess
from itertools import izip

try:
	import cPickle
except ImportError:
	import pickle as cPickle
class PreservedLibsRegistry(object):
	""" This class handles the tracking of preserved library objects """
	def __init__(self, filename, autocommit=True):
		""" @param filename: absolute path for saving the preserved libs records
			@type filename: String
			@param autocommit: determines if the file is written after every update
			@type autocommit: Boolean
		"""
		self._filename = filename
		self._autocommit = autocommit
		self.load()

	def load(self):
		""" Reload the registry data from file """
		try:
			self._data = cPickle.load(open(self._filename, "r"))
		except IOError, e:
			if e.errno == errno.ENOENT:
				self._data = {}
			elif e.errno == PermissionDenied.errno:
				raise PermissionDenied(self._filename)
			else:
				raise e

	def store(self):
		""" Store the registry data to file. No need to call this if autocommit
			was enabled.
		"""
		f = atomic_ofstream(self._filename)
		cPickle.dump(self._data, f)
		f.close()

	def register(self, cpv, slot, counter, paths):
		""" Register new objects in the registry. If there is a record with the
			same packagename (internally derived from cpv) and slot it is
			overwritten with the new data.
			@param cpv: package instance that owns the objects
			@type cpv: CPV (as String)
			@param slot: the value of SLOT of the given package instance
			@type slot: String
			@param counter: vdb counter value for the package instance
			@type counter: Integer
			@param paths: absolute paths of objects that got preserved during an update
			@type paths: List
		"""
		cp = "/".join(catpkgsplit(cpv)[:2])
		cps = cp+":"+slot
		if len(paths) == 0 and self._data.has_key(cps) \
				and self._data[cps][0] == cpv and int(self._data[cps][1]) == int(counter):
			del self._data[cps]
		elif len(paths) > 0:
			self._data[cps] = (cpv, counter, paths)
		if self._autocommit:
			self.store()

	def unregister(self, cpv, slot, counter):
		""" Remove a previous registration of preserved objects for the given package.
			@param cpv: package instance whose records should be removed
			@type cpv: CPV (as String)
			@param slot: the value of SLOT of the given package instance
			@type slot: String
			@param counter: vdb counter value for the package instance
			@type counter: Integer
		"""
		self.register(cpv, slot, counter, [])

	def pruneNonExisting(self):
		""" Remove all records for objects that no longer exist on the filesystem. """
		for cps in self._data.keys():
			cpv, counter, paths = self._data[cps]
			paths = [f for f in paths if os.path.exists(f)]
			if len(paths) > 0:
				self._data[cps] = (cpv, counter, paths)
			else:
				del self._data[cps]
		if self._autocommit:
			self.store()

	def hasEntries(self):
		""" Check if this registry contains any records. """
		return len(self._data) > 0

	def getPreservedLibs(self):
		""" Return a mapping of packages->preserved objects.
			@return: mapping of package instances to preserved objects
			@rtype: Dict cpv->list-of-paths
		"""
		rValue = {}
		for cps in self._data:
			rValue[self._data[cps][0]] = self._data[cps][2]
		return rValue
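
# Illustrative sketch, not part of the original module: how the registry is
# typically driven (the path and cpv below are hypothetical).
#
#	plibs = PreservedLibsRegistry("/tmp/preserved_libs_registry")
#	plibs.register("x11-libs/libfoo-1.2", "0", 42,
#		["/usr/lib/libfoo.so.1", "/usr/lib/libfoo.so.1.2"])
#	plibs.pruneNonExisting()            # drop records whose files are gone
#	mapping = plibs.getPreservedLibs()  # {"x11-libs/libfoo-1.2": [...]}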
class LinkageMap(object):
	def __init__(self, vardbapi):
		self._dbapi = vardbapi
		self._libs = {}
		self._obj_properties = {}
		self._defpath = getlibpaths()

	def rebuild(self, include_file=None):
		libs = {}
		obj_properties = {}
		lines = []
		for cpv in self._dbapi.cpv_all():
			lines += self._dbapi.aux_get(cpv, ["NEEDED.ELF.2"])[0].split('\n')
		# Cache NEEDED.* files to avoid doing excessive IO for every rebuild.
		self._dbapi.flush_cache()

		if include_file:
			lines += grabfile(include_file)

		# have to call scanelf for preserved libs here as they aren't
		# registered in NEEDED.ELF.2 files
		if self._dbapi.plib_registry and self._dbapi.plib_registry.getPreservedLibs():
			args = ["/usr/bin/scanelf", "-qF", "%a;%F;%S;%r;%n"]
			for items in self._dbapi.plib_registry.getPreservedLibs().values():
				args += [x.lstrip(".") for x in items]
			proc = subprocess.Popen(args, stdout=subprocess.PIPE)
			output = [l[3:] for l in proc.communicate()[0].split("\n")]
			lines += output

		for l in lines:
			if l.strip() == "":
				continue
			fields = l.strip("\n").split(";")
			if len(fields) < 5:
				print "Error", fields
				# insufficient field length
				continue
			arch = fields[0]
			obj = os.path.realpath(fields[1])
			soname = fields[2]
			# When fields[3]=="", this prevents the empty string from being
			# inserted in paths.
			if fields[3]:
				path = fields[3].replace("${ORIGIN}", os.path.dirname(obj)).replace("$ORIGIN", os.path.dirname(obj)).split(":")
			else:
				path = []
			# When fields[4]=="", this prevents the empty string from being
			# inserted as a key into libs.
			if fields[4]:
				needed = fields[4].split(",")
			else:
				needed = []
			if soname:
				libs.setdefault(soname, {arch: {"providers": [], "consumers": []}})
				libs[soname].setdefault(arch, {"providers": [], "consumers": []})
				libs[soname][arch]["providers"].append(obj)
			for x in needed:
				libs.setdefault(x, {arch: {"providers": [], "consumers": []}})
				libs[x].setdefault(arch, {"providers": [], "consumers": []})
				libs[x][arch]["consumers"].append(obj)
			obj_properties[obj] = (arch, needed, path, soname)

		self._libs = libs
		self._obj_properties = obj_properties
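
	# Illustrative sketch, not in the original source: a NEEDED.ELF.2 entry
	# has the form "arch;obj;soname;rpath;needed", e.g. (values hypothetical)
	#
	#	x86_64;/usr/bin/foo;;/opt/foo/lib:${ORIGIN};libbar.so.1,libc.so.6
	#
	# which rebuild() records roughly as:
	#
	#	obj_properties["/usr/bin/foo"] = ("x86_64",
	#		["libbar.so.1", "libc.so.6"],   # needed sonames
	#		["/opt/foo/lib", "/usr/bin"],   # rpath, ${ORIGIN} expanded
	#		"")                             # no soname: not a library
	#	libs["libbar.so.1"]["x86_64"]["consumers"] == ["/usr/bin/foo"]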
	def isMasterLink(self, obj):
		basename = os.path.basename(obj)
		if obj not in self._obj_properties:
			obj = os.path.realpath(obj)
			if obj not in self._obj_properties:
				raise KeyError("%s not in object list" % obj)
		soname = self._obj_properties[obj][3]
		return (len(basename) < len(soname))
	def listBrokenBinaries(self):
		"""
		Find binaries and their needed sonames, which have no providers.

		@rtype: dict (example: {'/usr/bin/foo': set(['libbar.so'])})
		@return: The return value is an object -> set-of-sonames mapping, where
			object is a broken binary and the set consists of sonames needed by
			object that have no corresponding libraries to fulfill the dependency.
		"""
		class SonameCache(object):
			"""
			Caches sonames and realpaths associated with paths.

			The purpose of this class is to prevent multiple calls of
			os.path.realpath and os.path.isfile on the same paths.
			"""
			def __init__(cache_self):
				cache_self.cache = {}

			def get(cache_self, path):
				"""
				Caches and returns the soname and realpath for a path.

				@param path: absolute path (can be symlink)
				@type path: string (example: '/usr/lib/libfoo.so')
				@rtype: 3-tuple with types (string or None, string, boolean)
				@return: 3-tuple with the following components:
					1. soname as a string or None if it does not exist,
					2. realpath as a string,
					3. the result of os.path.isfile(realpath)
					(example: ('libfoo.so.1', '/usr/lib/libfoo.so.1.5.1', True))
				"""
				if path in cache_self.cache:
					return cache_self.cache[path]
				else:
					realpath = os.path.realpath(path)
					# Check that the library exists on the filesystem.
					if os.path.isfile(realpath):
						# Get the soname from LinkageMap._obj_properties if it
						# exists. Otherwise, None. (The fallback tuple needs
						# four elements since it is indexed with [3].)
						soname = self._obj_properties.get(realpath, (None,)*4)[3]
						# Both path and realpath are cached and the result is returned.
						cache_self.cache.setdefault(realpath, (soname, realpath, True))
						return cache_self.cache.setdefault(path, (soname, realpath, True))
					else:
						# realpath is not cached here, because in the large
						# majority of cases where realpath is not a file, path
						# is the same as realpath. Thus storing twice slows
						# down the cache performance.
						return cache_self.cache.setdefault(path, (None, realpath, False))

		debug = False
		rValue = {}
		cache = SonameCache()
		providers = self.listProviders()
		# providers = self.listProvidersForReachableBinaries(self.getBinaries())

		# Iterate over all binaries and their providers.
		for obj, sonames in providers.items():
			# Iterate over each needed soname and the set of library paths that
			# fulfill the soname to determine if the dependency is broken.
			for soname, libraries in sonames.items():
				# validLibraries is used to store libraries, which satisfy soname,
				# so if no valid libraries are found, the soname is not satisfied
				# for obj. Thus obj must be emerged.
				validLibraries = set()
				# It could be the case that the library to satisfy the soname is
				# not in the obj's runpath, but a symlink to the library is (eg
				# libnvidia-tls.so.1 in nvidia-drivers). Also, since LinkageMap
				# does not catalog symlinks, broken or missing symlinks may go
				# unnoticed. As a result of these cases, check that a file with
				# the same name as the soname exists in the binary's runpath.
				path = self._obj_properties[obj][2] + self._defpath
				for dir in path:
					cachedSoname, cachedRealpath, cachedExists = \
						cache.get(os.path.join(dir, soname))
					# Check that this library provides the needed soname. Doing
					# this, however, will cause consumers of libraries missing
					# sonames to be unnecessarily emerged. (eg libmix.so)
					if cachedSoname == soname:
						validLibraries.add(cachedRealpath)
						if debug and cachedRealpath not in libraries:
							print "Unregistered symlink:", \
								os.path.join(dir, soname), cachedRealpath
						# A valid library has been found, so there is no need to
						# continue.
						break
					if debug and cachedRealpath in self._obj_properties:
						print "Broken symlink or missing/bad soname:", \
							os.path.join(dir, soname), '->', cachedRealpath, \
							cachedSoname
				# This conditional checks if there are no libraries to satisfy the
				# soname (empty set).
				if not validLibraries:
					rValue.setdefault(obj, set()).add(soname)
					# If no valid libraries have been found by this point, then
					# there are no files named with the soname within obj's runpath,
					# but if there are libraries (from the providers mapping), it is
					# likely that symlinks or the actual libraries are missing.
					# Thus possible symlinks and missing libraries are added to
					# rValue in order to emerge corrupt library packages.
					for lib in libraries:
						cachedSoname, cachedRealpath, cachedExists = cache.get(lib)
						if not cachedExists:
							# The library's package needs to be emerged to repair the
							# missing library.
							rValue.setdefault(lib, set()).add(soname)
						else:
							rValue.setdefault(os.path.join(os.path.dirname(lib), \
								soname), set()).add(soname)
						if debug:
							if not cachedExists:
								print "Missing lib:", lib
							else:
								print "Possibly missing symlink:", \
									os.path.join(os.path.dirname(lib), soname)

		return rValue
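
	# Illustrative sketch, not in the original source: this method is the
	# heart of a revdep-rebuild style scan (variable names hypothetical).
	#
	#	linkmap.rebuild()
	#	for obj, sonames in linkmap.listBrokenBinaries().items():
	#		print "%s is missing: %s" % (obj, ", ".join(sonames))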
	def listProviders(self):
		"""
		Find the providers for all binaries.

		@rtype: dict (example:
			{'/usr/bin/foo': {'libbar.so': set(['/lib/libbar.so.1.5'])}})
		@return: The return value is an object -> providers mapping, where
			providers is a mapping of soname -> set-of-library-paths returned
			from the findProviders method.
		"""
		rValue = {}
		if not self._libs:
			self.rebuild()
		# Iterate over all binaries within LinkageMap.
		for obj in self._obj_properties.keys():
			rValue.setdefault(obj, self.findProviders(obj))
		return rValue
	def getBinaries(self):
		"""
		Get binaries from PATH variables and shared library directories.

		@rtype: set of strings
		@return: the set of binaries found in PATH variables and shared library
			directories
		"""
		if not self._libs:
			self.rebuild()
		searchDirectories = set()
		rValue = set()

		# Gather set of directories from PATH variables and shared library
		# directories.
		pathvar = self._dbapi.settings['PATH'].split(':')
		rootpathvar = self._dbapi.settings['ROOTPATH'].split(':')
		if pathvar != ['']:
			searchDirectories.update(set(pathvar))
		if rootpathvar != ['']:
			searchDirectories.update(set(rootpathvar))
		searchDirectories.update(set(['/bin', '/sbin', '/usr/bin', '/usr/sbin']))
		for file in os.listdir('/usr'):
			if file.startswith('lib'):
				searchDirectories.add(os.path.join('/usr', file))
		searchDirectories.update(set(self._defpath))
		print searchDirectories

		for obj in self._obj_properties.keys():
			if os.path.dirname(obj) in searchDirectories:
				rValue.add(obj)

		return rValue
	def listProvidersForReachableBinaries(self, binarySet, rValue=None):
		"""
		Recursively find reachable binaries and their providers.

		@param binarySet: set of paths to binaries, which must be in
			_obj_properties
		@type binarySet: set of strings
		@param rValue: same as return
		@type rValue: same as rtype
		@rtype: dict (example:
			{'/usr/bin/foo': {'libbar.so': set(['/lib/libbar.so.1.5'])}})
		@return: The return value is an object -> providers mapping, where
			providers is a mapping of soname -> set-of-library-paths returned
			from the findProviders method.
		"""
		# Default to None since a mutable default argument would be shared
		# across calls.
		if rValue is None:
			rValue = {}
		for binary in binarySet:
			if binary in rValue:
				continue
			rValue.setdefault(binary, self.findProviders(binary))
			libraries = set()
			for libs in rValue[binary].values():
				libraries.update(set(libs))
			rValue.update(self.listProvidersForReachableBinaries(libraries, rValue=rValue))

		return rValue
	def findDeepProviders(self, obj, rValue=None):
		"""Recursively finds all direct and indirect providers.

		@param obj: pathname of obj in _obj_properties
		@type obj: string
		@param rValue: same as return
		@type rValue: same as rtype
		@rtype: dict (example:
			{'libbar.so': set(['/lib/libbar.so.1.5'])})
		@return: The return value is a soname -> set-of-library-paths mapping.
		"""
		# Default to None since a mutable default argument would be shared
		# across calls.
		if rValue is None:
			rValue = {}
		if not self._libs:
			self.rebuild()
		if obj in rValue or os.path.realpath(obj) in rValue:
			return rValue
		if obj not in self._obj_properties:
			obj = os.path.realpath(obj)
			if obj not in self._obj_properties:
				raise KeyError("%s not in object list" % obj)
		arch, needed, path, soname = self._obj_properties[obj]
		path = [os.path.realpath(x) for x in path + self._defpath]
		for x in needed:
			if x not in rValue:
				rValue.setdefault(x, set())
			if x not in self._libs or arch not in self._libs[x]:
				continue
			for y in self._libs[x][arch]["providers"]:
				if x[0] == os.sep and os.path.realpath(x) == os.path.realpath(y):
					rValue[x].add(y)
					self.findDeepProviders(obj=y, rValue=rValue)
				elif os.path.realpath(os.path.dirname(y)) in path:
					rValue[x].add(y)
					self.findDeepProviders(obj=y, rValue=rValue)
		return rValue
	def listLibraryObjects(self):
		rValue = []
		if not self._libs:
			self.rebuild()
		for soname in self._libs:
			for arch in self._libs[soname]:
				rValue.extend(self._libs[soname][arch]["providers"])
		return rValue
	def findProviders(self, obj):
		if not self._libs:
			self.rebuild()
		rValue = {}
		if obj not in self._obj_properties:
			obj = os.path.realpath(obj)
			if obj not in self._obj_properties:
				raise KeyError("%s not in object list" % obj)
		arch, needed, path, soname = self._obj_properties[obj]
		path = [os.path.realpath(x) for x in path + self._defpath]
		for x in needed:
			rValue[x] = set()
			if x not in self._libs or arch not in self._libs[x]:
				continue
			for y in self._libs[x][arch]["providers"]:
				if x[0] == os.sep and os.path.realpath(x) == os.path.realpath(y):
					rValue[x].add(y)
				elif os.path.realpath(os.path.dirname(y)) in path:
					rValue[x].add(y)
		return rValue
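
	# Illustrative sketch, not in the original source (paths hypothetical):
	#
	#	linkmap.findProviders("/usr/bin/foo")
	#	# -> {'libbar.so.1': set(['/usr/lib/libbar.so.1']),
	#	#     'libc.so.6': set(['/lib/libc.so.6'])}
	#
	# A needed soname maps to an empty set when nothing in the binary's
	# runpath or the default library path provides it.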
	def findConsumers(self, obj):
		if not self._libs:
			self.rebuild()
		if obj not in self._obj_properties:
			obj = os.path.realpath(obj)
			if obj not in self._obj_properties:
				raise KeyError("%s not in object list" % obj)
		rValue = set()
		for soname in self._libs:
			for arch in self._libs[soname]:
				if obj in self._libs[soname][arch]["providers"]:
					for x in self._libs[soname][arch]["consumers"]:
						path = self._obj_properties[x][2]
						path = [os.path.realpath(y) for y in path+self._defpath]
						if soname[0] == os.sep and os.path.realpath(soname) == os.path.realpath(obj):
							rValue.add(x)
						elif os.path.realpath(os.path.dirname(obj)) in path:
							rValue.add(x)
		return rValue
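
# Illustrative sketch, not in the original source: findConsumers() answers
# the reverse question, e.g. before a preserved library is unlinked during
# unmerge (path hypothetical):
#
#	if not vardb.linkmap.findConsumers("/usr/lib/libbar.so.1"):
#		# nothing links against it anymore, safe to remove
#		os.unlink("/usr/lib/libbar.so.1")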
class vardbapi(dbapi):

	_excluded_dirs = ["CVS", "lost+found"]
	_excluded_dirs = [re.escape(x) for x in _excluded_dirs]
	_excluded_dirs = re.compile(r'^(\..*|-MERGING-.*|' + \
		"|".join(_excluded_dirs) + r')$')

	_aux_cache_version = "1"
	_owners_cache_version = "1"

	# Number of uncached packages to trigger cache update, since
	# it's wasteful to update it for every vdb change.
	_aux_cache_threshold = 5

	_aux_cache_keys_re = re.compile(r'^NEEDED\..*$')
	_aux_multi_line_re = re.compile(r'^(CONTENTS|NEEDED\..*)$')

	def __init__(self, root, categories=None, settings=None, vartree=None):
		"""
		The categories parameter is unused since the dbapi class
		now has a categories property that is generated from the
		available packages.
		"""
		self.root = root[:]

		#cache for category directory mtimes
		self.mtdircache = {}

		#cache for dependency checks
		self.matchcache = {}

		#cache for cp_list results
		self.cpcache = {}

		self.blockers = None
		if settings is None:
			from portage import settings
		self.settings = settings
		if vartree is None:
			from portage import db
			vartree = db[root]["vartree"]
		self.vartree = vartree
		self._aux_cache_keys = set(
			["CHOST", "COUNTER", "DEPEND", "DESCRIPTION",
			"EAPI", "HOMEPAGE", "IUSE", "KEYWORDS",
			"LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
			"repository", "RESTRICT", "SLOT", "USE"])
		self._aux_cache_obj = None
		self._aux_cache_filename = os.path.join(self.root,
			CACHE_PATH.lstrip(os.path.sep), "vdb_metadata.pickle")
		self._counter_path = os.path.join(root,
			CACHE_PATH.lstrip(os.path.sep), "counter")

		try:
			self.plib_registry = PreservedLibsRegistry(
				os.path.join(self.root, PRIVATE_PATH, "preserved_libs_registry"))
		except PermissionDenied:
			# apparently this user isn't allowed to access PRIVATE_PATH
			self.plib_registry = None

		self.linkmap = LinkageMap(self)
		self._owners = self._owners_db(self)
	def getpath(self, mykey, filename=None):
		rValue = os.path.join(self.root, VDB_PATH, mykey)
		if filename != None:
			rValue = os.path.join(rValue, filename)
		return rValue

	def cpv_exists(self, mykey):
		"Tells us whether an actual ebuild exists on disk (no masking)"
		return os.path.exists(self.getpath(mykey))

	def cpv_counter(self, mycpv):
		"This method will grab the COUNTER. Returns a counter value."
		try:
			return long(self.aux_get(mycpv, ["COUNTER"])[0])
		except (KeyError, ValueError):
			pass
		cdir = self.getpath(mycpv)
		cpath = self.getpath(mycpv, filename="COUNTER")

		# We write our new counter value to a new file that gets moved into
		# place to avoid filesystem corruption on XFS (unexpected reboot.)
		corrupted = 0
		if os.path.exists(cpath):
			cfile = open(cpath, "r")
			try:
				counter = long(cfile.readline())
			except ValueError:
				print "portage: COUNTER for", mycpv, "was corrupted; resetting to value of 0"
				counter = long(0)
				corrupted = 1
			cfile.close()
		elif os.path.exists(cdir):
			mys = pkgsplit(mycpv)
			myl = self.match(mys[0], use_cache=0)
			print mys, myl
			if len(myl) == 1:
				try:
					# Only one package... Counter doesn't matter.
					write_atomic(cpath, "1")
					counter = 1
				except SystemExit, e:
					raise
				except Exception, e:
					writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
						noiselevel=-1)
					writemsg("!!! Please run %s/fix-db.py or\n" % PORTAGE_BIN_PATH,
						noiselevel=-1)
					writemsg("!!! unmerge this exact version.\n", noiselevel=-1)
					writemsg("!!! %s\n" % e, noiselevel=-1)
					sys.exit(1)
			else:
				writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
					noiselevel=-1)
				writemsg("!!! Please run %s/fix-db.py or\n" % PORTAGE_BIN_PATH,
					noiselevel=-1)
				writemsg("!!! remerge the package.\n", noiselevel=-1)
				sys.exit(1)
		else:
			counter = long(0)
		if corrupted:
			# update new global counter file
			write_atomic(cpath, str(counter))
		return counter
	def cpv_inject(self, mycpv):
		"injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
		os.makedirs(self.getpath(mycpv))
		counter = self.counter_tick(self.root, mycpv=mycpv)
		# write local package counter so that emerge clean does the right thing
		write_atomic(self.getpath(mycpv, filename="COUNTER"), str(counter))

	def isInjected(self, mycpv):
		if self.cpv_exists(mycpv):
			if os.path.exists(self.getpath(mycpv, filename="INJECTED")):
				return True
			if not os.path.exists(self.getpath(mycpv, filename="CONTENTS")):
				return True
		return False

	def move_ent(self, mylist):
		origcp = mylist[1]
		newcp = mylist[2]

		# sanity check
		for cp in [origcp, newcp]:
			if not (isvalidatom(cp) and isjustname(cp)):
				raise InvalidPackageName(cp)
		origmatches = self.match(origcp, use_cache=0)
		moves = 0
		if not origmatches:
			return moves
		for mycpv in origmatches:
			mycpsplit = catpkgsplit(mycpv)
			mynewcpv = newcp + "-" + mycpsplit[2]
			mynewcat = newcp.split("/")[0]
			if mycpsplit[3] != "r0":
				mynewcpv += "-" + mycpsplit[3]
			mycpsplit_new = catpkgsplit(mynewcpv)
			origpath = self.getpath(mycpv)
			if not os.path.exists(origpath):
				continue
			moves += 1
			if not os.path.exists(self.getpath(mynewcat)):
				#create the directory
				os.makedirs(self.getpath(mynewcat))
			newpath = self.getpath(mynewcpv)
			if os.path.exists(newpath):
				#dest already exists; keep this puppy where it is.
				continue
			_movefile(origpath, newpath, mysettings=self.settings)

			# We need to rename the ebuild now.
			old_pf = catsplit(mycpv)[1]
			new_pf = catsplit(mynewcpv)[1]
			if new_pf != old_pf:
				try:
					os.rename(os.path.join(newpath, old_pf + ".ebuild"),
						os.path.join(newpath, new_pf + ".ebuild"))
				except EnvironmentError, e:
					if e.errno != errno.ENOENT:
						raise
					del e
			write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
			write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
			fixdbentries([mylist], newpath)
		return moves
	def cp_list(self, mycp, use_cache=1):
		mysplit = catsplit(mycp)
		if mysplit[0] == '*':
			mysplit[0] = mysplit[0][1:]
		try:
			mystat = os.stat(self.getpath(mysplit[0]))[stat.ST_MTIME]
		except OSError:
			mystat = 0
		if use_cache and self.cpcache.has_key(mycp):
			cpc = self.cpcache[mycp]
			if cpc[0] == mystat:
				return cpc[1][:]
		cat_dir = self.getpath(mysplit[0])
		try:
			dir_list = os.listdir(cat_dir)
		except EnvironmentError, e:
			from portage.exception import PermissionDenied
			if e.errno == PermissionDenied.errno:
				raise PermissionDenied(cat_dir)
			del e
			dir_list = []

		returnme = []
		for x in dir_list:
			if self._excluded_dirs.match(x) is not None:
				continue
			ps = pkgsplit(x)
			if not ps:
				self.invalidentry(os.path.join(self.getpath(mysplit[0]), x))
				continue
			if len(mysplit) > 1:
				if ps[0] == mysplit[1]:
					returnme.append(mysplit[0]+"/"+x)
		self._cpv_sort_ascending(returnme)
		if use_cache:
			self.cpcache[mycp] = [mystat, returnme[:]]
		elif self.cpcache.has_key(mycp):
			del self.cpcache[mycp]
		return returnme
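
	# Illustrative sketch, not in the original source (cpvs hypothetical):
	#
	#	vardb.cp_list("sys-apps/portage")
	#	# -> ['sys-apps/portage-2.1.4', 'sys-apps/portage-2.1.5']
	#
	# Results are sorted in ascending version order and cached against the
	# mtime of the category directory.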
	def cpv_all(self, use_cache=1):
		"""
		Set use_cache=0 to bypass the portage.cachedir() cache in cases
		when the accuracy of mtime staleness checks should not be trusted
		(generally this is only necessary in critical sections that
		involve merge or unmerge of packages).
		"""
		returnme = []
		basepath = os.path.join(self.root, VDB_PATH) + os.path.sep

		if use_cache:
			from portage import listdir
		else:
			def listdir(p, **kwargs):
				try:
					return [x for x in os.listdir(p) \
						if os.path.isdir(os.path.join(p, x))]
				except EnvironmentError, e:
					if e.errno == PermissionDenied.errno:
						raise PermissionDenied(p)
					del e
					return []

		for x in listdir(basepath, EmptyOnError=1, ignorecvs=1, dirsonly=1):
			if self._excluded_dirs.match(x) is not None:
				continue
			if not self._category_re.match(x):
				continue
			for y in listdir(basepath + x, EmptyOnError=1, dirsonly=1):
				if self._excluded_dirs.match(y) is not None:
					continue
				subpath = x + "/" + y
				# -MERGING- should never be a cpv, nor should files.
				try:
					if catpkgsplit(subpath) is None:
						self.invalidentry(os.path.join(self.root, subpath))
						continue
				except InvalidData:
					self.invalidentry(os.path.join(self.root, subpath))
					continue
				returnme.append(subpath)
		return returnme
	def cp_all(self, use_cache=1):
		mylist = self.cpv_all(use_cache=use_cache)
		d = {}
		for y in mylist:
			if y[0] == '*':
				y = y[1:]
			try:
				mysplit = catpkgsplit(y)
			except InvalidData:
				self.invalidentry(self.getpath(y))
				continue
			if not mysplit:
				self.invalidentry(self.getpath(y))
				continue
			d[mysplit[0]+"/"+mysplit[1]] = None
		return d.keys()

	def checkblockers(self, origdep):
		pass

	def _add(self, pkg_dblink):
		self._clear_cache(pkg_dblink)

	def _remove(self, pkg_dblink):
		self._clear_cache(pkg_dblink)

	def _clear_cache(self, pkg_dblink):
		# Due to 1 second mtime granularity in <python-2.5, mtime checks
		# are not always sufficient to invalidate vardbapi caches. Therefore,
		# the caches need to be actively invalidated here.
		self.mtdircache.pop(pkg_dblink.cat, None)
		self.matchcache.pop(pkg_dblink.cat, None)
		self.cpcache.pop(pkg_dblink.mysplit[0], None)
		from portage import dircache
		dircache.pop(pkg_dblink.dbcatdir, None)
	def match(self, origdep, use_cache=1):
		"caching match function"
		mydep = dep_expand(
			origdep, mydb=self, use_cache=use_cache, settings=self.settings)
		mykey = dep_getkey(mydep)
		mycat = catsplit(mykey)[0]
		if not use_cache:
			if self.matchcache.has_key(mycat):
				del self.mtdircache[mycat]
				del self.matchcache[mycat]
			return list(self._iter_match(mydep,
				self.cp_list(mydep.cp, use_cache=use_cache)))
		try:
			curmtime = os.stat(self.root+VDB_PATH+"/"+mycat).st_mtime
		except (IOError, OSError):
			curmtime = 0

		if not self.matchcache.has_key(mycat) or not self.mtdircache[mycat] == curmtime:
			# clear cache entry
			self.mtdircache[mycat] = curmtime
			self.matchcache[mycat] = {}
		if not self.matchcache[mycat].has_key(mydep):
			mymatch = list(self._iter_match(mydep,
				self.cp_list(mydep.cp, use_cache=use_cache)))
			self.matchcache[mycat][mydep] = mymatch
		return self.matchcache[mycat][mydep][:]
	def findname(self, mycpv):
		return self.getpath(str(mycpv), filename=catsplit(mycpv)[1]+".ebuild")

	def flush_cache(self):
		"""If the current user has permission and the internal aux_get cache has
		been updated, save it to disk and mark it unmodified. This is called
		by emerge after it has loaded the full vdb for use in dependency
		calculations. Currently, the cache is only written if the user has
		superuser privileges (since that's required to obtain a lock), but all
		users have read access and benefit from faster metadata lookups (as
		long as at least part of the cache is still valid)."""
		if self._aux_cache is not None and \
			len(self._aux_cache["modified"]) >= self._aux_cache_threshold and \
			secpass >= 2:
			self._owners.populate() # index any unindexed contents
			valid_nodes = set(self.cpv_all())
			for cpv in self._aux_cache["packages"].keys():
				if cpv not in valid_nodes:
					del self._aux_cache["packages"][cpv]
			del self._aux_cache["modified"]
			try:
				f = atomic_ofstream(self._aux_cache_filename)
				cPickle.dump(self._aux_cache, f, -1)
				f.close()
				apply_secpass_permissions(
					self._aux_cache_filename, gid=portage_gid, mode=0644)
			except (IOError, OSError), e:
				pass
			self._aux_cache["modified"] = set()
	@property
	def _aux_cache(self):
		if self._aux_cache_obj is None:
			self._aux_cache_init()
		return self._aux_cache_obj

	def _aux_cache_init(self):
		aux_cache = None
		try:
			f = open(self._aux_cache_filename)
			mypickle = cPickle.Unpickler(f)
			mypickle.find_global = None
			aux_cache = mypickle.load()
			f.close()
			del f
		except (IOError, OSError, EOFError, cPickle.UnpicklingError), e:
			if isinstance(e, cPickle.UnpicklingError):
				writemsg("!!! Error loading '%s': %s\n" % \
					(self._aux_cache_filename, str(e)), noiselevel=-1)
			del e

		if not aux_cache or \
			not isinstance(aux_cache, dict) or \
			aux_cache.get("version") != self._aux_cache_version or \
			not aux_cache.get("packages"):
			aux_cache = {"version": self._aux_cache_version}
			aux_cache["packages"] = {}

		owners = aux_cache.get("owners")
		if owners is not None:
			if not isinstance(owners, dict):
				owners = None
			elif "version" not in owners:
				owners = None
			elif owners["version"] != self._owners_cache_version:
				owners = None
			elif "base_names" not in owners:
				owners = None
			elif not isinstance(owners["base_names"], dict):
				owners = None

		if owners is None:
			owners = {
				"base_names" : {},
				"version" : self._owners_cache_version
			}
			aux_cache["owners"] = owners

		aux_cache["modified"] = set()
		self._aux_cache_obj = aux_cache
	def aux_get(self, mycpv, wants):
		"""This automatically caches selected keys that are frequently needed
		by emerge for dependency calculations. The cached metadata is
		considered valid if the mtime of the package directory has not changed
		since the data was cached. The cache is stored in a pickled dict
		object with the following format:

		{"version":"1", "packages":{cpv1:(mtime,{k1:v1, k2:v2, ...}), cpv2...}}

		If an error occurs while loading the cache pickle or the version is
		unrecognized, the cache will simply be recreated from scratch (it is
		completely disposable).
		"""
		cache_these_wants = self._aux_cache_keys.intersection(wants)
		for x in wants:
			if self._aux_cache_keys_re.match(x) is not None:
				cache_these_wants.add(x)

		if not cache_these_wants:
			return self._aux_get(mycpv, wants)

		cache_these = set(self._aux_cache_keys)
		cache_these.update(cache_these_wants)

		mydir = self.getpath(mycpv)
		mydir_stat = None
		try:
			mydir_stat = os.stat(mydir)
		except OSError, e:
			if e.errno != errno.ENOENT:
				raise
			raise KeyError(mycpv)
		mydir_mtime = long(mydir_stat.st_mtime)
		pkg_data = self._aux_cache["packages"].get(mycpv)
		pull_me = cache_these.union(wants)
		mydata = {"_mtime_" : mydir_mtime}
		cache_valid = False
		cache_incomplete = False
		cache_mtime = None
		metadata = None
		if pkg_data is not None:
			if not isinstance(pkg_data, tuple) or len(pkg_data) != 2:
				pkg_data = None
			else:
				cache_mtime, metadata = pkg_data
				if not isinstance(cache_mtime, (long, int)) or \
					not isinstance(metadata, dict):
					pkg_data = None

		if pkg_data:
			cache_mtime, metadata = pkg_data
			cache_valid = cache_mtime == mydir_mtime
		if cache_valid:
			mydata.update(metadata)
			pull_me.difference_update(metadata)

		if pull_me:
			# pull any needed data and cache it
			aux_keys = list(pull_me)
			for k, v in izip(aux_keys,
				self._aux_get(mycpv, aux_keys, st=mydir_stat)):
				mydata[k] = v
			if not cache_valid or cache_these.difference(metadata):
				cache_data = {}
				if cache_valid and metadata:
					cache_data.update(metadata)
				for aux_key in cache_these:
					cache_data[aux_key] = mydata[aux_key]
				self._aux_cache["packages"][mycpv] = (mydir_mtime, cache_data)
				self._aux_cache["modified"].add(mycpv)
		return [mydata[x] for x in wants]
	def _aux_get(self, mycpv, wants, st=None):
		mydir = self.getpath(mycpv)
		if st is None:
			try:
				st = os.stat(mydir)
			except OSError, e:
				if e.errno == errno.ENOENT:
					raise KeyError(mycpv)
				elif e.errno == PermissionDenied.errno:
					raise PermissionDenied(mydir)
				else:
					raise
		if not stat.S_ISDIR(st.st_mode):
			raise KeyError(mycpv)
		results = []
		for x in wants:
			if x == "_mtime_":
				results.append(st.st_mtime)
				continue
			try:
				myf = open(os.path.join(mydir, x), "r")
				try:
					myd = myf.read()
				finally:
					myf.close()
				# Preserve \n for metadata that is known to
				# contain multiple lines.
				if self._aux_multi_line_re.match(x) is None:
					myd = " ".join(myd.split())
			except IOError:
				myd = ""
			if x == "EAPI" and not myd:
				results.append("0")
			else:
				results.append(myd)
		return results
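
	# Illustrative sketch, not in the original source (cpv hypothetical):
	#
	#	slot, counter = vardb.aux_get("sys-libs/zlib-1.2.3", ["SLOT", "COUNTER"])
	#
	# Keys in _aux_cache_keys (plus anything matching NEEDED.*) are served
	# from the pickled cache as long as the package directory's mtime still
	# matches; everything else falls through to _aux_get().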
	def aux_update(self, cpv, values):
		cat, pkg = catsplit(cpv)
		mylink = dblink(cat, pkg, self.root, self.settings,
			treetype="vartree", vartree=self.vartree)
		if not mylink.exists():
			raise KeyError(cpv)
		for k, v in values.iteritems():
			if v:
				mylink.setfile(k, v)
			else:
				try:
					os.unlink(os.path.join(self.getpath(cpv), k))
				except EnvironmentError:
					pass
	def counter_tick(self, myroot, mycpv=None):
		return self.counter_tick_core(myroot, incrementing=1, mycpv=mycpv)

	def get_counter_tick_core(self, myroot, mycpv=None):
		"""
		Use this method to retrieve the counter instead
		of having to trust the value of a global counter
		file that can lead to invalid COUNTER
		generation. When cache is valid, the package COUNTER
		files are not read and we rely on the timestamp of
		the package directory to validate cache. The stat
		calls should only take a short time, so performance
		is sufficient without having to rely on a potentially
		corrupt global counter file.

		The global counter file located at
		$CACHE_PATH/counter serves to record the
		counter of the last installed package and
		it also corresponds to the total number of
		installation actions that have occurred in
		the history of this package database.
		"""
		cp_list = self.cp_list
		max_counter = 0
		for cp in self.cp_all():
			for cpv in cp_list(cp):
				try:
					counter = int(self.aux_get(cpv, ["COUNTER"])[0])
				except (KeyError, OverflowError, ValueError):
					continue
				if counter > max_counter:
					max_counter = counter

		counter = -1
		try:
			cfile = open(self._counter_path, "r")
		except EnvironmentError, e:
			writemsg("!!! Unable to read COUNTER file: '%s'\n" % \
				self._counter_path, noiselevel=-1)
			writemsg("!!! %s\n" % str(e), noiselevel=-1)
			del e
		else:
			try:
				try:
					counter = long(cfile.readline().strip())
				finally:
					cfile.close()
			except (OverflowError, ValueError), e:
				writemsg("!!! COUNTER file is corrupt: '%s'\n" % \
					self._counter_path, noiselevel=-1)
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				del e

		# We must ensure that we return a counter
		# value that is at least as large as the
		# highest one from the installed packages,
		# since having a corrupt value that is too low
		# can trigger incorrect AUTOCLEAN behavior due
		# to newly installed packages having lower
		# COUNTERs than the previous version in the
		# same slot.
		if counter > max_counter:
			max_counter = counter

		if counter < 0:
			writemsg("!!! Initializing COUNTER to " + \
				"value of %d\n" % max_counter, noiselevel=-1)

		return max_counter + 1

	def counter_tick_core(self, myroot, incrementing=1, mycpv=None):
		"This method will grab the next COUNTER value and record it back to the global file. Returns new counter value."
		counter = self.get_counter_tick_core(myroot, mycpv=mycpv) - 1
		if incrementing:
			#increment counter
			counter += 1
			# update new global counter file
			write_atomic(self._counter_path, str(counter))
		return counter
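
	# Illustrative sketch, not in the original source: the global counter
	# only ever moves forward.
	#
	#	nextval = vardb.counter_tick("/")         # increments and records
	#	peek = vardb.get_counter_tick_core("/")   # reads without writing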
	def _dblink(self, cpv):
		category, pf = catsplit(cpv)
		return dblink(category, pf, self.root,
			self.settings, vartree=self.vartree)
	class _owners_cache(object):
		"""
		This class maintains a hash table that serves to index package
		contents by mapping the basename of a file to a list of possible
		packages that own it. This is used to optimize owner lookups
		by narrowing the search down to a smaller number of packages.
		"""
		try:
			from hashlib import md5 as _new_hash
		except ImportError:
			from md5 import new as _new_hash

		_hash_bits = 16
		_hex_chars = _hash_bits / 4

		def __init__(self, vardb):
			self._vardb = vardb

		def add(self, cpv):
			root_len = len(self._vardb.root)
			contents = self._vardb._dblink(cpv).getcontents()
			pkg_hash = self._hash_pkg(cpv)
			if not contents:
				# Empty path is a code used to represent empty contents.
				self._add_path("", pkg_hash)
			for x in contents:
				self._add_path(x[root_len:], pkg_hash)
			self._vardb._aux_cache["modified"].add(cpv)

		def _add_path(self, path, pkg_hash):
			"""
			Empty path is a code that represents empty contents.
			"""
			if path:
				name = os.path.basename(path.rstrip(os.path.sep))
				if not name:
					return
			else:
				name = path
			name_hash = self._hash_str(name)
			base_names = self._vardb._aux_cache["owners"]["base_names"]
			pkgs = base_names.get(name_hash)
			if pkgs is None:
				pkgs = {}
				base_names[name_hash] = pkgs
			pkgs[pkg_hash] = None

		def _hash_str(self, s):
			h = self._new_hash()
			h.update(s)
			h = h.hexdigest()
			h = h[-self._hex_chars:]
			h = int(h, 16)
			return h

		def _hash_pkg(self, cpv):
			counter, mtime = self._vardb.aux_get(
				cpv, ["COUNTER", "_mtime_"])
			try:
				counter = int(counter)
			except ValueError:
				counter = 0
			return (cpv, counter, mtime)
	class _owners_db(object):

		def __init__(self, vardb):
			self._vardb = vardb

		def populate(self):
			self._populate()

		def _populate(self):
			owners_cache = vardbapi._owners_cache(self._vardb)
			cached_hashes = set()
			base_names = self._vardb._aux_cache["owners"]["base_names"]

			# Take inventory of all cached package hashes.
			for name, hash_values in base_names.items():
				if not isinstance(hash_values, dict):
					del base_names[name]
					continue
				cached_hashes.update(hash_values)

			# Create sets of valid package hashes and uncached packages.
			uncached_pkgs = set()
			hash_pkg = owners_cache._hash_pkg
			valid_pkg_hashes = set()
			for cpv in self._vardb.cpv_all():
				hash_value = hash_pkg(cpv)
				valid_pkg_hashes.add(hash_value)
				if hash_value not in cached_hashes:
					uncached_pkgs.add(cpv)

			# Cache any missing packages.
			for cpv in uncached_pkgs:
				owners_cache.add(cpv)

			# Delete any stale cache.
			stale_hashes = cached_hashes.difference(valid_pkg_hashes)
			if stale_hashes:
				for base_name_hash, bucket in base_names.items():
					for hash_value in stale_hashes.intersection(bucket):
						del bucket[hash_value]
					if not bucket:
						del base_names[base_name_hash]

			return owners_cache

		def get_owners(self, path_iter):
			"""
			@return: the owners as a dblink -> set(files) mapping.
			"""
			owners = {}
			for owner, f in self.iter_owners(path_iter):
				owned_files = owners.get(owner)
				if owned_files is None:
					owned_files = set()
					owners[owner] = owned_files
				owned_files.add(f)
			return owners
		def iter_owners(self, path_iter):
			"""
			Iterate over tuples of (dblink, path). In order to avoid
			consuming too many resources for too much time, resources
			are only allocated for the duration of a given iter_owners()
			call. Therefore, to maximize reuse of resources when searching
			for multiple files, it's best to search for them all in a single
			call.
			"""

			owners_cache = self._populate()

			vardb = self._vardb
			root = vardb.root
			hash_pkg = owners_cache._hash_pkg
			hash_str = owners_cache._hash_str
			base_names = self._vardb._aux_cache["owners"]["base_names"]

			dblink_cache = {}

			def dblink(cpv):
				x = dblink_cache.get(cpv)
				if x is None:
					x = self._vardb._dblink(cpv)
					dblink_cache[cpv] = x
				return x

			for path in path_iter:
				name = os.path.basename(path.rstrip(os.path.sep))
				if not name:
					continue

				name_hash = hash_str(name)
				pkgs = base_names.get(name_hash)
				if pkgs is not None:
					for hash_value in pkgs:
						if not isinstance(hash_value, tuple) or \
							len(hash_value) != 3:
							continue
						cpv, counter, mtime = hash_value
						if not isinstance(cpv, basestring):
							continue
						try:
							current_hash = hash_pkg(cpv)
						except KeyError:
							continue

						if current_hash != hash_value:
							continue
						if dblink(cpv).isowner(path, root):
							yield dblink(cpv), path
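
		# Illustrative sketch, not in the original source (path hypothetical):
		#
		#	for pkg_dblink, f in vardb._owners.iter_owners(["/usr/bin/foo"]):
		#		print pkg_dblink.mycpv, "owns", f
		#
		# Lookups consult the base-name hash index first, so only packages
		# that install a matching base name are verified with isowner().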
class vartree(object):
	"this tree will scan a var/db/pkg database located at root (passed to init)"
	def __init__(self, root="/", virtual=None, clone=None, categories=None,
		settings=None):
		if clone:
			writemsg("vartree.__init__(): deprecated " + \
				"use of clone parameter\n", noiselevel=-1)
			self.root = clone.root[:]
			self.dbapi = copy.deepcopy(clone.dbapi)
			self.populated = 1
			from portage import config
			self.settings = config(clone=clone.settings)
		else:
			self.root = root[:]
			if settings is None:
				from portage import settings
			self.settings = settings # for key_expand calls
			if categories is None:
				categories = settings.categories
			self.dbapi = vardbapi(self.root, categories=categories,
				settings=settings, vartree=self)
			self.populated = 1

	def getpath(self, mykey, filename=None):
		return self.dbapi.getpath(mykey, filename=filename)

	def zap(self, mycpv):
		return

	def inject(self, mycpv):
		return

	def get_provide(self, mycpv):
		myprovides = []
		mylines = None
		try:
			mylines, myuse = self.dbapi.aux_get(mycpv, ["PROVIDE", "USE"])
			if mylines:
				myuse = myuse.split()
				mylines = flatten(use_reduce(paren_reduce(mylines), uselist=myuse))
				for myprovide in mylines:
					mys = catpkgsplit(myprovide)
					if not mys:
						mys = myprovide.split("/")
					myprovides += [mys[0] + "/" + mys[1]]
			return myprovides
		except SystemExit, e:
			raise
		except Exception, e:
			mydir = os.path.join(self.root, VDB_PATH, mycpv)
			writemsg("\nParse Error reading PROVIDE and USE in '%s'\n" % mydir,
				noiselevel=-1)
			if mylines:
				writemsg("Possibly Invalid: '%s'\n" % str(mylines),
					noiselevel=-1)
			writemsg("Exception: %s\n\n" % str(e), noiselevel=-1)
			return []

	def get_all_provides(self):
		myprovides = {}
		for node in self.getallcpv():
			for mykey in self.get_provide(node):
				if myprovides.has_key(mykey):
					myprovides[mykey] += [node]
				else:
					myprovides[mykey] = [node]
		return myprovides

	def dep_bestmatch(self, mydep, use_cache=1):
		"compatibility method -- all matches, not just visible ones"
		#mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
		mymatch = best(self.dbapi.match(
			dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
			use_cache=use_cache))
		if mymatch is None:
			return ""
		else:
			return mymatch

	def dep_match(self, mydep, use_cache=1):
		"compatibility method -- we want to see all matches, not just visible ones"
		#mymatch = match(mydep,self.dbapi)
		mymatch = self.dbapi.match(mydep, use_cache=use_cache)
		if mymatch is None:
			return []
		else:
			return mymatch

	def exists_specific(self, cpv):
		return self.dbapi.cpv_exists(cpv)

	def getallcpv(self):
		"""temporary function, probably to be renamed --- Gets a list of all
		category/package-versions installed on the system."""
		return self.dbapi.cpv_all()

	def getallnodes(self):
		"""new behavior: these are all *unmasked* nodes. There may or may not be
		available masked packages for nodes in this nodes list."""
		return self.dbapi.cp_all()

	def exists_specific_cat(self, cpv, use_cache=1):
		cpv = key_expand(cpv, mydb=self.dbapi, use_cache=use_cache,
			settings=self.settings)
		a = catpkgsplit(cpv)
		if not a:
			return 0
		mylist = listdir(self.getpath(a[0]), EmptyOnError=1)
		for x in mylist:
			b = pkgsplit(x)
			if not b:
				self.dbapi.invalidentry(self.getpath(a[0], filename=x))
				continue
			if a[1] == b[0]:
				return 1
		return 0

	def getebuildpath(self, fullpackage):
		cat, package = catsplit(fullpackage)
		return self.getpath(fullpackage, filename=package+".ebuild")

	def getnode(self, mykey, use_cache=1):
		mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
			settings=self.settings)
		if not mykey:
			return []
		mysplit = catsplit(mykey)
		mydirlist = listdir(self.getpath(mysplit[0]), EmptyOnError=1)
		returnme = []
		for x in mydirlist:
			mypsplit = pkgsplit(x)
			if not mypsplit:
				self.dbapi.invalidentry(self.getpath(mysplit[0], filename=x))
				continue
			if mypsplit[0] == mysplit[1]:
				appendme = [mysplit[0]+"/"+x, [mysplit[0], mypsplit[0], mypsplit[1], mypsplit[2]]]
				returnme.append(appendme)
		return returnme

	def getslot(self, mycatpkg):
		"Get a slot for a catpkg; assume it exists."
		try:
			return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
		except KeyError:
			return ""

	def hasnode(self, mykey, use_cache):
		"""Does the particular node (cat/pkg key) exist?"""
		mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
			settings=self.settings)
		mysplit = catsplit(mykey)
		mydirlist = listdir(self.getpath(mysplit[0]), EmptyOnError=1)
		for x in mydirlist:
			mypsplit = pkgsplit(x)
			if not mypsplit:
				self.dbapi.invalidentry(self.getpath(mysplit[0], filename=x))
				continue
			if mypsplit[0] == mysplit[1]:
				return 1
		return 0

	def populate(self):
		self.populated = 1
class dblink(object):
	"""
	This class provides an interface to the installed package database.
	At present this is implemented as a text backend in /var/db/pkg.
	"""

	import re
	_normalize_needed = re.compile(r'.*//.*|^[^/]|.+/$|(^|.*/)\.\.?(/.*|$)')
	_contents_split_counts = {
		"dev": 2,
		"dir": 2,
		"fif": 2,
		"obj": 4,
		"sym": 5
	}

	def __init__(self, cat, pkg, myroot, mysettings, treetype=None,
		vartree=None, blockers=None):
		"""
		Creates a DBlink object for a given CPV.
		The given CPV may not be present in the database already.

		@param cat: Category
		@type cat: String
		@param pkg: Package (PV)
		@type pkg: String
		@param myroot: Typically ${ROOT}
		@type myroot: String (Path)
		@param mysettings: Typically portage.config
		@type mysettings: An instance of portage.config
		@param treetype: one of ['porttree','bintree','vartree']
		@type treetype: String
		@param vartree: an instance of vartree corresponding to myroot.
		@type vartree: vartree
		"""

		self.cat = cat
		self.pkg = pkg
		self.mycpv = self.cat + "/" + self.pkg
		self.mysplit = list(catpkgsplit(self.mycpv)[1:])
		self.mysplit[0] = "%s/%s" % (self.cat, self.mysplit[0])
		self.treetype = treetype
		if vartree is None:
			from portage import db
			vartree = db[myroot]["vartree"]
		self.vartree = vartree
		self._blockers = blockers

		self.dbroot = normalize_path(os.path.join(myroot, VDB_PATH))
		self.dbcatdir = self.dbroot+"/"+cat
		self.dbpkgdir = self.dbcatdir+"/"+pkg
		self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
		self.dbdir = self.dbpkgdir

		self._lock_vdb = None

		self.settings = mysettings
		if self.settings == 1:
			raise ValueError

		self.myroot = myroot
		protect_obj = ConfigProtect(myroot,
			mysettings.get("CONFIG_PROTECT","").split(),
			mysettings.get("CONFIG_PROTECT_MASK","").split())
		self.updateprotect = protect_obj.updateprotect
		self.isprotected = protect_obj.isprotected
		self._installed_instance = None
		self.contentscache = None
		self._contents_inodes = None
		self._contents_basenames = None
	def lockdb(self):
		if self._lock_vdb:
			raise AssertionError("Lock already held.")
		# At least the parent needs to exist for the lock file.
		ensure_dirs(self.dbroot)
		self._lock_vdb = lockdir(self.dbroot)

	def unlockdb(self):
		if self._lock_vdb:
			unlockdir(self._lock_vdb)
			self._lock_vdb = None

	def getpath(self):
		"return path to location of db information (for >>> informational display)"
		return self.dbdir

	def exists(self):
		"does the db entry exist? boolean."
		return os.path.exists(self.dbdir)

	def delete(self):
		"""
		Remove this entry from the database
		"""
		if not os.path.exists(self.dbdir):
			return

		# Check validity of self.dbdir before attempting to remove it.
		if not self.dbdir.startswith(self.dbroot):
			writemsg("portage.dblink.delete(): invalid dbdir: %s\n" % \
				self.dbdir, noiselevel=-1)
			return
		import shutil
		shutil.rmtree(self.dbdir)
		self.vartree.dbapi._remove(self)

	def clearcontents(self):
		"""
		For a given db entry (self), erase the CONTENTS values.
		"""
		if os.path.exists(self.dbdir+"/CONTENTS"):
			os.unlink(self.dbdir+"/CONTENTS")

	def _clear_contents_cache(self):
		self.contentscache = None
		self._contents_inodes = None
		self._contents_basenames = None

	def getcontents(self):
		"""
		Get the installed files of a given package (aka what that package installed)
		"""
		contents_file = os.path.join(self.dbdir, "CONTENTS")
		if self.contentscache is not None:
			return self.contentscache
		pkgfiles = {}
		try:
			myc = open(contents_file, "r")
		except EnvironmentError, e:
			if e.errno != errno.ENOENT:
				raise
			del e
			self.contentscache = pkgfiles
			return pkgfiles
		mylines = myc.readlines()
		myc.close()
		null_byte = "\0"
		normalize_needed = self._normalize_needed
		contents_split_counts = self._contents_split_counts
		myroot = self.myroot
		if myroot == os.path.sep:
			myroot = None
		pos = 0
		errors = []
		for pos, line in enumerate(mylines):
			if null_byte in line:
				# Null bytes are a common indication of corruption.
				errors.append((pos + 1, "Null byte found in CONTENTS entry"))
				continue
			line = line.rstrip("\n")
			# Split on " " so that even file paths that
			# end with spaces can be handled.
			mydat = line.split(" ")
			entry_type = mydat[0] # empty string if line is empty
			correct_split_count = contents_split_counts.get(entry_type)
			if correct_split_count and len(mydat) > correct_split_count:
				# Apparently file paths contain spaces, so reassemble the
				# split fields so that they have the correct_split_count.
				newsplit = [entry_type]
				spaces_total = len(mydat) - correct_split_count
				if entry_type == "sym":
					try:
						splitter = mydat.index("->", 2, len(mydat) - 2)
					except ValueError:
						errors.append((pos + 1, "Unrecognized CONTENTS entry"))
						continue
					spaces_in_path = splitter - 2
					spaces_in_target = spaces_total - spaces_in_path
					newsplit.append(" ".join(mydat[1:splitter]))
					newsplit.append("->")
					target_end = splitter + spaces_in_target + 2
					newsplit.append(" ".join(mydat[splitter + 1:target_end]))
					newsplit.extend(mydat[target_end:])
				else:
					path_end = spaces_total + 2
					newsplit.append(" ".join(mydat[1:path_end]))
					newsplit.extend(mydat[path_end:])
				mydat = newsplit

			# we do this so we can remove from non-root filesystems
			# (use the ROOT var to allow maintenance on other partitions)
			try:
				if normalize_needed.match(mydat[1]):
					mydat[1] = normalize_path(mydat[1])
					if not mydat[1].startswith(os.path.sep):
						mydat[1] = os.path.sep + mydat[1]
				if myroot:
					mydat[1] = os.path.join(myroot, mydat[1].lstrip(os.path.sep))
				if mydat[0] == "obj":
					#format: type, mtime, md5sum
					pkgfiles[mydat[1]] = [mydat[0], mydat[3], mydat[2]]
				elif mydat[0] == "dir":
					#format: type
					pkgfiles[mydat[1]] = [mydat[0]]
				elif mydat[0] == "sym":
					#format: type, mtime, dest
					pkgfiles[mydat[1]] = [mydat[0], mydat[4], mydat[3]]
				elif mydat[0] == "dev":
					#format: type
					pkgfiles[mydat[1]] = [mydat[0]]
				elif mydat[0] == "fif":
					#format: type
					pkgfiles[mydat[1]] = [mydat[0]]
				else:
					errors.append((pos + 1, "Unrecognized CONTENTS entry"))
			except (KeyError, IndexError):
				errors.append((pos + 1, "Unrecognized CONTENTS entry"))
		if errors:
			writemsg("!!! Parse error in '%s'\n" % contents_file, noiselevel=-1)
			for pos, e in errors:
				writemsg("!!! line %d: %s\n" % (pos, e), noiselevel=-1)
		self.contentscache = pkgfiles
		return pkgfiles
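
	# Illustrative sketch, not in the original source: CONTENTS lines look
	# like (paths and checksum hypothetical)
	#
	#	dir /usr/bin
	#	obj /usr/bin/foo d41d8cd98f00b204e9800998ecf8427e 1213724531
	#	sym /usr/lib/libfoo.so -> libfoo.so.1 1213724531
	#
	# for which getcontents() returns entries such as:
	#
	#	{'/usr/bin/foo': ['obj', '1213724531', 'd41d8cd98f00b204e9800998ecf8427e'],
	#	 '/usr/lib/libfoo.so': ['sym', '1213724531', 'libfoo.so.1']}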
1673 def unmerge(self, pkgfiles=None, trimworld=1, cleanup=1,
1674 ldpath_mtimes=None, others_in_slot=None):
1676 Calls prerm
1677 Unmerges a given package (CPV)
1678 calls postrm
1679 calls cleanrm
1680 calls env_update
1682 @param pkgfiles: files to unmerge (generally self.getcontents() )
1683 @type pkgfiles: Dictionary
1684 @param trimworld: Remove CPV from world file if True, not if False
1685 @type trimworld: Boolean
1686 @param cleanup: cleanup to pass to doebuild (see doebuild)
1687 @type cleanup: Boolean
1688 @param ldpath_mtimes: mtimes to pass to env_update (see env_update)
1689 @type ldpath_mtimes: Dictionary
1690 @param others_in_slot: all dblink instances in this slot, excluding self
1691 @type others_in_slot: list
1692 @rtype: Integer
1693 @returns:
1694 1. os.EX_OK if everything went well.
1695 2. return code of the failed phase (for prerm, postrm, cleanrm)
1697 Notes:
1698 The caller must ensure that lockdb() and unlockdb() are called
1699 before and after this method.
1701 if self.vartree.dbapi._categories is not None:
1702 self.vartree.dbapi._categories = None
1703 # When others_in_slot is supplied, the security check has already been
1704 # done for this slot, so it shouldn't be repeated until the next
1705 # replacement or unmerge operation.
1706 if others_in_slot is None:
1707 slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
1708 slot_matches = self.vartree.dbapi.match(
1709 "%s:%s" % (dep_getkey(self.mycpv), slot))
1710 others_in_slot = []
1711 for cur_cpv in slot_matches:
1712 if cur_cpv == self.mycpv:
1713 continue
1714 others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
1715 self.vartree.root, self.settings, vartree=self.vartree))
1716 retval = self._security_check([self] + others_in_slot)
1717 if retval:
1718 return retval
1720 contents = self.getcontents()
1721 # Now, don't assume that the name of the ebuild is the same as the
1722 # name of the dir; the package may have been moved.
1723 myebuildpath = None
1724 ebuild_phase = "prerm"
1725 mystuff = listdir(self.dbdir, EmptyOnError=1)
1726 for x in mystuff:
1727 if x.endswith(".ebuild"):
1728 myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
1729 if x[:-7] != self.pkg:
1730 # Clean up after vardbapi.move_ent() breakage in
1731 # portage versions before 2.1.2
1732 os.rename(os.path.join(self.dbdir, x), myebuildpath)
1733 write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
1734 break
1736 self.settings.setcpv(self.mycpv, mydb=self.vartree.dbapi)
1737 if myebuildpath:
1738 try:
1739 doebuild_environment(myebuildpath, "prerm", self.myroot,
1740 self.settings, 0, 0, self.vartree.dbapi)
1741 except UnsupportedAPIException, e:
1742 # Sometimes this happens due to corruption of the EAPI file.
1743 writemsg("!!! FAILED prerm: %s\n" % \
1744 os.path.join(self.dbdir, "EAPI"), noiselevel=-1)
1745 writemsg("%s\n" % str(e), noiselevel=-1)
1746 return 1
1747 catdir = os.path.dirname(self.settings["PORTAGE_BUILDDIR"])
1748 ensure_dirs(os.path.dirname(catdir),
1749 uid=portage_uid, gid=portage_gid, mode=070, mask=0)
1750 builddir_lock = None
1751 catdir_lock = None
1752 retval = -1
1753 try:
1754 if myebuildpath:
1755 catdir_lock = lockdir(catdir)
1756 ensure_dirs(catdir,
1757 uid=portage_uid, gid=portage_gid,
1758 mode=070, mask=0)
1759 builddir_lock = lockdir(
1760 self.settings["PORTAGE_BUILDDIR"])
1761 try:
1762 unlockdir(catdir_lock)
1763 finally:
1764 catdir_lock = None
1765 # Eventually, we'd like to pass in the saved ebuild env here...
1766 retval = doebuild(myebuildpath, "prerm", self.myroot,
1767 self.settings, cleanup=cleanup, use_cache=0,
1768 mydbapi=self.vartree.dbapi, tree="vartree",
1769 vartree=self.vartree)
1770 # XXX: Decide how to handle failures here.
1771 if retval != os.EX_OK:
1772 writemsg("!!! FAILED prerm: %s\n" % retval, noiselevel=-1)
1773 return retval
1775 self._unmerge_pkgfiles(pkgfiles, others_in_slot)
1777 # Remove the registration of preserved libs for this pkg instance
1778 plib_registry = self.vartree.dbapi.plib_registry
1779 plib_registry.unregister(self.mycpv, self.settings["SLOT"],
1780 self.vartree.dbapi.cpv_counter(self.mycpv))
1782 if myebuildpath:
1783 ebuild_phase = "postrm"
1784 retval = doebuild(myebuildpath, "postrm", self.myroot,
1785 self.settings, use_cache=0, tree="vartree",
1786 mydbapi=self.vartree.dbapi, vartree=self.vartree)
1788 # XXX: Decide how to handle failures here.
1789 if retval != os.EX_OK:
1790 writemsg("!!! FAILED postrm: %s\n" % retval, noiselevel=-1)
1791 return retval
1793 # regenerate reverse NEEDED map
1794 self.vartree.dbapi.linkmap.rebuild()
1796 # remove preserved libraries that don't have any consumers left
1797 # FIXME: this code is quite ugly and can likely be optimized in several ways
1798 plib_dict = plib_registry.getPreservedLibs()
1799 for cpv in plib_dict:
1800 plib_dict[cpv].sort()
1801 # for the loop below to work correctly, we need all
1802 # symlinks to come before the actual files, such that
1803 # the recorded symlinks (sonames) will be resolved into
1804 # their real target before the object is found not to be
1805 # in the reverse NEEDED map
1806 def symlink_compare(x, y):
1807 if os.path.islink(x):
1808 if os.path.islink(y):
1809 return 0
1810 else:
1811 return -1
1812 elif os.path.islink(y):
1813 return 1
1814 else:
1815 return 0
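# An equivalent ordering could use a key function instead of a cmp
# (sketch only; list.sort is stable, so non-links keep their order):
#     plib_dict[cpv].sort(key=os.path.islink, reverse=True)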
1817 plib_dict[cpv].sort(symlink_compare)
1818 for f in plib_dict[cpv]:
1819 if not os.path.exists(f):
1820 continue
1821 unlink_list = []
1822 consumers = self.vartree.dbapi.linkmap.findConsumers(f)
1823 if not consumers:
1824 unlink_list.append(f)
1825 else:
1826 keep=False
1827 for c in consumers:
1828 if c not in self.getcontents():
1829 keep=True
1830 break
1831 if not keep:
1832 unlink_list.append(f)
1833 for obj in unlink_list:
1834 try:
1835 if os.path.islink(obj):
1836 obj_type = "sym"
1837 else:
1838 obj_type = "obj"
1839 os.unlink(obj)
1840 writemsg_stdout("<<< !needed %s %s\n" % (obj_type, obj))
1841 except OSError, e:
1842 if e.errno == errno.ENOENT:
1843 pass
1844 else:
1845 raise e
1846 plib_registry.pruneNonExisting()
1848 finally:
1849 if builddir_lock:
1850 try:
1851 if myebuildpath:
1852 if retval != os.EX_OK:
1853 msg_lines = []
1854 msg = ("The '%s' " % ebuild_phase) + \
1855 ("phase of the '%s' package " % self.mycpv) + \
1856 ("has failed with exit value %s." % retval)
1857 from textwrap import wrap
1858 msg_lines.extend(wrap(msg, 72))
1859 msg_lines.append("")
1861 ebuild_name = os.path.basename(myebuildpath)
1862 ebuild_dir = os.path.dirname(myebuildpath)
1863 msg = "The problem occurred while executing " + \
1864 ("the ebuild file named '%s' " % ebuild_name) + \
1865 ("located in the '%s' directory. " \
1866 % ebuild_dir) + \
1867 "If necessary, manually remove " + \
1868 "the environment.bz2 file and/or the " + \
1869 "ebuild file located in that directory."
1870 msg_lines.extend(wrap(msg, 72))
1871 msg_lines.append("")
1873 msg = "Removal " + \
1874 "of the environment.bz2 file is " + \
1875 "preferred since it may allow the " + \
1876 "removal phases to execute successfully. " + \
1877 "The ebuild will be " + \
1878 "sourced and the eclasses " + \
1879 "from the current portage tree will be used " + \
1880 "when necessary. Removal of " + \
1881 "the ebuild file will cause the " + \
1882 "pkg_prerm() and pkg_postrm() removal " + \
1883 "phases to be skipped entirely."
1884 msg_lines.extend(wrap(msg, 72))
1885 from portage.elog.messages import eerror
1886 for l in msg_lines:
1887 eerror(l, phase=ebuild_phase, key=self.mycpv)
1889 # process logs created during pre/postrm
1890 elog_process(self.mycpv, self.settings, phasefilter=filter_unmergephases)
1891 if retval == os.EX_OK:
1892 doebuild(myebuildpath, "cleanrm", self.myroot,
1893 self.settings, tree="vartree",
1894 mydbapi=self.vartree.dbapi,
1895 vartree=self.vartree)
1896 finally:
1897 unlockdir(builddir_lock)
1898 try:
1899 if myebuildpath and not catdir_lock:
1900 # Lock catdir for removal if empty.
1901 catdir_lock = lockdir(catdir)
1902 finally:
1903 if catdir_lock:
1904 try:
1905 os.rmdir(catdir)
1906 except OSError, e:
1907 if e.errno not in (errno.ENOENT,
1908 errno.ENOTEMPTY, errno.EEXIST):
1909 raise
1910 del e
1911 unlockdir(catdir_lock)
1912 env_update(target_root=self.myroot, prev_mtimes=ldpath_mtimes,
1913 contents=contents, env=self.settings.environ())
1914 return os.EX_OK
1916 def _unmerge_pkgfiles(self, pkgfiles, others_in_slot):
1917 """
1919 Unmerges the contents of a package from the liveFS
1920 Removes the VDB entry for self
1922 @param pkgfiles: typically self.getcontents()
1923 @type pkgfiles: Dictionary { filename: [ 'type', 'mtime', 'md5sum' ] }
1924 @param others_in_slot: all dblink instances in this slot, excluding self
1925 @type others_in_slot: list
1926 @rtype: None
1927 """
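# Illustrative entry shapes, matching what getcontents() builds above
# (paths, mtimes and md5s are hypothetical):
#     '/usr/bin/foo':       ['obj', '1213024710', 'd41d8cd98f00b204e9800998ecf8427e']
#     '/usr/lib/libfoo.so': ['sym', '1213024710', 'libfoo.so.1']
#     '/usr/share/doc':     ['dir']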
1929 if not pkgfiles:
1930 writemsg_stdout("No package files given... Grabbing a set.\n")
1931 pkgfiles = self.getcontents()
1933 if others_in_slot is None:
1934 others_in_slot = []
1935 slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
1936 slot_matches = self.vartree.dbapi.match(
1937 "%s:%s" % (dep_getkey(self.mycpv), slot))
1938 for cur_cpv in slot_matches:
1939 if cur_cpv == self.mycpv:
1940 continue
1941 others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
1942 self.vartree.root, self.settings,
1943 vartree=self.vartree))
1944 dest_root = normalize_path(self.vartree.root).rstrip(os.path.sep) + \
1945 os.path.sep
1946 dest_root_len = len(dest_root) - 1
1948 conf_mem_file = os.path.join(dest_root, CONFIG_MEMORY_FILE)
1949 cfgfiledict = grabdict(conf_mem_file)
1950 stale_confmem = []
1952 unmerge_orphans = "unmerge-orphans" in self.settings.features
1954 if pkgfiles:
1955 self.updateprotect()
1956 mykeys = pkgfiles.keys()
1957 mykeys.sort()
1958 mykeys.reverse()
1960 #process symlinks second-to-last, directories last.
1961 mydirs = []
1962 ignored_unlink_errnos = (
1963 errno.EBUSY, errno.ENOENT,
1964 errno.ENOTDIR, errno.EISDIR)
1965 ignored_rmdir_errnos = (
1966 errno.EEXIST, errno.ENOTEMPTY,
1967 errno.EBUSY, errno.ENOENT,
1968 errno.ENOTDIR, errno.EISDIR)
1969 modprotect = os.path.join(self.vartree.root, "lib/modules/")
1971 def unlink(file_name, lstatobj):
1972 if bsd_chflags:
1973 if lstatobj.st_flags != 0:
1974 bsd_chflags.lchflags(file_name, 0)
1975 parent_name = os.path.dirname(file_name)
1976 # Use normal stat/chflags for the parent since we want to
1977 # follow any symlinks to the real parent directory.
1978 pflags = os.stat(parent_name).st_flags
1979 if pflags != 0:
1980 bsd_chflags.chflags(parent_name, 0)
1981 try:
1982 if not stat.S_ISLNK(lstatobj.st_mode):
1983 # Remove permissions to ensure that any hardlinks to
1984 # suid/sgid files are rendered harmless.
1985 os.chmod(file_name, 0)
1986 os.unlink(file_name)
1987 finally:
1988 if bsd_chflags and pflags != 0:
1989 # Restore the parent flags we saved before unlinking
1990 bsd_chflags.chflags(parent_name, pflags)
1992 def show_unmerge(zing, desc, file_type, file_name):
1993 writemsg_stdout("%s %s %s %s\n" % \
1994 (zing, desc.ljust(8), file_type, file_name))
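# e.g. show_unmerge("---", "!mtime", "obj", "/usr/bin/foo") prints
# "--- !mtime   obj /usr/bin/foo" (path hypothetical).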
1995 for objkey in mykeys:
1996 obj = normalize_path(objkey)
1997 file_data = pkgfiles[objkey]
1998 file_type = file_data[0]
1999 statobj = None
2000 try:
2001 statobj = os.stat(obj)
2002 except OSError:
2003 pass
2004 lstatobj = None
2005 try:
2006 lstatobj = os.lstat(obj)
2007 except (OSError, AttributeError):
2008 pass
2009 islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
2010 if lstatobj is None:
2011 show_unmerge("---", "!found", file_type, obj)
2012 continue
2013 if obj.startswith(dest_root):
2014 relative_path = obj[dest_root_len:]
2015 is_owned = False
2016 for dblnk in others_in_slot:
2017 if dblnk.isowner(relative_path, dest_root):
2018 is_owned = True
2019 break
2020 if is_owned:
2021 # A new instance of this package claims the file, so
2022 # don't unmerge it.
2023 show_unmerge("---", "replaced", file_type, obj)
2024 continue
2025 elif relative_path in cfgfiledict:
2026 stale_confmem.append(relative_path)
2027 # next line includes a tweak to protect modules from being unmerged,
2028 # but we don't protect modules from being overwritten if they are
2029 # upgraded. We effectively only want one half of the config protection
2030 # functionality for /lib/modules. For portage-ng both capabilities
2031 # should be able to be independently specified.
2032 if obj.startswith(modprotect):
2033 show_unmerge("---", "cfgpro", file_type, obj)
2034 continue
2036 # Don't unlink symlinks to directories here since that can
2037 # remove /lib and /usr/lib symlinks.
2038 if unmerge_orphans and \
2039 lstatobj and not stat.S_ISDIR(lstatobj.st_mode) and \
2040 not (islink and statobj and stat.S_ISDIR(statobj.st_mode)) and \
2041 not self.isprotected(obj):
2042 try:
2043 unlink(obj, lstatobj)
2044 except EnvironmentError, e:
2045 if e.errno not in ignored_unlink_errnos:
2046 raise
2047 del e
2048 show_unmerge("<<<", "", file_type, obj)
2049 continue
2051 lmtime = str(lstatobj[stat.ST_MTIME])
2052 if (pkgfiles[objkey][0] not in ("dir", "fif", "dev")) and (lmtime != pkgfiles[objkey][1]):
2053 show_unmerge("---", "!mtime", file_type, obj)
2054 continue
2056 if pkgfiles[objkey][0] == "dir":
2057 if statobj is None or not stat.S_ISDIR(statobj.st_mode):
2058 show_unmerge("---", "!dir", file_type, obj)
2059 continue
2060 mydirs.append(obj)
2061 elif pkgfiles[objkey][0] == "sym":
2062 if not islink:
2063 show_unmerge("---", "!sym", file_type, obj)
2064 continue
2065 # Go ahead and unlink symlinks to directories here when
2066 # they're actually recorded as symlinks in the contents.
2067 # Normally, symlinks such as /lib -> lib64 are not recorded
2068 # as symlinks in the contents of a package. If a package
2069 # installs something into ${D}/lib/, it is recorded in the
2070 # contents as a directory even if it happens to correspond
2071 # to a symlink when it's merged to the live filesystem.
2072 try:
2073 unlink(obj, lstatobj)
2074 show_unmerge("<<<", "", file_type, obj)
2075 except (OSError, IOError), e:
2076 if e.errno not in ignored_unlink_errnos:
2077 raise
2078 del e
2079 show_unmerge("!!!", "", file_type, obj)
2080 elif pkgfiles[objkey][0] == "obj":
2081 if statobj is None or not stat.S_ISREG(statobj.st_mode):
2082 show_unmerge("---", "!obj", file_type, obj)
2083 continue
2084 mymd5 = None
2085 try:
2086 mymd5 = perform_md5(obj, calc_prelink=1)
2087 except FileNotFound, e:
2088 # the file has disappeared between now and our stat call
2089 show_unmerge("---", "!obj", file_type, obj)
2090 continue
2092 # The md5 is lowered because db entries used to be recorded in
2093 # upper-case; lowering it preserves backwards compatibility.
2094 if mymd5 != pkgfiles[objkey][2].lower():
2095 show_unmerge("---", "!md5", file_type, obj)
2096 continue
2097 try:
2098 unlink(obj, lstatobj)
2099 except (OSError, IOError), e:
2100 if e.errno not in ignored_unlink_errnos:
2101 raise
2102 del e
2103 show_unmerge("<<<", "", file_type, obj)
2104 elif pkgfiles[objkey][0] == "fif":
2105 if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
2106 show_unmerge("---", "!fif", file_type, obj)
2107 continue
2108 show_unmerge("---", "", file_type, obj)
2109 elif pkgfiles[objkey][0] == "dev":
2110 show_unmerge("---", "", file_type, obj)
2112 mydirs.sort()
2113 mydirs.reverse()
2115 for obj in mydirs:
2116 try:
2117 if bsd_chflags:
2118 lstatobj = os.lstat(obj)
2119 if lstatobj.st_flags != 0:
2120 bsd_chflags.lchflags(obj, 0)
2121 parent_name = os.path.dirname(obj)
2122 # Use normal stat/chflags for the parent since we want to
2123 # follow any symlinks to the real parent directory.
2124 pflags = os.stat(parent_name).st_flags
2125 if pflags != 0:
2126 bsd_chflags.chflags(parent_name, 0)
2127 try:
2128 os.rmdir(obj)
2129 finally:
2130 if bsd_chflags and pflags != 0:
2131 # Restore the parent flags we saved before unlinking
2132 bsd_chflags.chflags(parent_name, pflags)
2133 show_unmerge("<<<", "", "dir", obj)
2134 except EnvironmentError, e:
2135 if e.errno not in ignored_rmdir_errnos:
2136 raise
2137 if e.errno != errno.ENOENT:
2138 show_unmerge("---", "!empty", "dir", obj)
2139 del e
2141 # Remove stale entries from config memory.
2142 if stale_confmem:
2143 for filename in stale_confmem:
2144 del cfgfiledict[filename]
2145 writedict(cfgfiledict, conf_mem_file)
2147 #remove self from vartree database so that our own virtual gets zapped if we're the last node
2148 self.vartree.zap(self.mycpv)
2150 def isowner(self, filename, destroot):
2151 """
2152 Check if a file belongs to this package. This may
2153 result in a stat call for the parent directory of
2154 every installed file, since the inode numbers are
2155 used to work around the problem of ambiguous paths
2156 caused by symlinked directories. The results of
2157 stat calls are cached to optimize multiple calls
2158 to this method.
2160 @param filename: file path to check, relative to destroot
2161 @type filename: String (Path)
2162 @param destroot: root prefix (usually ${ROOT}) that filename is joined with
2163 @type destroot: String (Path)
2164 @rtype: Boolean
2165 @returns:
2166 1. True if this package owns the file.
2167 2. False if this package does not own the file.
2168 """
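# Illustrative call (hypothetical path, ROOT="/"):
#     mylink.isowner("usr/bin/foo", "/")
# returns True only when CONTENTS records /usr/bin/foo, either directly
# or through a symlinked parent directory resolved via the inode cache.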
2169 destfile = normalize_path(
2170 os.path.join(destroot, filename.lstrip(os.path.sep)))
2172 pkgfiles = self.getcontents()
2173 if pkgfiles and destfile in pkgfiles:
2174 return True
2175 if pkgfiles:
2176 basename = os.path.basename(destfile)
2177 if self._contents_basenames is None:
2178 self._contents_basenames = set(
2179 os.path.basename(x) for x in pkgfiles)
2180 if basename not in self._contents_basenames:
2181 # This is a shortcut that, in most cases, allows us to
2182 # eliminate this package as an owner without the need
2183 # to examine inode numbers of parent directories.
2184 return False
2186 # Use stat rather than lstat since we want to follow
2187 # any symlinks to the real parent directory.
2188 parent_path = os.path.dirname(destfile)
2189 try:
2190 parent_stat = os.stat(parent_path)
2191 except EnvironmentError, e:
2192 if e.errno != errno.ENOENT:
2193 raise
2194 del e
2195 return False
2196 if self._contents_inodes is None:
2197 self._contents_inodes = {}
2198 parent_paths = set()
2199 for x in pkgfiles:
2200 p_path = os.path.dirname(x)
2201 if p_path in parent_paths:
2202 continue
2203 parent_paths.add(p_path)
2204 try:
2205 s = os.stat(p_path)
2206 except OSError:
2207 pass
2208 else:
2209 inode_key = (s.st_dev, s.st_ino)
2210 # Use lists of paths in case multiple
2211 # paths reference the same inode.
2212 p_path_list = self._contents_inodes.get(inode_key)
2213 if p_path_list is None:
2214 p_path_list = []
2215 self._contents_inodes[inode_key] = p_path_list
2216 if p_path not in p_path_list:
2217 p_path_list.append(p_path)
2218 p_path_list = self._contents_inodes.get(
2219 (parent_stat.st_dev, parent_stat.st_ino))
2220 if p_path_list:
2221 for p_path in p_path_list:
2222 x = os.path.join(p_path, basename)
2223 if x in pkgfiles:
2224 return True
2226 return False
2228 def _preserve_libs(self, srcroot, destroot, mycontents, counter, inforoot):
2229 # read global reverse NEEDED map
2230 linkmap = self.vartree.dbapi.linkmap
2231 linkmap.rebuild(include_file=os.path.join(inforoot, "NEEDED.ELF.2"))
2232 liblist = linkmap.listLibraryObjects()
2234 # get list of libraries from old package instance
2235 old_contents = self._installed_instance.getcontents().keys()
2236 old_libs = set(old_contents).intersection(liblist)
2238 # get list of libraries from new package instance
2239 mylibs = set([os.path.join(os.sep, x) for x in mycontents]).intersection(liblist)
2241 # check which libs are present in the old, but not the new package instance
2242 candidates = old_libs.difference(mylibs)
2244 for x in old_contents:
2245 if os.path.islink(x) and os.path.realpath(x) in candidates and x not in mycontents:
2246 candidates.add(x)
2248 # ignore any libs that are only internally used by the package
2249 def has_external_consumers(lib, contents, otherlibs):
2250 consumers = linkmap.findConsumers(lib)
2251 contents_without_libs = [x for x in contents if x not in otherlibs]
2253 # just used by objects that will be autocleaned
2254 if len(consumers.difference(contents_without_libs)) == 0:
2255 return False
2256 # used by objects that are referenced as well, need to check those
2257 # recursively to break any reference cycles
2258 elif len(consumers.difference(contents)) == 0:
2259 otherlibs = set(otherlibs)
2260 for ol in otherlibs.intersection(consumers):
2261 if has_external_consumers(ol, contents, otherlibs.difference([lib])):
2262 return True
2263 return False
2264 # used by external objects directly
2265 else:
2266 return True
2268 for lib in list(candidates):
2269 if not has_external_consumers(lib, old_contents, candidates):
2270 candidates.remove(lib)
2271 continue
2272 if linkmap.isMasterLink(lib):
2273 candidates.remove(lib)
2274 continue
2275 # only preserve the lib if there is no other copy to use for each consumer
2276 keep = False
2277 for c in linkmap.findConsumers(lib):
2278 localkeep = True
2279 providers = linkmap.findProviders(c)
2281 for soname in providers:
2282 if lib in providers[soname]:
2283 for p in providers[soname]:
2284 if p not in candidates or os.path.exists(os.path.join(srcroot, p.lstrip(os.sep))):
2285 localkeep = False
2286 break
2287 break
2288 if localkeep:
2289 keep = True
2290 if not keep:
2291 candidates.remove(lib)
2292 continue
2294 del mylibs, mycontents, old_contents, liblist
2296 # inject files that should be preserved into our image dir
2297 import shutil
2298 preserve_paths = []
2299 candidates_stack = list(candidates)
2300 while candidates_stack:
2301 x = candidates_stack.pop()
2302 # skip existing files so the 'new' libs aren't overwritten
2303 if os.path.exists(os.path.join(srcroot, x.lstrip(os.sep))):
2304 continue
2305 print "injecting %s into %s" % (x, srcroot)
2306 if not os.path.exists(os.path.join(destroot, x.lstrip(os.sep))):
2307 print "%s does not exist so can't be preserved" % x
2308 continue
2309 mydir = os.path.join(srcroot, os.path.dirname(x).lstrip(os.sep))
2310 if not os.path.exists(mydir):
2311 os.makedirs(mydir)
2313 # resolve symlinks and extend preserve list
2314 # NOTE: we're extending the list in the loop to emulate recursion to
2315 # also get indirect symlinks
2316 if os.path.islink(x):
2317 linktarget = os.readlink(x)
2318 os.symlink(linktarget, os.path.join(srcroot, x.lstrip(os.sep)))
2319 if linktarget[0] != os.sep:
2320 linktarget = os.path.join(os.path.dirname(x), linktarget)
2321 candidates.add(linktarget)
2322 candidates_stack.append(linktarget)
2323 else:
2324 shutil.copy2(os.path.join(destroot, x.lstrip(os.sep)),
2325 os.path.join(srcroot, x.lstrip(os.sep)))
2326 preserve_paths.append(x)
2328 del candidates
2330 # keep track of the libs we preserved
2331 self.vartree.dbapi.plib_registry.register(self.mycpv, self.settings["SLOT"], counter, preserve_paths)
2333 del preserve_paths
2335 def _collision_protect(self, srcroot, destroot, mypkglist, mycontents):
2336 collision_ignore = set([normalize_path(myignore) for myignore in \
2337 self.settings.get("COLLISION_IGNORE", "").split()])
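# COLLISION_IGNORE holds space-separated path prefixes, e.g.
# COLLISION_IGNORE="/lib/modules" in make.conf (value hypothetical).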
2339 stopmerge = False
2341 collisions = []
2342 destroot = normalize_path(destroot).rstrip(os.path.sep) + \
2343 os.path.sep
2344 writemsg_stdout("%s checking %d files for package collisions\n" % \
2345 (green("*"), len(mycontents)))
i = 0
2346 for f in mycontents:
2347 i = i + 1
2348 if i % 1000 == 0:
2349 writemsg_stdout("%d files checked ...\n" % i)
2350 dest_path = normalize_path(
2351 os.path.join(destroot, f.lstrip(os.path.sep)))
2352 try:
2353 dest_lstat = os.lstat(dest_path)
2354 except EnvironmentError, e:
2355 if e.errno == errno.ENOENT:
2356 del e
2357 continue
2358 elif e.errno == errno.ENOTDIR:
2359 del e
2360 # A non-directory is in a location where this package
2361 # expects to have a directory.
2362 dest_lstat = None
2363 parent_path = dest_path
2364 while len(parent_path) > len(destroot):
2365 parent_path = os.path.dirname(parent_path)
2366 try:
2367 dest_lstat = os.lstat(parent_path)
2368 break
2369 except EnvironmentError, e:
2370 if e.errno != errno.ENOTDIR:
2371 raise
2372 del e
2373 if not dest_lstat:
2374 raise AssertionError(
2375 "unable to find non-directory " + \
2376 "parent for '%s'" % dest_path)
2377 dest_path = parent_path
2378 f = os.path.sep + dest_path[len(destroot):]
2379 if f in collisions:
2380 continue
2381 else:
2382 raise
2383 if f[0] != "/":
2384 f="/"+f
2385 isowned = False
2386 for ver in [self] + mypkglist:
2387 if (ver.isowner(f, destroot) or ver.isprotected(f)):
2388 isowned = True
2389 break
2390 if not isowned:
2391 stopmerge = True
2392 if collision_ignore:
2393 if f in collision_ignore:
2394 stopmerge = False
2395 else:
2396 for myignore in collision_ignore:
2397 if f.startswith(myignore + os.path.sep):
2398 stopmerge = False
2399 break
2400 if stopmerge:
2401 collisions.append(f)
2402 return collisions
2404 def _security_check(self, installed_instances):
2405 if not installed_instances:
2406 return 0
2407 file_paths = set()
2408 for dblnk in installed_instances:
2409 file_paths.update(dblnk.getcontents())
2410 inode_map = {}
2411 real_paths = set()
2412 for path in file_paths:
2413 try:
2414 s = os.lstat(path)
2415 except OSError, e:
2416 if e.errno not in (errno.ENOENT, errno.ENOTDIR):
2417 raise
2418 del e
2419 continue
2420 if not stat.S_ISREG(s.st_mode):
2421 continue
2422 path = os.path.realpath(path)
2423 if path in real_paths:
2424 continue
2425 real_paths.add(path)
2426 if s.st_nlink > 1 and \
2427 s.st_mode & (stat.S_ISUID | stat.S_ISGID):
2428 k = (s.st_dev, s.st_ino)
2429 inode_map.setdefault(k, []).append((path, s))
2430 suspicious_hardlinks = []
2431 for path_list in inode_map.itervalues():
2432 path, s = path_list[0]
2433 if len(path_list) == s.st_nlink:
2434 # All hardlinks seem to be owned by this package.
2435 continue
2436 suspicious_hardlinks.append(path_list)
2437 if not suspicious_hardlinks:
2438 return 0
2439 from portage.output import colorize
2440 prefix = colorize("SECURITY_WARN", "*") + " WARNING: "
2441 writemsg(prefix + "suid/sgid file(s) " + \
2442 "with suspicious hardlink(s):\n", noiselevel=-1)
2443 for path_list in suspicious_hardlinks:
2444 for path, s in path_list:
2445 writemsg(prefix + " '%s'\n" % path, noiselevel=-1)
2446 writemsg(prefix + "See the Gentoo Security Handbook " + \
2447 "guide for advice on how to proceed.\n", noiselevel=-1)
2448 return 1
2450 def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
2451 mydbapi=None, prev_mtimes=None):
2452 """
2454 This function does the following:
2456 calls self._preserve_libs if FEATURES=preserve-libs
2457 calls self._collision_protect if FEATURES=collision-protect
2458 calls doebuild(mydo=pkg_preinst)
2459 Merges the package to the livefs
2460 unmerges old version (if required)
2461 calls doebuild(mydo=pkg_postinst)
2462 calls env_update
2463 calls elog_process
2465 @param srcroot: Typically this is ${D}
2466 @type srcroot: String (Path)
2467 @param destroot: Path to merge to (usually ${ROOT})
2468 @type destroot: String (Path)
2469 @param inforoot: root of the vardb entry ?
2470 @type inforoot: String (Path)
2471 @param myebuild: path to the ebuild that we are processing
2472 @type myebuild: String (Path)
2473 @param mydbapi: dbapi which is handed to doebuild.
2474 @type mydbapi: portdbapi instance
2475 @param prev_mtimes: { Filename:mtime } mapping for env_update
2476 @type prev_mtimes: Dictionary
2477 @rtype: Boolean
2478 @returns:
2479 1. 0 on success
2480 2. 1 on failure
2482 secondhand is a list of symlinks that have been skipped due to their target
2483 not existing; we will merge these symlinks at a later time.
2484 """
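# Note: treewalk() is normally reached through dblink.merge() ->
# dblink._merge() (defined below), which wrap it in lockdb()/unlockdb().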
2486 srcroot = normalize_path(srcroot).rstrip(os.path.sep) + os.path.sep
2487 destroot = normalize_path(destroot).rstrip(os.path.sep) + os.path.sep
2489 if not os.path.isdir(srcroot):
2490 writemsg("!!! Directory Not Found: D='%s'\n" % srcroot,
2491 noiselevel=-1)
2492 return 1
2494 inforoot_slot_file = os.path.join(inforoot, "SLOT")
2495 slot = None
2496 try:
2497 f = open(inforoot_slot_file)
2498 try:
2499 slot = f.read().strip()
2500 finally:
2501 f.close()
2502 except EnvironmentError, e:
2503 if e.errno != errno.ENOENT:
2504 raise
2505 del e
2507 if slot is None:
2508 slot = ""
2510 from portage.elog.messages import eerror as _eerror
2511 def eerror(lines):
2512 for l in lines:
2513 _eerror(l, phase="preinst", key=self.settings.mycpv)
2515 if slot != self.settings["SLOT"]:
2516 writemsg("!!! WARNING: Expected SLOT='%s', got '%s'\n" % \
2517 (self.settings["SLOT"], slot))
2519 if not os.path.exists(self.dbcatdir):
2520 os.makedirs(self.dbcatdir)
2522 otherversions = []
2523 for v in self.vartree.dbapi.cp_list(self.mysplit[0]):
2524 otherversions.append(v.split("/")[1])
2526 # filter any old-style virtual matches
2527 slot_matches = [cpv for cpv in self.vartree.dbapi.match(
2528 "%s:%s" % (cpv_getkey(self.mycpv), slot)) \
2529 if cpv_getkey(cpv) == cpv_getkey(self.mycpv)]
2531 if self.mycpv not in slot_matches and \
2532 self.vartree.dbapi.cpv_exists(self.mycpv):
2533 # handle multislot or unapplied slotmove
2534 slot_matches.append(self.mycpv)
2536 others_in_slot = []
2537 from portage import config
2538 for cur_cpv in slot_matches:
2539 # Clone the config in case one of these has to be unmerged since
2540 # we need it to have private ${T} etc... for things like elog.
2541 others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
2542 self.vartree.root, config(clone=self.settings),
2543 vartree=self.vartree))
2544 retval = self._security_check(others_in_slot)
2545 if retval:
2546 return retval
2548 if slot_matches:
2549 # Used by self.isprotected().
2550 max_dblnk = None
2551 max_counter = -1
2552 for dblnk in others_in_slot:
2553 cur_counter = self.vartree.dbapi.cpv_counter(dblnk.mycpv)
2554 if cur_counter > max_counter:
2555 max_counter = cur_counter
2556 max_dblnk = dblnk
2557 self._installed_instance = max_dblnk
2559 # get current counter value (counter_tick also takes care of incrementing it)
2560 # XXX Need to make this destroot, but it needs to be initialized first. XXX
2561 # XXX bis: leads to some invalidentry() call through cp_all().
2562 # Note: The counter is generated here but written later because preserve_libs
2563 # needs the counter value but has to be before dbtmpdir is made (which
2564 # has to be before the counter is written) - genone
2565 counter = self.vartree.dbapi.counter_tick(self.myroot, mycpv=self.mycpv)
2567 # Save this for unregistering preserved-libs if the merge fails.
2568 self.settings["COUNTER"] = str(counter)
2569 self.settings.backup_changes("COUNTER")
2571 myfilelist = []
2572 mylinklist = []
2573 def onerror(e):
2574 raise
2575 for parent, dirs, files in os.walk(srcroot, onerror=onerror):
2576 for f in files:
2577 file_path = os.path.join(parent, f)
2578 file_mode = os.lstat(file_path).st_mode
2579 if stat.S_ISREG(file_mode):
2580 myfilelist.append(file_path[len(srcroot):])
2581 elif stat.S_ISLNK(file_mode):
2582 # Note: os.walk puts symlinks to directories in the "dirs"
2583 # list and it does not traverse them since that could lead
2584 # to an infinite recursion loop.
2585 mylinklist.append(file_path[len(srcroot):])
2587 # If there are no files to merge, and an installed package in the same
2588 # slot has files, it probably means that something went wrong.
2589 if self.settings.get("PORTAGE_PACKAGE_EMPTY_ABORT") == "1" and \
2590 not myfilelist and not mylinklist and others_in_slot:
2591 installed_files = None
2592 for other_dblink in others_in_slot:
2593 installed_files = other_dblink.getcontents()
2594 if not installed_files:
2595 continue
2596 from textwrap import wrap
2597 wrap_width = 72
2598 msg = []
2599 d = (
2600 self.mycpv,
2601 other_dblink.mycpv
2602 )
2603 msg.extend(wrap(("The '%s' package will not install " + \
2604 "any files, but the currently installed '%s'" + \
2605 " package has the following files: ") % d, wrap_width))
2606 msg.append("")
2607 msg.extend(sorted(installed_files))
2608 msg.append("")
2609 msg.append("package %s NOT merged" % self.mycpv)
2610 msg.append("")
2611 msg.extend(wrap(
2612 ("Manually run `emerge --unmerge =%s` " % \
2613 other_dblink.mycpv) + "if you really want to " + \
2614 "remove the above files. Set " + \
2615 "PORTAGE_PACKAGE_EMPTY_ABORT=\"0\" in " + \
2616 "/etc/make.conf if you do not want to " + \
2617 "abort in cases like this.",
2618 wrap_width))
2619 eerror(msg)
2620 if installed_files:
2621 return 1
2623 # Preserve old libs if they are still in use
2624 if slot_matches and "preserve-libs" in self.settings.features:
2625 self._preserve_libs(srcroot, destroot, myfilelist+mylinklist, counter, inforoot)
2627 # check for package collisions
2628 blockers = None
2629 if self._blockers is not None:
2630 # This is only supposed to be called when
2631 # the vdb is locked, like it is here.
2632 blockers = self._blockers()
2633 if blockers is None:
2634 blockers = []
2635 collisions = self._collision_protect(srcroot, destroot,
2636 others_in_slot + blockers, myfilelist + mylinklist)
2638 # Make sure the ebuild environment is initialized and that ${T}/elog
2639 # exists for logging of collision-protect eerror messages.
2640 if myebuild is None:
2641 myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
2642 doebuild_environment(myebuild, "preinst", destroot,
2643 self.settings, 0, 0, mydbapi)
2644 prepare_build_dirs(destroot, self.settings, cleanup)
2646 if collisions:
2647 collision_protect = "collision-protect" in self.settings.features
2648 msg = "This package will overwrite one or more files that" + \
2649 " may belong to other packages (see list below)."
2650 if not collision_protect:
2651 msg += " Add \"collision-protect\" to FEATURES in" + \
2652 " make.conf if you would like the merge to abort" + \
2653 " in cases like this."
2654 if self.settings.get("PORTAGE_QUIET") != "1":
2655 msg += " You can use a command such as" + \
2656 " `portageq owners / <filename>` to identify the" + \
2657 " installed package that owns a file. If portageq" + \
2658 " reports that only one package owns a file then do NOT" + \
2659 " file a bug report. A bug report is only useful if it" + \
2660 " identifies at least two or more packages that are known" + \
2661 " to install the same file(s)." + \
2662 " If a collision occurs and you" + \
2663 " can not explain where the file came from then you" + \
2664 " should simply ignore the collision since there is not" + \
2665 " enough information to determine if a real problem" + \
2666 " exists. Please do NOT file a bug report at" + \
2667 " http://bugs.gentoo.org unless you report exactly which" + \
2668 " two packages install the same file(s). Once again," + \
2669 " please do NOT file a bug report unless you have" + \
2670 " completely understood the above message."
2672 self.settings["EBUILD_PHASE"] = "preinst"
2673 from textwrap import wrap
2674 msg = wrap(msg, 70)
2675 if collision_protect:
2676 msg.append("")
2677 msg.append("package %s NOT merged" % self.settings.mycpv)
2678 msg.append("")
2679 msg.append("Detected file collision(s):")
2680 msg.append("")
2682 for f in collisions:
2683 msg.append("\t%s" % \
2684 os.path.join(destroot, f.lstrip(os.path.sep)))
2686 eerror(msg)
2688 msg = []
2689 msg.append("")
2690 msg.append("Searching all installed" + \
2691 " packages for file collisions...")
2692 msg.append("")
2693 msg.append("Press Ctrl-C to Stop")
2694 msg.append("")
2695 eerror(msg)
2697 owners = self.vartree.dbapi._owners.get_owners(collisions)
2698 self.vartree.dbapi.flush_cache()
2700 for pkg, owned_files in owners.iteritems():
2701 cpv = pkg.mycpv
2702 msg = []
2703 msg.append("%s" % cpv)
2704 for f in sorted(owned_files):
2705 msg.append("\t%s" % os.path.join(destroot,
2706 f.lstrip(os.path.sep)))
2707 eerror(msg)
2708 if not owners:
2709 eerror(["None of the installed" + \
2710 " packages claim the file(s)."])
2711 if collision_protect:
2712 return 1
2714 writemsg_stdout(">>> Merging %s to %s\n" % (self.mycpv, destroot))
2716 # The merge process may move files out of the image directory,
2717 # which causes invalidation of the .installed flag.
2718 try:
2719 os.unlink(os.path.join(
2720 os.path.dirname(normalize_path(srcroot)), ".installed"))
2721 except OSError, e:
2722 if e.errno != errno.ENOENT:
2723 raise
2724 del e
2726 self.dbdir = self.dbtmpdir
2727 self.delete()
2728 ensure_dirs(self.dbtmpdir)
2730 # run preinst script
2731 a = doebuild(myebuild, "preinst", destroot, self.settings,
2732 use_cache=0, tree=self.treetype, mydbapi=mydbapi,
2733 vartree=self.vartree)
2735 # XXX: Decide how to handle failures here.
2736 if a != os.EX_OK:
2737 writemsg("!!! FAILED preinst: "+str(a)+"\n", noiselevel=-1)
2738 return a
2740 # copy "info" files (like SLOT, CFLAGS, etc.) into the database
2741 for x in listdir(inforoot):
2742 self.copyfile(inforoot+"/"+x)
2744 # write local package counter for recording
2745 lcfile = open(os.path.join(self.dbtmpdir, "COUNTER"),"w")
2746 lcfile.write(str(counter))
2747 lcfile.close()
2749 # open CONTENTS file (possibly overwriting old one) for recording
2750 outfile = open(os.path.join(self.dbtmpdir, "CONTENTS"),"w")
2752 self.updateprotect()
2754 #if we have a file containing previously-merged config file md5sums, grab it.
2755 conf_mem_file = os.path.join(destroot, CONFIG_MEMORY_FILE)
2756 cfgfiledict = grabdict(conf_mem_file)
2757 if self.settings.has_key("NOCONFMEM"):
2758 cfgfiledict["IGNORE"]=1
2759 else:
2760 cfgfiledict["IGNORE"]=0
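# After the IGNORE flag is set, cfgfiledict looks like this (path and
# md5 hypothetical; grabdict() maps each path to a list of values):
#     {"IGNORE": 0, "/etc/foo.conf": ["d41d8cd98f00b204e9800998ecf8427e"]}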
2762 # Always behave like --noconfmem is enabled for downgrades
2763 # so that people who don't know about this option are less
2764 # likely to get confused when doing upgrade/downgrade cycles.
2765 pv_split = catpkgsplit(self.mycpv)[1:]
2766 for other in others_in_slot:
2767 if pkgcmp(pv_split, catpkgsplit(other.mycpv)[1:]) < 0:
2768 cfgfiledict["IGNORE"] = 1
2769 break
2771 # Don't bump mtimes on merge since some applications require
2772 # preservation of timestamps. This means that the unmerge phase must
2773 # check to see if a file belongs to an installed instance in the same
2774 # slot.
2775 mymtime = None
2777 # set umask to 0 for merging; save the old value in prevmask since umask is a process-wide change
2778 prevmask = os.umask(0)
2779 secondhand = []
2781 # we do a first merge; this will recurse through all files in our srcroot but also build up a
2782 # "second hand" of symlinks to merge later
2783 if self.mergeme(srcroot, destroot, outfile, secondhand, "", cfgfiledict, mymtime):
2784 return 1
2786 # now, it's time for dealing with our second hand; we'll loop until we can't merge anymore. The rest are
2787 # broken symlinks. We'll merge them too.
2788 lastlen = 0
2789 while len(secondhand) and len(secondhand)!=lastlen:
2790 # clear the thirdhand. Anything from our second hand that
2791 # couldn't get merged will be added to thirdhand.
2793 thirdhand = []
2794 self.mergeme(srcroot, destroot, outfile, thirdhand, secondhand, cfgfiledict, mymtime)
2796 #swap hands
2797 lastlen = len(secondhand)
2799 # our thirdhand now becomes our secondhand. It's ok to throw
2800 # away secondhand since thirdhand contains all the stuff that
2801 # couldn't be merged.
2802 secondhand = thirdhand
2804 if len(secondhand):
2805 # force merge of remaining symlinks (broken or circular; oh well)
2806 self.mergeme(srcroot, destroot, outfile, None, secondhand, cfgfiledict, mymtime)
2808 #restore umask
2809 os.umask(prevmask)
2811 #if we opened it, close it
2812 outfile.flush()
2813 outfile.close()
2815 # write out our collection of md5sums
2816 cfgfiledict.pop("IGNORE", None)
2817 ensure_dirs(os.path.dirname(conf_mem_file),
2818 gid=portage_gid, mode=02750, mask=02)
2819 writedict(cfgfiledict, conf_mem_file)
2821 # These caches are populated during collision-protect and the data
2822 # they contain is now invalid. It's very important to invalidate
2823 # the contents_inodes cache so that FEATURES=unmerge-orphans
2824 # doesn't unmerge anything that belongs to this package that has
2825 # just been merged.
2826 others_in_slot.append(self) # self has just been merged
2827 for dblnk in others_in_slot:
2828 dblnk.contentscache = None
2829 dblnk._contents_inodes = None
2830 dblnk._contents_basenames = None
2832 # If portage is reinstalling itself, remove the old
2833 # version now since we want to use the temporary
2834 # PORTAGE_BIN_PATH that will be removed when we return.
2835 reinstall_self = False
2836 if self.myroot == "/" and \
2837 "sys-apps" == self.cat and \
2838 "portage" == pkgsplit(self.pkg)[0]:
2839 reinstall_self = True
2841 autoclean = self.settings.get("AUTOCLEAN", "yes") == "yes"
2842 for dblnk in list(others_in_slot):
2843 if dblnk is self:
2844 continue
2845 if not (autoclean or dblnk.mycpv == self.mycpv or reinstall_self):
2846 continue
2847 writemsg_stdout(">>> Safely unmerging already-installed instance...\n")
2848 others_in_slot.remove(dblnk) # dblnk will unmerge itself now
2849 dblnk.unmerge(trimworld=0, ldpath_mtimes=prev_mtimes,
2850 others_in_slot=others_in_slot)
2851 # TODO: Check status and abort if necessary.
2852 dblnk.delete()
2853 writemsg_stdout(">>> Original instance of package unmerged safely.\n")
2855 if len(others_in_slot) > 1:
2856 from portage.output import colorize
2857 writemsg_stdout(colorize("WARN", "WARNING:")
2858 + " AUTOCLEAN is disabled. This can cause serious"
2859 + " problems due to overlapping packages.\n")
2861 # We hold both directory locks.
2862 self.dbdir = self.dbpkgdir
2863 self.delete()
2864 _movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
2866 # Check for file collisions with blocking packages
2867 # and remove any colliding files from their CONTENTS
2868 # since they now belong to this package.
2869 self._clear_contents_cache()
2870 contents = self.getcontents()
2871 destroot_len = len(destroot) - 1
2872 for blocker in blockers:
2873 blocker_contents = blocker.getcontents()
2874 collisions = []
2875 for filename in blocker_contents:
2876 relative_filename = filename[destroot_len:]
2877 if self.isowner(relative_filename, destroot):
2878 collisions.append(filename)
2879 if not collisions:
2880 continue
2881 for filename in collisions:
2882 del blocker_contents[filename]
2883 f = atomic_ofstream(os.path.join(blocker.dbdir, "CONTENTS"))
2884 for filename in sorted(blocker_contents):
2885 entry_data = blocker_contents[filename]
2886 entry_type = entry_data[0]
2887 relative_filename = filename[destroot_len:]
2888 if entry_type == "obj":
2889 entry_type, mtime, md5sum = entry_data
2890 line = "%s %s %s %s\n" % \
2891 (entry_type, relative_filename, md5sum, mtime)
2892 elif entry_type == "sym":
2893 entry_type, mtime, link = entry_data
2894 line = "%s %s -> %s %s\n" % \
2895 (entry_type, relative_filename, link, mtime)
2896 else: # dir, dev, fif
2897 line = "%s %s\n" % (entry_type, relative_filename)
2898 f.write(line)
2899 f.close()
2901 self.vartree.dbapi._add(self)
2902 contents = self.getcontents()
2904 # regenerate reverse NEEDED map
2905 self.vartree.dbapi.linkmap.rebuild()
2907 #do postinst script
2908 self.settings["PORTAGE_UPDATE_ENV"] = \
2909 os.path.join(self.dbpkgdir, "environment.bz2")
2910 self.settings.backup_changes("PORTAGE_UPDATE_ENV")
2911 a = doebuild(myebuild, "postinst", destroot, self.settings, use_cache=0,
2912 tree=self.treetype, mydbapi=mydbapi, vartree=self.vartree)
2913 self.settings.pop("PORTAGE_UPDATE_ENV", None)
2915 # XXX: Decide how to handle failures here.
2916 if a != os.EX_OK:
2917 writemsg("!!! FAILED postinst: "+str(a)+"\n", noiselevel=-1)
2918 return a
2920 downgrade = False
2921 for v in otherversions:
2922 if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
2923 downgrade = True
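# (pkgcmp() returns a negative value when its first argument is the
# older version, so any newer installed version makes this a downgrade.)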
2925 #update environment settings, library paths. DO NOT change symlinks.
2926 env_update(makelinks=(not downgrade),
2927 target_root=self.settings["ROOT"], prev_mtimes=prev_mtimes,
2928 contents=contents, env=self.settings.environ())
2930 writemsg_stdout(">>> %s %s\n" % (self.mycpv,"merged."))
2931 return os.EX_OK
2933 def mergeme(self, srcroot, destroot, outfile, secondhand, stufftomerge, cfgfiledict, thismtime):
2934 """
2936 This function handles actual merging of the package contents to the livefs.
2937 It also handles config protection.
2939 @param srcroot: Where are we copying files from (usually ${D})
2940 @type srcroot: String (Path)
2941 @param destroot: Typically ${ROOT}
2942 @type destroot: String (Path)
2943 @param outfile: File to log operations to
2944 @type outfile: File Object
2945 @param secondhand: A set of items to merge in pass two (usually
2946 symlinks that point to non-existing files that may get merged later)
2947 @type secondhand: List
2948 @param stufftomerge: Either a directory to merge, or a list of items.
2949 @type stufftomerge: String or List
2950 @param cfgfiledict: { File:mtime } mapping for config_protected files
2951 @type cfgfiledict: Dictionary
2952 @param thismtime: The current time (typically long(time.time()))
2953 @type thismtime: Long
2954 @rtype: None or Boolean
2955 @returns:
2956 1. True on failure
2957 2. None otherwise
2958 """
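# Note: stufftomerge is a srcroot-relative directory offset on the first
# pass and a list of srcroot-relative paths on later second-hand passes
# (see treewalk() above).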
2960 from os.path import sep, join
2961 srcroot = normalize_path(srcroot).rstrip(sep) + sep
2962 destroot = normalize_path(destroot).rstrip(sep) + sep
2964 # this is supposed to merge a list of files. There will be 2 forms of argument passing.
2965 if isinstance(stufftomerge, basestring):
2966 #A directory is specified. Figure out protection paths, listdir() it and process it.
2967 mergelist = os.listdir(join(srcroot, stufftomerge))
2968 offset = stufftomerge
2969 else:
2970 mergelist = stufftomerge
2971 offset = ""
2972 for x in mergelist:
2973 mysrc = join(srcroot, offset, x)
2974 mydest = join(destroot, offset, x)
2975 # myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
2976 myrealdest = join(sep, offset, x)
2977 # stat file once, test using S_* macros many times (faster that way)
2978 try:
2979 mystat = os.lstat(mysrc)
2980 except OSError, e:
2981 writemsg("\n")
2982 writemsg(red("!!! ERROR: There appears to be ")+bold("FILE SYSTEM CORRUPTION.")+red(" A file that is listed\n"))
2983 writemsg(red("!!! as existing is not capable of being stat'd. If you are using an\n"))
2984 writemsg(red("!!! experimental kernel, please boot into a stable one, force an fsck,\n"))
2985 writemsg(red("!!! and ensure your filesystem is in a sane state. ")+bold("'shutdown -Fr now'\n"))
2986 writemsg(red("!!! File: ")+str(mysrc)+"\n", noiselevel=-1)
2987 writemsg(red("!!! Error: ")+str(e)+"\n", noiselevel=-1)
2988 sys.exit(1)
2989 except Exception, e:
2990 writemsg("\n")
2991 writemsg(red("!!! ERROR: An unknown error has occurred during the merge process.\n"))
2992 writemsg(red("!!! A stat call returned the following error for the following file:"))
2993 writemsg( "!!! Please ensure that your filesystem is intact, otherwise report\n")
2994 writemsg( "!!! this as a portage bug at bugs.gentoo.org. Append 'emerge info'.\n")
2995 writemsg( "!!! File: "+str(mysrc)+"\n", noiselevel=-1)
2996 writemsg( "!!! Error: "+str(e)+"\n", noiselevel=-1)
2997 sys.exit(1)
3000 mymode = mystat[stat.ST_MODE]
3001 # handy variables; mydest is the target object on the live filesystems;
3002 # mysrc is the source object in the temporary install dir
3003 try:
3004 mydstat = os.lstat(mydest)
3005 mydmode = mydstat.st_mode
3006 except OSError, e:
3007 if e.errno != errno.ENOENT:
3008 raise
3009 del e
3010 #dest file doesn't exist
3011 mydstat = None
3012 mydmode = None
3014 if stat.S_ISLNK(mymode):
3015 # we are merging a symbolic link
3016 myabsto = abssymlink(mysrc)
3017 if myabsto.startswith(srcroot):
3018 myabsto = myabsto[len(srcroot):]
3019 myabsto = myabsto.lstrip(sep)
3020 myto = os.readlink(mysrc)
3021 if self.settings and self.settings["D"]:
3022 if myto.startswith(self.settings["D"]):
3023 myto = myto[len(self.settings["D"]):]
3024 # myrealto contains the path of the real file to which this symlink points.
3025 # we can simply test for existence of this file to see if the target has been merged yet
3026 myrealto = normalize_path(os.path.join(destroot, myabsto))
3027 if mydmode != None:
3028 #destination exists
3029 if not stat.S_ISLNK(mydmode):
3030 if stat.S_ISDIR(mydmode):
3031 # directory in the way: we can't merge a symlink over a directory
3032 # we won't merge this, continue with next file...
3033 continue
3035 if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
3036 # Kill file blocking installation of symlink to dir #71787
3037 pass
3038 elif self.isprotected(mydest):
3039 # Use md5 of the target in ${D} if it exists...
3040 try:
3041 newmd5 = perform_md5(join(srcroot, myabsto))
3042 except FileNotFound:
3043 # Maybe the target is merged already.
3044 try:
3045 newmd5 = perform_md5(myrealto)
3046 except FileNotFound:
3047 newmd5 = None
3048 mydest = new_protect_filename(mydest, newmd5=newmd5)
3050 # if secondhand is None it means we're operating in "force" mode and should not create a second hand.
3051 if (secondhand != None) and (not os.path.exists(myrealto)):
3052 # either the target directory doesn't exist yet or the target file doesn't exist -- or
3053 # the target is a broken symlink. We will add this file to our "second hand" and merge
3054 # it later.
3055 secondhand.append(mysrc[len(srcroot):])
3056 continue
3057 # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
3058 mymtime = movefile(mysrc, mydest, newmtime=thismtime, sstat=mystat, mysettings=self.settings)
3059 if mymtime != None:
3060 writemsg_stdout(">>> %s -> %s\n" % (mydest, myto))
3061 outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
3062 else:
3063 print "!!! Failed to move file."
3064 print "!!!", mydest, "->", myto
3065 sys.exit(1)
3066 elif stat.S_ISDIR(mymode):
3067 # we are merging a directory
3068 if mydmode != None:
3069 # destination exists
3071 if bsd_chflags:
3072 # Save then clear flags on dest.
3073 dflags = mydstat.st_flags
3074 if dflags != 0:
3075 bsd_chflags.lchflags(mydest, 0)
3077 if not os.access(mydest, os.W_OK):
3078 pkgstuff = pkgsplit(self.pkg)
3079 writemsg("\n!!! Cannot write to '"+mydest+"'.\n", noiselevel=-1)
3080 writemsg("!!! Please check permissions and directories for broken symlinks.\n")
3081 writemsg("!!! You may start the merge process again by using ebuild:\n")
3082 writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
3083 writemsg("!!! And finish by running this: env-update\n\n")
3084 return 1
3086 if stat.S_ISLNK(mydmode) or stat.S_ISDIR(mydmode):
3087 # a symlink to an existing directory will work for us; keep it:
3088 writemsg_stdout("--- %s/\n" % mydest)
3089 if bsd_chflags:
3090 bsd_chflags.lchflags(mydest, dflags)
3091 else:
3092 # a non-directory and non-symlink-to-directory. Won't work for us. Move out of the way.
3093 if movefile(mydest, mydest+".backup", mysettings=self.settings) is None:
3094 sys.exit(1)
3095 print "bak", mydest, mydest+".backup"
3096 #now create our directory
3097 if self.settings.selinux_enabled():
3098 import selinux
3099 sid = selinux.get_sid(mysrc)
3100 selinux.secure_mkdir(mydest,sid)
3101 else:
3102 os.mkdir(mydest)
3103 if bsd_chflags:
3104 bsd_chflags.lchflags(mydest, dflags)
3105 os.chmod(mydest, mystat[0])
3106 os.chown(mydest, mystat[4], mystat[5])
3107 writemsg_stdout(">>> %s/\n" % mydest)
3108 else:
3109 #destination doesn't exist
3110 if self.settings.selinux_enabled():
3111 import selinux
3112 sid = selinux.get_sid(mysrc)
3113 selinux.secure_mkdir(mydest, sid)
3114 else:
3115 os.mkdir(mydest)
3116 os.chmod(mydest, mystat[0])
3117 os.chown(mydest, mystat[4], mystat[5])
3118 writemsg_stdout(">>> %s/\n" % mydest)
3119 outfile.write("dir "+myrealdest+"\n")
3120 # recurse and merge this directory
3121 if self.mergeme(srcroot, destroot, outfile, secondhand,
3122 join(offset, x), cfgfiledict, thismtime):
3123 return 1
3124 elif stat.S_ISREG(mymode):
3125 # we are merging a regular file
3126 mymd5 = perform_md5(mysrc, calc_prelink=1)
3127 # calculate config file protection stuff
3128 mydestdir = os.path.dirname(mydest)
3129 moveme = 1
3130 zing = "!!!"
3131 mymtime = None
3132 if mydmode != None:
3133 # destination file exists
3134 if stat.S_ISDIR(mydmode):
3135 # install of destination is blocked by an existing directory with the same name
3136 moveme = 0
3137 writemsg_stdout("!!! %s\n" % mydest)
3138 elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
3139 cfgprot = 0
3140 # install of destination is blocked by an existing regular file,
3141 # or by a symlink to an existing regular file;
3142 # now, config file management may come into play.
3143 # we only need to tweak mydest if cfg file management is in play.
3144 if self.isprotected(mydest):
3145 # we have a protection path; enable config file management.
3146 destmd5 = perform_md5(mydest, calc_prelink=1)
3147 if mymd5 == destmd5:
3148 #file already in place; simply update mtimes of destination
3149 moveme = 1
3150 else:
3151 if mymd5 == cfgfiledict.get(myrealdest, [None])[0]:
3152 """ An identical update has previously been
3153 merged. Skip it unless the user has chosen
3154 --noconfmem."""
3155 moveme = cfgfiledict["IGNORE"]
3156 cfgprot = cfgfiledict["IGNORE"]
3157 if not moveme:
3158 zing = "---"
3159 mymtime = long(mystat.st_mtime)
3160 else:
3161 moveme = 1
3162 cfgprot = 1
3163 if moveme:
3164 # Merging a new file, so update confmem.
3165 cfgfiledict[myrealdest] = [mymd5]
3166 elif destmd5 == cfgfiledict.get(myrealdest, [None])[0]:
3167 """A previously remembered update has been
3168 accepted, so it is removed from confmem."""
3169 del cfgfiledict[myrealdest]
3170 if cfgprot:
3171 mydest = new_protect_filename(mydest, newmd5=mymd5)
3173 # config protected or not, we merge the new file the same
3174 # way. Unless moveme=0 (blocking directory)
3175 if moveme:
3176 mymtime = movefile(mysrc, mydest, newmtime=thismtime, sstat=mystat, mysettings=self.settings)
3177 if mymtime is None:
3178 sys.exit(1)
3179 zing = ">>>"
3181 if mymtime != None:
3182 outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
3183 writemsg_stdout("%s %s\n" % (zing,mydest))
3184 else:
3185 # we are merging a fifo or device node
3186 zing = "!!!"
3187 if mydmode is None:
3188 # destination doesn't exist
3189 if movefile(mysrc, mydest, newmtime=thismtime, sstat=mystat, mysettings=self.settings) != None:
3190 zing = ">>>"
3191 else:
3192 sys.exit(1)
3193 if stat.S_ISFIFO(mymode):
3194 outfile.write("fif %s\n" % myrealdest)
3195 else:
3196 outfile.write("dev %s\n" % myrealdest)
3197 writemsg_stdout(zing + " " + mydest + "\n")
3199 def merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0,
3200 mydbapi=None, prev_mtimes=None):
3201 """
3202 If portage is reinstalling itself, create temporary
3203 copies of PORTAGE_BIN_PATH and PORTAGE_PYM_PATH in order
3204 to avoid relying on the new versions which may be
3205 incompatible. Register an atexit hook to clean up the
3206 temporary directories. Pre-load elog modules here since
3207 we won't be able to later if they get unmerged (happens
3208 when namespace changes).
3209 """
3210 if self.vartree.dbapi._categories is not None:
3211 self.vartree.dbapi._categories = None
3212 if self.myroot == "/" and \
3213 "sys-apps" == self.cat and \
3214 "portage" == pkgsplit(self.pkg)[0]:
3215 settings = self.settings
3216 base_path_orig = os.path.dirname(settings["PORTAGE_BIN_PATH"])
3217 from tempfile import mkdtemp
3218 import shutil
3219 # Make the temp directory inside PORTAGE_TMPDIR since, unlike
3220 # /tmp, it can't be mounted with the "noexec" option.
3221 base_path_tmp = mkdtemp("", "._portage_reinstall_.",
3222 settings["PORTAGE_TMPDIR"])
3223 from portage.process import atexit_register
3224 atexit_register(shutil.rmtree, base_path_tmp)
3225 dir_perms = 0755
3226 for subdir in "bin", "pym":
3227 var_name = "PORTAGE_%s_PATH" % subdir.upper()
3228 var_orig = settings[var_name]
3229 var_new = os.path.join(base_path_tmp, subdir)
3230 settings[var_name] = var_new
3231 settings.backup_changes(var_name)
3232 shutil.copytree(var_orig, var_new, symlinks=True)
3233 os.chmod(var_new, dir_perms)
3234 os.chmod(base_path_tmp, dir_perms)
3235 # This serves to pre-load the elog modules.
3236 elog_process(self.mycpv, self.settings,
3237 phasefilter=filter_mergephases)
3239 return self._merge(mergeroot, inforoot,
3240 myroot, myebuild=myebuild, cleanup=cleanup,
3241 mydbapi=mydbapi, prev_mtimes=prev_mtimes)
3243 def _merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0,
3244 mydbapi=None, prev_mtimes=None):
3245 retval = -1
3246 self.lockdb()
3247 try:
3248 retval = self.treewalk(mergeroot, myroot, inforoot, myebuild,
3249 cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes)
3250 # undo registrations of preserved libraries, bug #210501
3251 if retval != os.EX_OK:
3252 self.vartree.dbapi.plib_registry.unregister(self.mycpv, self.settings["SLOT"], self.settings["COUNTER"])
3253 # Process ebuild logfiles
3254 elog_process(self.mycpv, self.settings, phasefilter=filter_mergephases)
3255 if retval == os.EX_OK and "noclean" not in self.settings.features:
3256 if myebuild is None:
3257 myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
3258 doebuild(myebuild, "clean", myroot, self.settings,
3259 tree=self.treetype, mydbapi=mydbapi, vartree=self.vartree)
3260 finally:
3261 self.unlockdb()
3262 return retval
3264 def getstring(self,name):
3265 "returns contents of a file with whitespace converted to spaces"
3266 if not os.path.exists(self.dbdir+"/"+name):
3267 return ""
3268 myfile = open(self.dbdir+"/"+name,"r")
3269 mydata = myfile.read().split()
3270 myfile.close()
3271 return " ".join(mydata)
3273 def copyfile(self,fname):
3274 import shutil
3275 shutil.copyfile(fname,self.dbdir+"/"+os.path.basename(fname))
3277 def getfile(self,fname):
3278 if not os.path.exists(self.dbdir+"/"+fname):
3279 return ""
3280 myfile = open(self.dbdir+"/"+fname,"r")
3281 mydata = myfile.read()
3282 myfile.close()
3283 return mydata
3285 def setfile(self,fname,data):
3286 write_atomic(os.path.join(self.dbdir, fname), data)
3288 def getelements(self,ename):
3289 if not os.path.exists(self.dbdir+"/"+ename):
3290 return []
3291 myelement = open(self.dbdir+"/"+ename,"r")
3292 mylines = myelement.readlines()
3293 myreturn = []
3294 for x in mylines:
3295 for y in x[:-1].split():
3296 myreturn.append(y)
3297 myelement.close()
3298 return myreturn
3300 def setelements(self,mylist,ename):
3301 myelement = open(self.dbdir+"/"+ename,"w")
3302 for x in mylist:
3303 myelement.write(x+"\n")
3304 myelement.close()
3306 def isregular(self):
3307 "Is this a regular package (does it have a CATEGORY file? A dblink can be virtual *and* regular)"
3308 return os.path.exists(os.path.join(self.dbdir, "CATEGORY"))
3310 def tar_contents(contents, root, tar, protect=None, onProgress=None):
3311 from portage.util import normalize_path
3312 import tarfile
3313 root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
3314 id_strings = {}
3315 maxval = len(contents)
3316 curval = 0
3317 if onProgress:
3318 onProgress(maxval, 0)
3319 paths = contents.keys()
3320 paths.sort()
3321 for path in paths:
3322 curval += 1
3323 try:
3324 lst = os.lstat(path)
3325 except OSError, e:
3326 if e.errno != errno.ENOENT:
3327 raise
3328 del e
3329 if onProgress:
3330 onProgress(maxval, curval)
3331 continue
3332 contents_type = contents[path][0]
3333 if path.startswith(root):
3334 arcname = path[len(root):]
3335 else:
3336 raise ValueError("invalid root argument: '%s'" % root)
3337 live_path = path
3338 if 'dir' == contents_type and \
3339 not stat.S_ISDIR(lst.st_mode) and \
3340 os.path.isdir(live_path):
3341 # Even though this was a directory in the original ${D}, it exists
3342 # as a symlink to a directory in the live filesystem. It must be
3343 # recorded as a real directory in the tar file to ensure that tar
3344 # can properly extract its children.
3345 live_path = os.path.realpath(live_path)
3346 tarinfo = tar.gettarinfo(live_path, arcname)
3347 # store numbers instead of real names like tar's --numeric-owner
3348 tarinfo.uname = id_strings.setdefault(tarinfo.uid, str(tarinfo.uid))
3349 tarinfo.gname = id_strings.setdefault(tarinfo.gid, str(tarinfo.gid))
3351 if stat.S_ISREG(lst.st_mode):
3352 # break hardlinks due to bug #185305
3353 tarinfo.type = tarfile.REGTYPE
3354 if protect and protect(path):
3355 # Create an empty file as a place holder in order to avoid
3356 # potential collision-protect issues.
3357 tarinfo.size = 0
3358 tar.addfile(tarinfo)
3359 else:
3360 f = open(path)
3361 try:
3362 tar.addfile(tarinfo, f)
3363 finally:
3364 f.close()
3365 else:
3366 tar.addfile(tarinfo)
3367 if onProgress:
3368 onProgress(maxval, curval)
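# Minimal usage sketch for tar_contents(), assuming a dblink instance
# `mylink` (hypothetical) and the standard library tarfile module:
#     import tarfile
#     tar = tarfile.open("/tmp/pkg.tar.bz2", "w:bz2")
#     tar_contents(mylink.getcontents(), mylink.myroot, tar)
#     tar.close()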