# Copyright 1998-2007 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Id: vartree.py 10696 2008-06-17 18:13:56Z genone $
from portage.checksum import perform_md5
from portage.const import CACHE_PATH, CONFIG_MEMORY_FILE, PORTAGE_BIN_PATH, \
	PRIVATE_PATH, VDB_PATH
from portage.data import portage_gid, portage_uid, secpass
from portage.dbapi import dbapi
from portage.dep import dep_getslot, use_reduce, paren_reduce, isvalidatom, \
	isjustname, dep_getkey, match_from_list
from portage.exception import InvalidAtom, InvalidData, InvalidPackageName, \
	FileNotFound, PermissionDenied, UnsupportedAPIException
from portage.locks import lockdir, unlockdir
from portage.output import bold, red, green
from portage.update import fixdbentries
from portage.util import apply_secpass_permissions, ConfigProtect, ensure_dirs, \
	writemsg, writemsg_stdout, write_atomic, atomic_ofstream, writedict, \
	grabfile, grabdict, normalize_path, new_protect_filename, getlibpaths
from portage.versions import pkgsplit, catpkgsplit, catsplit, best, pkgcmp

from portage import listdir, dep_expand, flatten, key_expand, \
	doebuild_environment, doebuild, env_update, prepare_build_dirs, \
	abssymlink, movefile, _movefile, bsd_chflags, cpv_getkey

from portage.elog import elog_process
from portage.elog.messages import ewarn
from portage.elog.filtering import filter_mergephases, filter_unmergephases

import os, re, sys, stat, errno, commands, copy, time, subprocess
from itertools import izip

try:
	import cPickle
except ImportError:
	import pickle as cPickle
class PreservedLibsRegistry(object):
	""" This class handles the tracking of preserved library objects """
	def __init__(self, filename, autocommit=True):
		""" @param filename: absolute path for saving the preserved libs records
		    @type filename: String
		    @param autocommit: determines if the file is written after every update
		    @type autocommit: Boolean
		"""
		self._filename = filename
		self._autocommit = autocommit
		self.load()

	def load(self):
		""" Reload the registry data from file """
		try:
			self._data = cPickle.load(open(self._filename, "r"))
		except IOError, e:
			if e.errno == errno.ENOENT:
				self._data = {}
			elif e.errno == PermissionDenied.errno:
				raise PermissionDenied(self._filename)
			else:
				raise

	def store(self):
		""" Store the registry data to file. No need to call this if autocommit
		    was enabled.
		"""
		f = atomic_ofstream(self._filename)
		cPickle.dump(self._data, f)
		f.close()

	def register(self, cpv, slot, counter, paths):
		""" Register new objects in the registry. If there is a record with the
			same packagename (internally derived from cpv) and slot it is
			overwritten with the new data.
			@param cpv: package instance that owns the objects
			@type cpv: CPV (as String)
			@param slot: the value of SLOT of the given package instance
			@param counter: vdb counter value for the package instance
			@type counter: Integer
			@param paths: absolute paths of objects that got preserved during an update
		"""
		cp = "/".join(catpkgsplit(cpv)[:2])
		cps = cp + ":" + slot
		if len(paths) == 0 and self._data.has_key(cps) \
				and self._data[cps][0] == cpv and int(self._data[cps][1]) == int(counter):
			del self._data[cps]
		elif len(paths) > 0:
			self._data[cps] = (cpv, counter, paths)
		if self._autocommit:
			self.store()

	def unregister(self, cpv, slot, counter):
		""" Remove a previous registration of preserved objects for the given package.
			@param cpv: package instance whose records should be removed
			@type cpv: CPV (as String)
			@param slot: the value of SLOT of the given package instance
		"""
		self.register(cpv, slot, counter, [])

	def pruneNonExisting(self):
		""" Remove all records for objects that no longer exist on the filesystem. """
		for cps in self._data.keys():
			cpv, counter, paths = self._data[cps]
			paths = [f for f in paths if os.path.exists(f)]
			if len(paths) > 0:
				self._data[cps] = (cpv, counter, paths)
			else:
				del self._data[cps]
		if self._autocommit:
			self.store()

	def hasEntries(self):
		""" Check if this registry contains any records. """
		return len(self._data) > 0

	def getPreservedLibs(self):
		""" Return a mapping of packages->preserved objects.
			@returns mapping of package instances to preserved objects
			@rtype Dict cpv->list-of-paths
		"""
		rValue = {}
		for cps in self._data:
			rValue[self._data[cps][0]] = self._data[cps][2]
		return rValue
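# A minimal usage sketch of PreservedLibsRegistry (illustrative only; the
# path, cpv, counter and library path below are hypothetical examples, not
# values taken from this module):
#
#	registry = PreservedLibsRegistry("/tmp/preserved_libs_registry")
#	registry.register("sys-libs/zlib-1.2.3", "0", 42,
#		["/usr/lib/libz.so.1.2.2"])
#	if registry.hasEntries():
#		print registry.getPreservedLibs()   # {'sys-libs/zlib-1.2.3': [...]}
#	registry.unregister("sys-libs/zlib-1.2.3", "0", 42)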
class LinkageMap(object):
	def __init__(self, vardbapi):
		self._dbapi = vardbapi
		self._libs = {}
		self._obj_properties = {}
		self._defpath = getlibpaths()
	def rebuild(self, include_file=None):
		libs = {}
		obj_properties = {}
		lines = []
		for cpv in self._dbapi.cpv_all():
			lines += self._dbapi.aux_get(cpv, ["NEEDED.ELF.2"])[0].split('\n')
		# Cache NEEDED.* files to avoid doing excessive IO for every rebuild.
		self._dbapi.flush_cache()

		if include_file:
			lines += grabfile(include_file)

		# have to call scanelf for preserved libs here as they aren't
		# registered in NEEDED.ELF.2 files
		if self._dbapi.plib_registry and self._dbapi.plib_registry.getPreservedLibs():
			args = ["/usr/bin/scanelf", "-qF", "%a;%F;%S;%r;%n"]
			for items in self._dbapi.plib_registry.getPreservedLibs().values():
				args += [x.lstrip(".") for x in items]
			proc = subprocess.Popen(args, stdout=subprocess.PIPE)
			output = [l[3:] for l in proc.communicate()[0].split("\n")]
			lines += output

		for l in lines:
			if l.strip() == "":
				continue
			fields = l.strip("\n").split(";")
			if len(fields) < 5:
				# insufficient field length
				print "Error", fields
				continue
			arch = fields[0]
			obj = os.path.realpath(fields[1])
			soname = fields[2]
			# When fields[3]=="", this prevents the empty string from being
			# inserted as a path.
			path = filter(None, fields[3].replace(
				"${ORIGIN}", os.path.dirname(obj)).replace(
				"$ORIGIN", os.path.dirname(obj)).split(":"))
			# When fields[4]=="", this prevents the empty string from being
			# inserted as a key into libs.
			needed = filter(None, fields[4].split(","))
			if soname:
				libs.setdefault(soname, {arch: {"providers": [], "consumers": []}})
				libs[soname].setdefault(arch, {"providers": [], "consumers": []})
				libs[soname][arch]["providers"].append(obj)
			for x in needed:
				libs.setdefault(x, {arch: {"providers": [], "consumers": []}})
				libs[x].setdefault(arch, {"providers": [], "consumers": []})
				libs[x][arch]["consumers"].append(obj)
			obj_properties[obj] = (arch, needed, path, soname)
		self._libs = libs
		self._obj_properties = obj_properties
	def isMasterLink(self, obj):
		basename = os.path.basename(obj)
		if obj not in self._obj_properties:
			obj = os.path.realpath(obj)
			if obj not in self._obj_properties:
				raise KeyError("%s not in object list" % obj)
		soname = self._obj_properties[obj][3]
		return (len(basename) < len(soname))
	def listBrokenBinaries(self):
		"""
		Find binaries and their needed sonames, which have no providers.

		@rtype: dict (example: {'/usr/bin/foo': set(['libbar.so'])})
		@return: The return value is an object -> set-of-sonames mapping, where
			object is a broken binary and the set consists of sonames needed by
			object that have no corresponding libraries to fulfill the dependency.

		"""
		class SonameCache(object):
			"""
			Caches sonames and realpaths associated with paths.

			The purpose of this class is to prevent multiple calls of
			os.path.realpath and os.path.isfile on the same paths.

			"""
			def __init__(cache_self):
				cache_self.cache = {}

			def get(cache_self, path):
				"""
				Caches and returns the soname and realpath for a path.

				@param path: absolute path (can be symlink)
				@type path: string (example: '/usr/lib/libfoo.so')
				@rtype: 3-tuple with types (string or None, string, boolean)
				@return: 3-tuple with the following components:
					1. soname as a string or None if it does not exist,
					2. realpath as a string,
					3. the result of os.path.isfile(realpath)
					(example: ('libfoo.so.1', '/usr/lib/libfoo.so.1.5.1', True))

				"""
				if path in cache_self.cache:
					return cache_self.cache[path]
				realpath = os.path.realpath(path)
				# Check that the library exists on the filesystem.
				if os.path.isfile(realpath):
					# Get the soname from LinkageMap._obj_properties if it
					# exists. Otherwise, None.
					soname = self._obj_properties.get(realpath, (None,)*3)[3]
					# Both path and realpath are cached and the result is returned.
					cache_self.cache.setdefault(realpath, (soname, realpath, True))
					return cache_self.cache.setdefault(path, (soname, realpath, True))
				# realpath is not cached here, because the large majority of
				# cases where realpath is not a file, path is the same as
				# realpath. Thus storing twice slows down the cache
				# performance.
				return cache_self.cache.setdefault(path, (None, realpath, False))

		debug = False
		rValue = {}
		cache = SonameCache()
		providers = self.listProviders()
		# providers = self.listProvidersForReachableBinaries(self.getBinaries())

		# Iterate over all binaries and their providers.
		for obj, sonames in providers.items():
			# Iterate over each needed soname and the set of library paths that
			# fulfill the soname to determine if the dependency is broken.
			for soname, libraries in sonames.items():
				# validLibraries is used to store libraries, which satisfy soname,
				# so if no valid libraries are found, the soname is not satisfied
				# for obj. Thus obj must be emerged.
				validLibraries = set()
				# It could be the case that the library to satisfy the soname is
				# not in the obj's runpath, but a symlink to the library is (eg
				# libnvidia-tls.so.1 in nvidia-drivers). Also, since LinkageMap
				# does not catalog symlinks, broken or missing symlinks may go
				# unnoticed. As a result of these cases, check that a file with
				# the same name as the soname exists in the binary's runpath.
				path = self._obj_properties[obj][2] + self._defpath
				for dir in path:
					cachedSoname, cachedRealpath, cachedExists = \
						cache.get(os.path.join(dir, soname))
					# Check that this library provides the needed soname. Doing
					# this, however, will cause consumers of libraries missing
					# sonames to be unnecessarily emerged. (eg libmix.so)
					if cachedSoname == soname:
						validLibraries.add(cachedRealpath)
						if debug and cachedRealpath not in libraries:
							print "Unregistered symlink:", \
								os.path.join(dir, soname), cachedRealpath
						# A valid library has been found, so there is no need to
						# continue searching.
						break
					if debug and cachedRealpath in self._obj_properties:
						print "Broken symlink or missing/bad soname:", \
							os.path.join(dir, soname), '->', cachedRealpath
				# This conditional checks if there are no libraries to satisfy the
				# soname (empty set).
				if not validLibraries:
					rValue.setdefault(obj, set()).add(soname)
					# If no valid libraries have been found by this point, then
					# there are no files named with the soname within obj's runpath,
					# but if there are libraries (from the providers mapping), it is
					# likely that symlinks or the actual libraries are missing.
					# Thus possible symlinks and missing libraries are added to the
					# rValue to emerge corrupt library packages.
					for lib in libraries:
						cachedSoname, cachedRealpath, cachedExists = cache.get(lib)
						if not cachedExists:
							# The library's package needs to be emerged to repair the
							# dependency.
							rValue.setdefault(lib, set()).add(soname)
						else:
							rValue.setdefault(os.path.join(os.path.dirname(lib), \
								soname), set()).add(soname)
						if debug:
							if not cachedExists:
								print "Missing lib:", lib
							else:
								print "Possibly missing symlink:", \
									os.path.join(os.path.dirname(lib), soname)

		return rValue
	def listProviders(self):
		"""
		Find the providers for all binaries.

		@rtype: dict (example:
			{'/usr/bin/foo': {'libbar.so': set(['/lib/libbar.so.1.5'])}})
		@return: The return value is an object -> providers mapping, where
			providers is a mapping of soname -> set-of-library-paths returned
			from the findProviders method.

		"""
		rValue = {}
		if not self._libs:
			self.rebuild()
		# Iterate over all binaries within LinkageMap.
		for obj in self._obj_properties.keys():
			rValue.setdefault(obj, self.findProviders(obj))
		return rValue
	def getBinaries(self):
		"""
		Get binaries from PATH variables and shared library directories.

		@rtype: set of strings
		@return: the set of binaries found in PATH variables and shared library
			directories

		"""
		rValue = set()
		searchDirectories = set()

		# Gather set of directories from PATH variables and shared library
		# directories.
		pathvar = self._dbapi.settings['PATH'].split(':')
		rootpathvar = self._dbapi.settings['ROOTPATH'].split(':')
		if pathvar != ['']:
			searchDirectories.update(set(pathvar))
		if rootpathvar != ['']:
			searchDirectories.update(set(rootpathvar))
		searchDirectories.update(set(['/bin', '/sbin', '/usr/bin', '/usr/sbin']))
		for file in os.listdir('/usr'):
			if file.startswith('lib'):
				searchDirectories.add(os.path.join('/usr', file))
		searchDirectories.update(set(self._defpath))
		print searchDirectories

		# Gather the set of binaries that reside in the search directories.
		for obj in self._obj_properties.keys():
			if os.path.dirname(obj) in searchDirectories:
				rValue.add(obj)
		return rValue
	def listProvidersForReachableBinaries(self, binarySet, rValue={}):
		"""
		Recursively find reachable binaries and their providers.

		@param binarySet: set of paths to binaries, which must be in
			self._obj_properties
		@type binarySet: set of strings
		@param rValue: same as return
		@type rValue: same as rtype
		@rtype: dict (example:
			{'/usr/bin/foo': {'libbar.so': set(['/lib/libbar.so.1.5'])}})
		@return: The return value is an object -> providers mapping, where
			providers is a mapping of soname -> set-of-library-paths returned
			from the findProviders method.

		"""
		for binary in binarySet:
			if binary not in rValue:
				libraries = set()
				rValue.setdefault(binary, self.findProviders(binary))
				for libs in rValue[binary].values():
					libraries.update(set(libs))
				rValue.update(self.listProvidersForReachableBinaries(libraries, rValue=rValue))
		return rValue
	def findDeepProviders(self, obj, rValue={}):
		"""Recursively finds all direct and indirect providers.

		@param obj: pathname of obj in _obj_properties
		@param rValue: same as return
		@type rValue: same as rtype
		@rtype: dict (example:
			{'libbar.so': set(['/lib/libbar.so.1.5'])})
		@return: The return value is a soname -> set-of-library-paths mapping.

		"""
		if obj in rValue or os.path.realpath(obj) in rValue:
			return rValue
		if obj not in self._obj_properties:
			obj = os.path.realpath(obj)
			if obj not in self._obj_properties:
				raise KeyError("%s not in object list" % obj)
		arch, needed, path, soname = self._obj_properties[obj]
		path = [os.path.realpath(x) for x in path + self._defpath]
		for x in needed:
			rValue.setdefault(x, set())
			if x not in self._libs or arch not in self._libs[x]:
				continue
			for y in self._libs[x][arch]["providers"]:
				if x[0] == os.sep and os.path.realpath(x) == os.path.realpath(y):
					rValue[x].add(y)
					self.findDeepProviders(obj=y, rValue=rValue)
				elif os.path.realpath(os.path.dirname(y)) in path:
					rValue[x].add(y)
					self.findDeepProviders(obj=y, rValue=rValue)
		return rValue
	def listLibraryObjects(self):
		rValue = []
		if not self._libs:
			self.rebuild()
		for soname in self._libs:
			for arch in self._libs[soname]:
				rValue.extend(self._libs[soname][arch]["providers"])
		return rValue
	def findProviders(self, obj):
		rValue = {}
		if not self._libs:
			self.rebuild()
		if obj not in self._obj_properties:
			obj = os.path.realpath(obj)
			if obj not in self._obj_properties:
				raise KeyError("%s not in object list" % obj)
		arch, needed, path, soname = self._obj_properties[obj]
		path = [os.path.realpath(x) for x in path + self._defpath]
		for x in needed:
			rValue[x] = set()
			if x not in self._libs or arch not in self._libs[x]:
				continue
			for y in self._libs[x][arch]["providers"]:
				if x[0] == os.sep and os.path.realpath(x) == os.path.realpath(y):
					rValue[x].add(y)
				elif os.path.realpath(os.path.dirname(y)) in path:
					rValue[x].add(y)
		return rValue
	def findConsumers(self, obj):
		rValue = set()
		if not self._libs:
			self.rebuild()
		if obj not in self._obj_properties:
			obj = os.path.realpath(obj)
			if obj not in self._obj_properties:
				raise KeyError("%s not in object list" % obj)
		for soname in self._libs:
			for arch in self._libs[soname]:
				if obj in self._libs[soname][arch]["providers"]:
					for x in self._libs[soname][arch]["consumers"]:
						path = self._obj_properties[x][2]
						path = [os.path.realpath(y) for y in path + self._defpath]
						if soname[0] == os.sep and os.path.realpath(soname) == os.path.realpath(obj):
							rValue.add(x)
						elif os.path.realpath(os.path.dirname(obj)) in path:
							rValue.add(x)
		return rValue
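# A minimal usage sketch of LinkageMap (illustrative only; assumes an
# existing vardbapi instance named "vardb" and hypothetical paths):
#
#	linkmap = LinkageMap(vardb)
#	linkmap.rebuild()
#	# sonames needed by a binary, mapped to the sets of paths providing them
#	providers = linkmap.findProviders("/usr/bin/foo")
#	# binaries and libraries that consume (link against) a given library
#	consumers = linkmap.findConsumers("/usr/lib/libbar.so.1")
#	# binaries whose needed sonames have no providers at all
#	broken = linkmap.listBrokenBinaries()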
class vardbapi(dbapi):

	_excluded_dirs = ["CVS", "lost+found"]
	_excluded_dirs = [re.escape(x) for x in _excluded_dirs]
	_excluded_dirs = re.compile(r'^(\..*|-MERGING-.*|' + \
		"|".join(_excluded_dirs) + r')$')

	_aux_cache_version = "1"
	_owners_cache_version = "1"

	# Number of uncached packages to trigger cache update, since
	# it's wasteful to update it for every vdb change.
	_aux_cache_threshold = 5

	_aux_cache_keys_re = re.compile(r'^NEEDED\..*$')
	_aux_multi_line_re = re.compile(r'^(CONTENTS|NEEDED\..*)$')

	def __init__(self, root, categories=None, settings=None, vartree=None):
		"""
		The categories parameter is unused since the dbapi class
		now has a categories property that is generated from the
		available packages.
		"""
		self.root = root[:]

		#cache for category directory mtimes
		self.mtdircache = {}

		#cache for dependency checks
		self.matchcache = {}

		#cache for cp_list results
		self.cpcache = {}

		if settings is None:
			from portage import settings
		self.settings = settings
		if vartree is None:
			from portage import db
			vartree = db[root]["vartree"]
		self.vartree = vartree
		self._aux_cache_keys = set(
			["CHOST", "COUNTER", "DEPEND", "DESCRIPTION",
			"EAPI", "HOMEPAGE", "IUSE", "KEYWORDS",
			"LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
			"repository", "RESTRICT" , "SLOT", "USE"])
		self._aux_cache_obj = None
		self._aux_cache_filename = os.path.join(self.root,
			CACHE_PATH.lstrip(os.path.sep), "vdb_metadata.pickle")
		self._counter_path = os.path.join(root,
			CACHE_PATH.lstrip(os.path.sep), "counter")

		try:
			self.plib_registry = PreservedLibsRegistry(
				os.path.join(self.root, PRIVATE_PATH, "preserved_libs_registry"))
		except PermissionDenied:
			# apparently this user isn't allowed to access PRIVATE_PATH
			self.plib_registry = None

		self.linkmap = LinkageMap(self)
		self._owners = self._owners_db(self)
	def getpath(self, mykey, filename=None):
		rValue = os.path.join(self.root, VDB_PATH, mykey)
		if filename is not None:
			rValue = os.path.join(rValue, filename)
		return rValue

	def cpv_exists(self, mykey):
		"Tells us whether an actual ebuild exists on disk (no masking)"
		return os.path.exists(self.getpath(mykey))
	def cpv_counter(self, mycpv):
		"This method will grab the COUNTER. Returns a counter value."
		try:
			return long(self.aux_get(mycpv, ["COUNTER"])[0])
		except (KeyError, ValueError):
			pass
		cdir = self.getpath(mycpv)
		cpath = self.getpath(mycpv, filename="COUNTER")

		# We write our new counter value to a new file that gets moved into
		# place to avoid filesystem corruption on XFS (unexpected reboot.)
		corrupted = 0
		if os.path.exists(cpath):
			cfile = open(cpath, "r")
			try:
				counter = long(cfile.readline())
			except ValueError:
				print "portage: COUNTER for", mycpv, "was corrupted; resetting to value of 0"
				counter = long(0)
				corrupted = 1
			cfile.close()
		elif os.path.exists(cdir):
			mys = pkgsplit(mycpv)
			myl = self.match(mys[0], use_cache=0)
			if len(myl) == 1:
				try:
					# Only one package... Counter doesn't matter.
					write_atomic(cpath, "1")
					counter = 1
				except SystemExit, e:
					raise
				except Exception, e:
					writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
						noiselevel=-1)
					writemsg("!!! Please run %s/fix-db.py or\n" % PORTAGE_BIN_PATH,
						noiselevel=-1)
					writemsg("!!! unmerge this exact version.\n", noiselevel=-1)
					writemsg("!!! %s\n" % e, noiselevel=-1)
					sys.exit(1)
			else:
				writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
					noiselevel=-1)
				writemsg("!!! Please run %s/fix-db.py or\n" % PORTAGE_BIN_PATH,
					noiselevel=-1)
				writemsg("!!! remerge the package.\n", noiselevel=-1)
				sys.exit(1)
		else:
			counter = long(0)
		if corrupted:
			# update new global counter file
			write_atomic(cpath, str(counter))
		return counter
	def cpv_inject(self, mycpv):
		"injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
		os.makedirs(self.getpath(mycpv))
		counter = self.counter_tick(self.root, mycpv=mycpv)
		# write local package counter so that emerge clean does the right thing
		write_atomic(self.getpath(mycpv, filename="COUNTER"), str(counter))
	def isInjected(self, mycpv):
		if self.cpv_exists(mycpv):
			if os.path.exists(self.getpath(mycpv, filename="INJECTED")):
				return True
			if not os.path.exists(self.getpath(mycpv, filename="CONTENTS")):
				return True
		return False
	def move_ent(self, mylist):
		origcp = mylist[1]
		newcp = mylist[2]

		for cp in [origcp, newcp]:
			if not (isvalidatom(cp) and isjustname(cp)):
				raise InvalidPackageName(cp)
		origmatches = self.match(origcp, use_cache=0)
		moves = 0
		if not origmatches:
			return moves
		for mycpv in origmatches:
			mycpsplit = catpkgsplit(mycpv)
			mynewcpv = newcp + "-" + mycpsplit[2]
			mynewcat = newcp.split("/")[0]
			if mycpsplit[3] != "r0":
				mynewcpv += "-" + mycpsplit[3]
			mycpsplit_new = catpkgsplit(mynewcpv)
			origpath = self.getpath(mycpv)
			if not os.path.exists(origpath):
				continue
			moves += 1
			if not os.path.exists(self.getpath(mynewcat)):
				#create the directory
				os.makedirs(self.getpath(mynewcat))
			newpath = self.getpath(mynewcpv)
			if os.path.exists(newpath):
				#dest already exists; keep this puppy where it is.
				continue
			_movefile(origpath, newpath, mysettings=self.settings)

			# We need to rename the ebuild now.
			old_pf = catsplit(mycpv)[1]
			new_pf = catsplit(mynewcpv)[1]
			if old_pf != new_pf:
				try:
					os.rename(os.path.join(newpath, old_pf + ".ebuild"),
						os.path.join(newpath, new_pf + ".ebuild"))
				except EnvironmentError, e:
					if e.errno != errno.ENOENT:
						raise
					del e
			write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
			write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
			fixdbentries([mylist], newpath)
		return moves
	def cp_list(self, mycp, use_cache=1):
		mysplit = catsplit(mycp)
		if mysplit[0] == '*':
			mysplit[0] = mysplit[0][1:]
		try:
			mystat = os.stat(self.getpath(mysplit[0]))[stat.ST_MTIME]
		except OSError:
			mystat = 0
		if use_cache and self.cpcache.has_key(mycp):
			cpc = self.cpcache[mycp]
			if cpc[0] == mystat:
				return cpc[1][:]
		cat_dir = self.getpath(mysplit[0])
		try:
			dir_list = os.listdir(cat_dir)
		except EnvironmentError, e:
			from portage.exception import PermissionDenied
			if e.errno == PermissionDenied.errno:
				raise PermissionDenied(cat_dir)
			del e
			return []

		returnme = []
		for x in dir_list:
			if self._excluded_dirs.match(x) is not None:
				continue
			ps = pkgsplit(x)
			if not ps:
				self.invalidentry(os.path.join(self.getpath(mysplit[0]), x))
				continue
			if len(mysplit) > 1:
				if ps[0] == mysplit[1]:
					returnme.append(mysplit[0]+"/"+x)
		self._cpv_sort_ascending(returnme)
		if use_cache:
			self.cpcache[mycp] = [mystat, returnme[:]]
		elif self.cpcache.has_key(mycp):
			del self.cpcache[mycp]
		return returnme
	def cpv_all(self, use_cache=1):
		"""
		Set use_cache=0 to bypass the portage.cachedir() cache in cases
		when the accuracy of mtime staleness checks should not be trusted
		(generally this is only necessary in critical sections that
		involve merge or unmerge of packages).
		"""
		returnme = []
		basepath = os.path.join(self.root, VDB_PATH) + os.path.sep

		if use_cache:
			from portage import listdir
		else:
			def listdir(p, **kwargs):
				try:
					return [x for x in os.listdir(p) \
						if os.path.isdir(os.path.join(p, x))]
				except EnvironmentError, e:
					if e.errno == PermissionDenied.errno:
						raise PermissionDenied(p)
					del e
					return []

		for x in listdir(basepath, EmptyOnError=1, ignorecvs=1, dirsonly=1):
			if self._excluded_dirs.match(x) is not None:
				continue
			if not self._category_re.match(x):
				continue
			for y in listdir(basepath + x, EmptyOnError=1, dirsonly=1):
				if self._excluded_dirs.match(y) is not None:
					continue
				subpath = x + "/" + y
				# -MERGING- should never be a cpv, nor should files.
				try:
					if catpkgsplit(subpath) is None:
						self.invalidentry(os.path.join(self.root, subpath))
						continue
				except InvalidData:
					self.invalidentry(os.path.join(self.root, subpath))
					continue
				returnme.append(subpath)
		return returnme
	def cp_all(self, use_cache=1):
		mylist = self.cpv_all(use_cache=use_cache)
		d = {}
		for y in mylist:
			try:
				mysplit = catpkgsplit(y)
			except InvalidData:
				self.invalidentry(self.getpath(y))
				continue
			if not mysplit:
				self.invalidentry(self.getpath(y))
				continue
			d[mysplit[0]+"/"+mysplit[1]] = None
		return d.keys()
	def checkblockers(self, origdep):
		pass

	def _add(self, pkg_dblink):
		self._clear_cache(pkg_dblink)

	def _remove(self, pkg_dblink):
		self._clear_cache(pkg_dblink)

	def _clear_cache(self, pkg_dblink):
		# Due to 1 second mtime granularity in <python-2.5, mtime checks
		# are not always sufficient to invalidate vardbapi caches. Therefore,
		# the caches need to be actively invalidated here.
		self.mtdircache.pop(pkg_dblink.cat, None)
		self.matchcache.pop(pkg_dblink.cat, None)
		self.cpcache.pop(pkg_dblink.mysplit[0], None)
		from portage import dircache
		dircache.pop(pkg_dblink.dbcatdir, None)
	def match(self, origdep, use_cache=1):
		"caching match function"
		mydep = dep_expand(
			origdep, mydb=self, use_cache=use_cache, settings=self.settings)
		mykey = dep_getkey(mydep)
		mycat = catsplit(mykey)[0]
		if not use_cache:
			if self.matchcache.has_key(mycat):
				del self.mtdircache[mycat]
				del self.matchcache[mycat]
			return list(self._iter_match(mydep,
				self.cp_list(mydep.cp, use_cache=use_cache)))
		try:
			curmtime = os.stat(self.root+VDB_PATH+"/"+mycat).st_mtime
		except (IOError, OSError):
			curmtime = 0

		if not self.matchcache.has_key(mycat) or not self.mtdircache[mycat] == curmtime:
			# clear cache entry
			self.mtdircache[mycat] = curmtime
			self.matchcache[mycat] = {}
		if not self.matchcache[mycat].has_key(mydep):
			mymatch = list(self._iter_match(mydep,
				self.cp_list(mydep.cp, use_cache=use_cache)))
			self.matchcache[mycat][mydep] = mymatch
		return self.matchcache[mycat][mydep][:]
	def findname(self, mycpv):
		return self.getpath(str(mycpv), filename=catsplit(mycpv)[1]+".ebuild")
	def flush_cache(self):
		"""If the current user has permission and the internal aux_get cache has
		been updated, save it to disk and mark it unmodified. This is called
		by emerge after it has loaded the full vdb for use in dependency
		calculations. Currently, the cache is only written if the user has
		superuser privileges (since that's required to obtain a lock), but all
		users have read access and benefit from faster metadata lookups (as
		long as at least part of the cache is still valid)."""
		if self._aux_cache is not None and \
			len(self._aux_cache["modified"]) >= self._aux_cache_threshold and \
			secpass >= 2:
			self._owners.populate() # index any unindexed contents
			valid_nodes = set(self.cpv_all())
			for cpv in self._aux_cache["packages"].keys():
				if cpv not in valid_nodes:
					del self._aux_cache["packages"][cpv]
			del self._aux_cache["modified"]
			try:
				f = atomic_ofstream(self._aux_cache_filename)
				cPickle.dump(self._aux_cache, f, -1)
				f.close()
				apply_secpass_permissions(
					self._aux_cache_filename, gid=portage_gid, mode=0644)
			except (IOError, OSError), e:
				pass
			self._aux_cache["modified"] = set()
	@property
	def _aux_cache(self):
		if self._aux_cache_obj is None:
			self._aux_cache_init()
		return self._aux_cache_obj
	def _aux_cache_init(self):
		aux_cache = None
		try:
			f = open(self._aux_cache_filename)
			mypickle = cPickle.Unpickler(f)
			mypickle.find_global = None
			aux_cache = mypickle.load()
			f.close()
			del f
		except (IOError, OSError, EOFError, cPickle.UnpicklingError), e:
			if isinstance(e, cPickle.UnpicklingError):
				writemsg("!!! Error loading '%s': %s\n" % \
					(self._aux_cache_filename, str(e)), noiselevel=-1)
			del e

		if not aux_cache or \
			not isinstance(aux_cache, dict) or \
			aux_cache.get("version") != self._aux_cache_version or \
			not aux_cache.get("packages"):
			aux_cache = {"version": self._aux_cache_version}
			aux_cache["packages"] = {}

		owners = aux_cache.get("owners")
		if owners is not None:
			if not isinstance(owners, dict):
				owners = None
			elif "version" not in owners:
				owners = None
			elif owners["version"] != self._owners_cache_version:
				owners = None
			elif "base_names" not in owners:
				owners = None
			elif not isinstance(owners["base_names"], dict):
				owners = None

		if owners is None:
			owners = {
				"base_names" : {},
				"version" : self._owners_cache_version
			}
			aux_cache["owners"] = owners

		aux_cache["modified"] = set()
		self._aux_cache_obj = aux_cache
	def aux_get(self, mycpv, wants):
		"""This automatically caches selected keys that are frequently needed
		by emerge for dependency calculations. The cached metadata is
		considered valid if the mtime of the package directory has not changed
		since the data was cached. The cache is stored in a pickled dict
		object with the following format:

		{version:"1", "packages":{cpv1:(mtime,{k1,v1, k2,v2, ...}), cpv2...}}

		If an error occurs while loading the cache pickle or the version is
		unrecognized, the cache will simply be recreated from scratch (it is
		completely disposable).
		"""
		cache_these_wants = self._aux_cache_keys.intersection(wants)
		for x in wants:
			if self._aux_cache_keys_re.match(x) is not None:
				cache_these_wants.add(x)

		if not cache_these_wants:
			return self._aux_get(mycpv, wants)

		cache_these = set(self._aux_cache_keys)
		cache_these.update(cache_these_wants)

		mydir = self.getpath(mycpv)
		mydir_stat = None
		try:
			mydir_stat = os.stat(mydir)
		except OSError, e:
			if e.errno != errno.ENOENT:
				raise
			raise KeyError(mycpv)
		mydir_mtime = long(mydir_stat.st_mtime)
		pkg_data = self._aux_cache["packages"].get(mycpv)
		pull_me = cache_these.union(wants)
		mydata = {"_mtime_" : mydir_mtime}
		cache_valid = False
		cache_incomplete = False
		cache_mtime = None
		metadata = None
		if pkg_data is not None:
			if not isinstance(pkg_data, tuple) or len(pkg_data) != 2:
				pkg_data = None
			else:
				cache_mtime, metadata = pkg_data
				if not isinstance(cache_mtime, (long, int)) or \
					not isinstance(metadata, dict):
					pkg_data = None

		if pkg_data:
			cache_mtime, metadata = pkg_data
			cache_valid = cache_mtime == mydir_mtime
		if cache_valid:
			mydata.update(metadata)
			pull_me.difference_update(metadata)

		if pull_me:
			# pull any needed data and cache it
			aux_keys = list(pull_me)
			for k, v in izip(aux_keys,
				self._aux_get(mycpv, aux_keys, st=mydir_stat)):
				mydata[k] = v
			if not cache_valid or cache_these.difference(metadata):
				cache_data = {}
				if cache_valid and metadata:
					cache_data.update(metadata)
				for aux_key in cache_these:
					cache_data[aux_key] = mydata[aux_key]
				self._aux_cache["packages"][mycpv] = (mydir_mtime, cache_data)
				self._aux_cache["modified"].add(mycpv)
		return [mydata[x] for x in wants]
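	# Illustrative shape of the vdb_metadata.pickle cache documented in the
	# aux_get docstring above (the cpv, mtime and metadata values below are
	# hypothetical):
	#
	#	{"version": "1",
	#	 "packages": {
	#		"sys-apps/portage-2.1.5": (1213727922L,
	#			{"SLOT": "0", "COUNTER": "42", "EAPI": "0"}),
	#	 },
	#	 "owners": {"version": "1", "base_names": {}},
	#	 "modified": set()}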
	def _aux_get(self, mycpv, wants, st=None):
		mydir = self.getpath(mycpv)
		if st is None:
			try:
				st = os.stat(mydir)
			except OSError, e:
				if e.errno == errno.ENOENT:
					raise KeyError(mycpv)
				elif e.errno == PermissionDenied.errno:
					raise PermissionDenied(mydir)
				else:
					raise
		if not stat.S_ISDIR(st.st_mode):
			raise KeyError(mycpv)
		results = []
		for x in wants:
			if x == "_mtime_":
				results.append(st.st_mtime)
				continue
			try:
				myf = open(os.path.join(mydir, x), "r")
				try:
					myd = myf.read()
				finally:
					myf.close()
				# Preserve \n for metadata that is known to
				# contain multiple lines.
				if self._aux_multi_line_re.match(x) is None:
					myd = " ".join(myd.split())
			except IOError:
				myd = ""
			if x == "EAPI" and not myd:
				results.append("0")
			else:
				results.append(myd)
		return results
	def aux_update(self, cpv, values):
		cat, pkg = catsplit(cpv)
		mylink = dblink(cat, pkg, self.root, self.settings,
			treetype="vartree", vartree=self.vartree)
		if not mylink.exists():
			raise KeyError(cpv)
		for k, v in values.iteritems():
			if v:
				mylink.setfile(k, v)
			else:
				try:
					os.unlink(os.path.join(self.getpath(cpv), k))
				except EnvironmentError:
					pass
	def counter_tick(self, myroot, mycpv=None):
		return self.counter_tick_core(myroot, incrementing=1, mycpv=mycpv)
	def get_counter_tick_core(self, myroot, mycpv=None):
		"""
		Use this method to retrieve the counter instead
		of having to trust the value of a global counter
		file that can lead to invalid COUNTER
		generation. When cache is valid, the package COUNTER
		files are not read and we rely on the timestamp of
		the package directory to validate cache. The stat
		calls should only take a short time, so performance
		is sufficient without having to rely on a potentially
		corrupt global counter file.

		The global counter file located at
		$CACHE_PATH/counter serves to record the
		counter of the last installed package and
		it also corresponds to the total number of
		installation actions that have occurred in
		the history of this package database.
		"""
		cp_list = self.cp_list
		max_counter = 0
		for cp in self.cp_all():
			for cpv in cp_list(cp):
				try:
					counter = int(self.aux_get(cpv, ["COUNTER"])[0])
				except (KeyError, OverflowError, ValueError):
					continue
				if counter > max_counter:
					max_counter = counter

		counter = -1
		try:
			cfile = open(self._counter_path, "r")
		except EnvironmentError, e:
			writemsg("!!! Unable to read COUNTER file: '%s'\n" % \
				self._counter_path, noiselevel=-1)
			writemsg("!!! %s\n" % str(e), noiselevel=-1)
			del e
		else:
			try:
				try:
					counter = long(cfile.readline().strip())
				finally:
					cfile.close()
			except (OverflowError, ValueError), e:
				writemsg("!!! COUNTER file is corrupt: '%s'\n" % \
					self._counter_path, noiselevel=-1)
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				del e

		# We must ensure that we return a counter
		# value that is at least as large as the
		# highest one from the installed packages,
		# since having a corrupt value that is too low
		# can trigger incorrect AUTOCLEAN behavior due
		# to newly installed packages having lower
		# COUNTERs than the previous version in the
		# same slot.
		if counter > max_counter:
			max_counter = counter

		if counter < 0:
			writemsg("!!! Initializing COUNTER to " + \
				"value of %d\n" % max_counter, noiselevel=-1)

		return max_counter + 1
	def counter_tick_core(self, myroot, incrementing=1, mycpv=None):
		"This method will grab the next COUNTER value and record it back to the global file. Returns new counter value."
		counter = self.get_counter_tick_core(myroot, mycpv=mycpv) - 1
		if incrementing:
			#increment counter
			counter += 1
			# update new global counter file
			write_atomic(self._counter_path, str(counter))
		return counter
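	# Sketch of the counter flow (illustrative only; "vardb" is a vardbapi
	# instance and the cpv is hypothetical):
	#
	#	# peek at the next counter value without recording it
	#	next_counter = vardb.get_counter_tick_core("/")
	#	# grab the next counter and write it back to the global counter file
	#	counter = vardb.counter_tick("/", mycpv="app-misc/foo-1.0")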
	def _dblink(self, cpv):
		category, pf = catsplit(cpv)
		return dblink(category, pf, self.root,
			self.settings, vartree=self.vartree)
	class _owners_cache(object):
		"""
		This class maintains a hash table that serves to index package
		contents by mapping the basename of a file to a list of possible
		packages that own it. This is used to optimize owner lookups
		by narrowing the search down to a smaller number of packages.
		"""
		try:
			from hashlib import md5 as _new_hash
		except ImportError:
			from md5 import new as _new_hash

		_hash_bits = 16
		_hex_chars = _hash_bits / 4

		def __init__(self, vardb):
			self._vardb = vardb

		def add(self, cpv):
			root_len = len(self._vardb.root)
			contents = self._vardb._dblink(cpv).getcontents()
			pkg_hash = self._hash_pkg(cpv)
			if not contents:
				# Empty path is a code used to represent empty contents.
				self._add_path("", pkg_hash)
			for x in contents:
				self._add_path(x[root_len:], pkg_hash)
			self._vardb._aux_cache["modified"].add(cpv)

		def _add_path(self, path, pkg_hash):
			"""
			Empty path is a code that represents empty contents.
			"""
			if path:
				name = os.path.basename(path.rstrip(os.path.sep))
				if not name:
					return
			else:
				name = path
			name_hash = self._hash_str(name)
			base_names = self._vardb._aux_cache["owners"]["base_names"]
			pkgs = base_names.get(name_hash)
			if pkgs is None:
				pkgs = {}
				base_names[name_hash] = pkgs
			pkgs[pkg_hash] = None

		def _hash_str(self, s):
			h = self._new_hash()
			h.update(s)
			h = h.hexdigest()
			h = h[-self._hex_chars:]
			h = int(h, 16)
			return h

		def _hash_pkg(self, cpv):
			counter, mtime = self._vardb.aux_get(
				cpv, ["COUNTER", "_mtime_"])
			try:
				counter = int(counter)
			except ValueError:
				counter = 0
			return (cpv, counter, mtime)
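	# Illustrative shape of the "base_names" index maintained by
	# _owners_cache (hypothetical values): the truncated basename hash of an
	# owned file maps to a dict keyed by (cpv, COUNTER, mtime) package
	# hashes:
	#
	#	base_names = {
	#		41394: {("app-shells/bash-3.2_p33", 7, 1213727922L): None},
	#	}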
	class _owners_db(object):

		def __init__(self, vardb):
			self._vardb = vardb

		def populate(self):
			self._populate()

		def _populate(self):
			owners_cache = vardbapi._owners_cache(self._vardb)
			cached_hashes = set()
			base_names = self._vardb._aux_cache["owners"]["base_names"]

			# Take inventory of all cached package hashes.
			for name, hash_values in base_names.items():
				if not isinstance(hash_values, dict):
					del base_names[name]
					continue
				cached_hashes.update(hash_values)

			# Create sets of valid package hashes and uncached packages.
			uncached_pkgs = set()
			hash_pkg = owners_cache._hash_pkg
			valid_pkg_hashes = set()
			for cpv in self._vardb.cpv_all():
				hash_value = hash_pkg(cpv)
				valid_pkg_hashes.add(hash_value)
				if hash_value not in cached_hashes:
					uncached_pkgs.add(cpv)

			# Cache any missing packages.
			for cpv in uncached_pkgs:
				owners_cache.add(cpv)

			# Delete any stale cache.
			stale_hashes = cached_hashes.difference(valid_pkg_hashes)
			if stale_hashes:
				for base_name_hash, bucket in base_names.items():
					for hash_value in stale_hashes.intersection(bucket):
						del bucket[hash_value]
					if not bucket:
						del base_names[base_name_hash]

			return owners_cache
		def get_owners(self, path_iter):
			"""
			@return the owners as a dblink -> set(files) mapping.
			"""
			owners = {}
			for owner, f in self.iter_owners(path_iter):
				owned_files = owners.get(owner)
				if owned_files is None:
					owned_files = set()
					owners[owner] = owned_files
				owned_files.add(f)
			return owners
		def iter_owners(self, path_iter):
			"""
			Iterate over tuples of (dblink, path). In order to avoid
			consuming too many resources for too much time, resources
			are only allocated for the duration of a given iter_owners()
			call. Therefore, to maximize reuse of resources when searching
			for multiple files, it's best to search for them all in a single
			call.
			"""
			owners_cache = self._populate()

			root = self._vardb.root
			hash_pkg = owners_cache._hash_pkg
			hash_str = owners_cache._hash_str
			base_names = self._vardb._aux_cache["owners"]["base_names"]

			dblink_cache = {}

			def dblink(cpv):
				x = dblink_cache.get(cpv)
				if x is None:
					x = self._vardb._dblink(cpv)
					dblink_cache[cpv] = x
				return x

			for path in path_iter:
				name = os.path.basename(path.rstrip(os.path.sep))
				if not name:
					continue

				name_hash = hash_str(name)
				pkgs = base_names.get(name_hash)
				if pkgs is not None:
					for hash_value in pkgs:
						if not isinstance(hash_value, tuple) or \
							len(hash_value) != 3:
							continue
						cpv, counter, mtime = hash_value
						if not isinstance(cpv, basestring):
							continue
						try:
							current_hash = hash_pkg(cpv)
						except KeyError:
							continue

						if current_hash != hash_value:
							continue
						if dblink(cpv).isowner(path, root):
							yield dblink(cpv), path
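# Minimal usage sketch of the owners db (illustrative only; "vardb" is a
# vardbapi instance), similar to what "equery belongs" does:
#
#	owners = vardb._owners.get_owners(["/bin/bash"])
#	for pkg_dblink, files in owners.items():
#		print pkg_dblink.mycpv, sorted(files)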
class vartree(object):
	"this tree will scan a var/db/pkg database located at root (passed to init)"
	def __init__(self, root="/", virtual=None, clone=None, categories=None,
		settings=None):
		if clone:
			writemsg("vartree.__init__(): deprecated " + \
				"use of clone parameter\n", noiselevel=-1)
			self.root = clone.root[:]
			self.dbapi = copy.deepcopy(clone.dbapi)
			from portage import config
			self.settings = config(clone=clone.settings)
		else:
			self.root = root[:]
			if settings is None:
				from portage import settings
			self.settings = settings # for key_expand calls
			if categories is None:
				categories = settings.categories
			self.dbapi = vardbapi(self.root, categories=categories,
				settings=settings, vartree=self)
	def getpath(self, mykey, filename=None):
		return self.dbapi.getpath(mykey, filename=filename)

	def zap(self, mycpv):
		return

	def inject(self, mycpv):
		return
	def get_provide(self, mycpv):
		myprovides = []
		mylines = None
		try:
			mylines, myuse = self.dbapi.aux_get(mycpv, ["PROVIDE", "USE"])
			if mylines:
				myuse = myuse.split()
				mylines = flatten(use_reduce(paren_reduce(mylines), uselist=myuse))
				for myprovide in mylines:
					mys = catpkgsplit(myprovide)
					if not mys:
						mys = myprovide.split("/")
					myprovides += [mys[0] + "/" + mys[1]]
			return myprovides
		except SystemExit, e:
			raise
		except Exception, e:
			mydir = os.path.join(self.root, VDB_PATH, mycpv)
			writemsg("\nParse Error reading PROVIDE and USE in '%s'\n" % mydir,
				noiselevel=-1)
			if mylines:
				writemsg("Possibly Invalid: '%s'\n" % str(mylines),
					noiselevel=-1)
			writemsg("Exception: %s\n\n" % str(e), noiselevel=-1)
			return []
):
1353 for node
in self
.getallcpv():
1354 for mykey
in self
.get_provide(node
):
1355 if myprovides
.has_key(mykey
):
1356 myprovides
[mykey
] += [node
]
1358 myprovides
[mykey
] = [node
]
1361 def dep_bestmatch(self
, mydep
, use_cache
=1):
1362 "compatibility method -- all matches, not just visible ones"
1363 #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
1364 mymatch
= best(self
.dbapi
.match(
1365 dep_expand(mydep
, mydb
=self
.dbapi
, settings
=self
.settings
),
1366 use_cache
=use_cache
))
1372 def dep_match(self
, mydep
, use_cache
=1):
1373 "compatibility method -- we want to see all matches, not just visible ones"
1374 #mymatch = match(mydep,self.dbapi)
1375 mymatch
= self
.dbapi
.match(mydep
, use_cache
=use_cache
)
	def exists_specific(self, cpv):
		return self.dbapi.cpv_exists(cpv)

	def getallcpv(self):
		"""temporary function, probably to be renamed --- Gets a list of all
		category/package-versions installed on the system."""
		return self.dbapi.cpv_all()

	def getallnodes(self):
		"""new behavior: these are all *unmasked* nodes. There may or may not be
		masked packages available for nodes in this list."""
		return self.dbapi.cp_all()
	def exists_specific_cat(self, cpv, use_cache=1):
		cpv = key_expand(cpv, mydb=self.dbapi, use_cache=use_cache,
			settings=self.settings)
		a = catpkgsplit(cpv)
		if not a:
			return 0
		mylist = listdir(self.getpath(a[0]), EmptyOnError=1)
		for x in mylist:
			b = pkgsplit(x)
			if not b:
				self.dbapi.invalidentry(self.getpath(a[0], filename=x))
				continue
			if b[0] == a[1]:
				return 1
		return 0

	def getebuildpath(self, fullpackage):
		cat, package = catsplit(fullpackage)
		return self.getpath(fullpackage, filename=package+".ebuild")
	def getnode(self, mykey, use_cache=1):
		mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
			settings=self.settings)
		if not mykey:
			return []
		mysplit = catsplit(mykey)
		mydirlist = listdir(self.getpath(mysplit[0]), EmptyOnError=1)
		returnme = []
		for x in mydirlist:
			mypsplit = pkgsplit(x)
			if not mypsplit:
				self.dbapi.invalidentry(self.getpath(mysplit[0], filename=x))
				continue
			if mypsplit[0] == mysplit[1]:
				appendme = [mysplit[0]+"/"+x, [mysplit[0], mypsplit[0], mypsplit[1], mypsplit[2]]]
				returnme.append(appendme)
		return returnme
	def getslot(self, mycatpkg):
		"Get a slot for a catpkg; assume it exists."
		try:
			return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
		except KeyError:
			return ""
	def hasnode(self, mykey, use_cache):
		"""Does the particular node (cat/pkg key) exist?"""
		mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
			settings=self.settings)
		mysplit = catsplit(mykey)
		mydirlist = listdir(self.getpath(mysplit[0]), EmptyOnError=1)
		for x in mydirlist:
			mypsplit = pkgsplit(x)
			if not mypsplit:
				self.dbapi.invalidentry(self.getpath(mysplit[0], filename=x))
				continue
			if mypsplit[0] == mysplit[1]:
				return 1
		return 0
class dblink(object):
	"""
	This class provides an interface to the installed package database
	At present this is implemented as a text backend in /var/db/pkg.
	"""

	_normalize_needed = re.compile(r'.*//.*|^[^/]|.+/$|(^|.*/)\.\.?(/.*|$)')
	_contents_split_counts = {
		"dev": 2,
		"dir": 2,
		"fif": 2,
		"obj": 4,
		"sym": 5
	}

	def __init__(self, cat, pkg, myroot, mysettings, treetype=None,
		vartree=None, blockers=None):
		"""
		Creates a DBlink object for a given CPV.
		The given CPV may not be present in the database already.

		@param cat: Category
		@type cat: String
		@param pkg: Package (PV)
		@type pkg: String
		@param myroot: Typically ${ROOT}
		@type myroot: String (Path)
		@param mysettings: Typically portage.config
		@type mysettings: An instance of portage.config
		@param treetype: one of ['porttree','bintree','vartree']
		@type treetype: String
		@param vartree: an instance of vartree corresponding to myroot.
		@type vartree: vartree
		"""

		self.cat = cat
		self.pkg = pkg
		self.mycpv = self.cat + "/" + self.pkg
		self.mysplit = list(catpkgsplit(self.mycpv)[1:])
		self.mysplit[0] = "%s/%s" % (self.cat, self.mysplit[0])
		self.treetype = treetype
		if vartree is None:
			from portage import db
			vartree = db[myroot]["vartree"]
		self.vartree = vartree
		self._blockers = blockers

		self.dbroot = normalize_path(os.path.join(myroot, VDB_PATH))
		self.dbcatdir = self.dbroot+"/"+cat
		self.dbpkgdir = self.dbcatdir+"/"+pkg
		self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
		self.dbdir = self.dbpkgdir

		self._lock_vdb = None

		self.settings = mysettings
		if self.settings == 1:
			raise ValueError

		self.myroot = myroot
		protect_obj = ConfigProtect(myroot,
			mysettings.get("CONFIG_PROTECT","").split(),
			mysettings.get("CONFIG_PROTECT_MASK","").split())
		self.updateprotect = protect_obj.updateprotect
		self.isprotected = protect_obj.isprotected
		self._installed_instance = None
		self.contentscache = None
		self._contents_inodes = None
		self._contents_basenames = None
	def lockdb(self):
		if self._lock_vdb:
			raise AssertionError("Lock already held.")
		# At least the parent needs to exist for the lock file.
		ensure_dirs(self.dbroot)
		self._lock_vdb = lockdir(self.dbroot)

	def unlockdb(self):
		if self._lock_vdb:
			unlockdir(self._lock_vdb)
			self._lock_vdb = None
1542 "return path to location of db information (for >>> informational display)"
1546 "does the db entry exist? boolean."
1547 return os
.path
.exists(self
.dbdir
)
1551 Remove this entry from the database
1553 if not os
.path
.exists(self
.dbdir
):
1556 # Check validity of self.dbdir before attempting to remove it.
1557 if not self
.dbdir
.startswith(self
.dbroot
):
1558 writemsg("portage.dblink.delete(): invalid dbdir: %s\n" % \
1559 self
.dbdir
, noiselevel
=-1)
1562 shutil
.rmtree(self
.dbdir
)
1563 self
.vartree
.dbapi
._remove
(self
)
1565 def clearcontents(self
):
1567 For a given db entry (self), erase the CONTENTS values.
1569 if os
.path
.exists(self
.dbdir
+"/CONTENTS"):
1570 os
.unlink(self
.dbdir
+"/CONTENTS")
1572 def _clear_contents_cache(self
):
1573 self
.contentscache
= None
1574 self
._contents
_inodes
= None
1575 self
._contents
_basenames
= None
	def getcontents(self):
		"""
		Get the installed files of a given package (aka what that package installed)
		"""
		contents_file = os.path.join(self.dbdir, "CONTENTS")
		if self.contentscache is not None:
			return self.contentscache
		pkgfiles = {}
		try:
			myc = open(contents_file, "r")
		except EnvironmentError, e:
			if e.errno != errno.ENOENT:
				raise
			del e
			self.contentscache = pkgfiles
			return pkgfiles
		mylines = myc.readlines()
		myc.close()
		null_byte = "\0"
		normalize_needed = self._normalize_needed
		contents_split_counts = self._contents_split_counts
		myroot = self.myroot
		if myroot == os.path.sep:
			myroot = None
		errors = []
		for pos, line in enumerate(mylines):
			if null_byte in line:
				# Null bytes are a common indication of corruption.
				errors.append((pos + 1, "Null byte found in CONTENTS entry"))
				continue
			line = line.rstrip("\n")
			# Split on " " so that even file paths that
			# end with spaces can be handled.
			mydat = line.split(" ")
			entry_type = mydat[0] # empty string if line is empty
			correct_split_count = contents_split_counts.get(entry_type)
			if correct_split_count and len(mydat) > correct_split_count:
				# Apparently file paths contain spaces, so reassemble the
				# split so that it has the correct_split_count.
				newsplit = [entry_type]
				spaces_total = len(mydat) - correct_split_count
				if entry_type == "sym":
					try:
						splitter = mydat.index("->", 2, len(mydat) - 2)
					except ValueError:
						errors.append((pos + 1, "Unrecognized CONTENTS entry"))
						continue
					spaces_in_path = splitter - 2
					spaces_in_target = spaces_total - spaces_in_path
					newsplit.append(" ".join(mydat[1:splitter]))
					newsplit.append("->")
					target_end = splitter + spaces_in_target + 2
					newsplit.append(" ".join(mydat[splitter + 1:target_end]))
					newsplit.extend(mydat[target_end:])
				else:
					path_end = spaces_total + 2
					newsplit.append(" ".join(mydat[1:path_end]))
					newsplit.extend(mydat[path_end:])
				mydat = newsplit

			# we do this so we can remove from non-root filesystems
			# (use the ROOT var to allow maintenance on other partitions)
			try:
				if normalize_needed.match(mydat[1]):
					mydat[1] = normalize_path(mydat[1])
					if not mydat[1].startswith(os.path.sep):
						mydat[1] = os.path.sep + mydat[1]
				if myroot:
					mydat[1] = os.path.join(myroot, mydat[1].lstrip(os.path.sep))
				if mydat[0] == "obj":
					#format: type, mtime, md5sum
					pkgfiles[mydat[1]] = [mydat[0], mydat[3], mydat[2]]
				elif mydat[0] == "dir":
					#format: type
					pkgfiles[mydat[1]] = [mydat[0]]
				elif mydat[0] == "sym":
					#format: type, mtime, dest
					pkgfiles[mydat[1]] = [mydat[0], mydat[4], mydat[3]]
				elif mydat[0] == "dev":
					#format: type
					pkgfiles[mydat[1]] = [mydat[0]]
				elif mydat[0]=="fif":
					#format: type
					pkgfiles[mydat[1]] = [mydat[0]]
				else:
					errors.append((pos + 1, "Unrecognized CONTENTS entry"))
			except (KeyError, IndexError):
				errors.append((pos + 1, "Unrecognized CONTENTS entry"))
		if errors:
			writemsg("!!! Parse error in '%s'\n" % contents_file, noiselevel=-1)
			for pos, e in errors:
				writemsg("!!! line %d: %s\n" % (pos, e), noiselevel=-1)
		self.contentscache = pkgfiles
		return pkgfiles
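	# Example CONTENTS lines in the format parsed above (illustrative
	# values):
	#
	#	dir /usr/bin
	#	obj /usr/bin/foo 2fd4e1c67a2d28fced849ee1bb76e739 1213727922
	#	sym /usr/lib/libbar.so -> libbar.so.1.5 1213727922
	#	fif /var/run/foo.fifo
	#
	# "obj" entries carry an MD5 digest and mtime, "sym" entries carry the
	# link target and mtime, and paths may contain spaces (hence the
	# reassembly logic above).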
	def unmerge(self, pkgfiles=None, trimworld=1, cleanup=1,
		ldpath_mtimes=None, others_in_slot=None):
		"""
		Calls prerm
		Unmerges a given package (CPV)
		calls postrm
		calls cleanrm
		calls env_update

		@param pkgfiles: files to unmerge (generally self.getcontents() )
		@type pkgfiles: Dictionary
		@param trimworld: Remove CPV from world file if True, not if False
		@type trimworld: Boolean
		@param cleanup: cleanup to pass to doebuild (see doebuild)
		@type cleanup: Boolean
		@param ldpath_mtimes: mtimes to pass to env_update (see env_update)
		@type ldpath_mtimes: Dictionary
		@param others_in_slot: all dblink instances in this slot, excluding self
		@type others_in_slot: list
		@rtype: Integer
		@returns:
		1. os.EX_OK if everything went well.
		2. return code of the failed phase (for prerm, postrm, cleanrm)

		Notes:
		The caller must ensure that lockdb() and unlockdb() are called
		before and after this method.
		"""
		if self.vartree.dbapi._categories is not None:
			self.vartree.dbapi._categories = None
		# When others_in_slot is supplied, the security check has already been
		# done for this slot, so it shouldn't be repeated until the next
		# replacement or unmerge operation.
		if others_in_slot is None:
			slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
			slot_matches = self.vartree.dbapi.match(
				"%s:%s" % (dep_getkey(self.mycpv), slot))
			others_in_slot = []
			for cur_cpv in slot_matches:
				if cur_cpv == self.mycpv:
					continue
				others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
					self.vartree.root, self.settings, vartree=self.vartree))
			retval = self._security_check([self] + others_in_slot)
			if retval:
				return retval

		contents = self.getcontents()
		# Now, don't assume that the name of the ebuild is the same as the
		# name of the dir; the package may have been moved.
		myebuildpath = None
		ebuild_phase = "prerm"
		mystuff = listdir(self.dbdir, EmptyOnError=1)
		for x in mystuff:
			if x.endswith(".ebuild"):
				myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
				if x[:-7] != self.pkg:
					# Clean up after vardbapi.move_ent() breakage in
					# portage versions before 2.1.2
					os.rename(os.path.join(self.dbdir, x), myebuildpath)
					write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
				break

		self.settings.setcpv(self.mycpv, mydb=self.vartree.dbapi)
		if myebuildpath:
			try:
				doebuild_environment(myebuildpath, "prerm", self.myroot,
					self.settings, 0, 0, self.vartree.dbapi)
			except UnsupportedAPIException, e:
				# Sometimes this happens due to corruption of the EAPI file.
				writemsg("!!! FAILED prerm: %s\n" % \
					os.path.join(self.dbdir, "EAPI"), noiselevel=-1)
				writemsg("%s\n" % str(e), noiselevel=-1)
				myebuildpath = None
			else:
				catdir = os.path.dirname(self.settings["PORTAGE_BUILDDIR"])
				ensure_dirs(os.path.dirname(catdir),
					uid=portage_uid, gid=portage_gid, mode=070, mask=0)

		builddir_lock = None
		catdir_lock = None
		retval = -1
		try:
			if myebuildpath:
				catdir_lock = lockdir(catdir)
				ensure_dirs(catdir,
					uid=portage_uid, gid=portage_gid,
					mode=070, mask=0)
				builddir_lock = lockdir(
					self.settings["PORTAGE_BUILDDIR"])
				try:
					unlockdir(catdir_lock)
				finally:
					catdir_lock = None
				# Eventually, we'd like to pass in the saved ebuild env here...
				retval = doebuild(myebuildpath, "prerm", self.myroot,
					self.settings, cleanup=cleanup, use_cache=0,
					mydbapi=self.vartree.dbapi, tree="vartree",
					vartree=self.vartree)
				# XXX: Decide how to handle failures here.
				if retval != os.EX_OK:
					writemsg("!!! FAILED prerm: %s\n" % retval, noiselevel=-1)
					return retval

			self._unmerge_pkgfiles(pkgfiles, others_in_slot)

			# Remove the registration of preserved libs for this pkg instance
			plib_registry = self.vartree.dbapi.plib_registry
			plib_registry.unregister(self.mycpv, self.settings["SLOT"],
				self.vartree.dbapi.cpv_counter(self.mycpv))

			if myebuildpath:
				ebuild_phase = "postrm"
				retval = doebuild(myebuildpath, "postrm", self.myroot,
					self.settings, use_cache=0, tree="vartree",
					mydbapi=self.vartree.dbapi, vartree=self.vartree)

				# XXX: Decide how to handle failures here.
				if retval != os.EX_OK:
					writemsg("!!! FAILED postrm: %s\n" % retval, noiselevel=-1)
					return retval

			# regenerate reverse NEEDED map
			self.vartree.dbapi.linkmap.rebuild()

			# remove preserved libraries that don't have any consumers left
			# FIXME: this code is quite ugly and can likely be optimized in several ways
			plib_dict = plib_registry.getPreservedLibs()
			for cpv in plib_dict:
				plib_dict[cpv].sort()
				# for the loop below to work correctly, we need all
				# symlinks to come before the actual files, such that
				# the recorded symlinks (sonames) will be resolved into
				# their real target before the object is found not to be
				# in the reverse NEEDED map
				def symlink_compare(x, y):
					if os.path.islink(x):
						if os.path.islink(y):
							return 0
						return -1
					elif os.path.islink(y):
						return 1
					return 0

				plib_dict[cpv].sort(symlink_compare)
				for f in plib_dict[cpv]:
					if not os.path.exists(f):
						continue
					unlink_list = []
					consumers = self.vartree.dbapi.linkmap.findConsumers(f)
					if not consumers:
						unlink_list.append(f)
					else:
						keep = False
						for c in consumers:
							if c not in self.getcontents():
								keep = True
								break
						if not keep:
							unlink_list.append(f)
					for obj in unlink_list:
						try:
							if os.path.islink(obj):
								obj_type = "sym"
							else:
								obj_type = "obj"
							os.unlink(obj)
							writemsg_stdout("<<< !needed %s %s\n" % (obj_type, obj))
						except OSError, e:
							if e.errno == errno.ENOENT:
								pass
							else:
								raise e
			plib_registry.pruneNonExisting()

		finally:
			if retval != os.EX_OK:
				msg_lines = []
				msg = ("The '%s' " % ebuild_phase) + \
					("phase of the '%s' package " % self.mycpv) + \
					("has failed with exit value %s." % retval)
				from textwrap import wrap
				msg_lines.extend(wrap(msg, 72))
				msg_lines.append("")

				ebuild_name = os.path.basename(myebuildpath)
				ebuild_dir = os.path.dirname(myebuildpath)
				msg = "The problem occurred while executing " + \
					("the ebuild file named '%s' " % ebuild_name) + \
					("located in the '%s' directory. " % ebuild_dir) + \
					"If necessary, manually remove " + \
					"the environment.bz2 file and/or the " + \
					"ebuild file located in that directory."
				msg_lines.extend(wrap(msg, 72))
				msg_lines.append("")

				msg = "Removal " + \
					"of the environment.bz2 file is " + \
					"preferred since it may allow the " + \
					"removal phases to execute successfully. " + \
					"The ebuild will be " + \
					"sourced and the eclasses " + \
					"from the current portage tree will be used " + \
					"when necessary. Removal of " + \
					"the ebuild file will cause the " + \
					"pkg_prerm() and pkg_postrm() removal " + \
					"phases to be skipped entirely."
				msg_lines.extend(wrap(msg, 72))

				from portage.elog.messages import eerror
				for l in msg_lines:
					eerror(l, phase=ebuild_phase, key=self.mycpv)

			# process logs created during pre/postrm
			elog_process(self.mycpv, self.settings, phasefilter=filter_unmergephases)
			if retval == os.EX_OK:
				doebuild(myebuildpath, "cleanrm", self.myroot,
					self.settings, tree="vartree",
					mydbapi=self.vartree.dbapi,
					vartree=self.vartree)
			if builddir_lock:
				unlockdir(builddir_lock)

			try:
				if myebuildpath and not catdir_lock:
					# Lock catdir for removal if empty.
					catdir_lock = lockdir(catdir)
			finally:
				if catdir_lock:
					try:
						os.rmdir(catdir)
					except OSError, e:
						if e.errno not in (errno.ENOENT,
							errno.ENOTEMPTY, errno.EEXIST):
							raise
						del e
					unlockdir(catdir_lock)
		env_update(target_root=self.myroot, prev_mtimes=ldpath_mtimes,
			contents=contents, env=self.settings.environ())
		return os.EX_OK
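	# As noted in the unmerge() docstring, the caller must hold the vdb
	# lock.  A minimal calling sketch (illustrative only; the variable names
	# are hypothetical):
	#
	#	mylink = dblink(cat, pkg, myroot, settings, treetype="vartree",
	#		vartree=vartree)
	#	mylink.lockdb()
	#	try:
	#		retval = mylink.unmerge(ldpath_mtimes=ldpath_mtimes)
	#		if retval == os.EX_OK:
	#			mylink.delete()
	#	finally:
	#		mylink.unlockdb()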
1916 def _unmerge_pkgfiles(self
, pkgfiles
, others_in_slot
):
1919 Unmerges the contents of a package from the liveFS
1920 Removes the VDB entry for self
1922 @param pkgfiles: typically self.getcontents()
1923 @type pkgfiles: Dictionary { filename: [ 'type', '?', 'md5sum' ] }
1924 @param others_in_slot: all dblink instances in this slot, excluding self
1925 @type others_in_slot: list
1930 writemsg_stdout("No package files given... Grabbing a set.\n")
1931 pkgfiles
= self
.getcontents()
1933 if others_in_slot
is None:
1935 slot
= self
.vartree
.dbapi
.aux_get(self
.mycpv
, ["SLOT"])[0]
1936 slot_matches
= self
.vartree
.dbapi
.match(
1937 "%s:%s" % (dep_getkey(self
.mycpv
), slot
))
1938 for cur_cpv
in slot_matches
:
1939 if cur_cpv
== self
.mycpv
:
1941 others_in_slot
.append(dblink(self
.cat
, catsplit(cur_cpv
)[1],
1942 self
.vartree
.root
, self
.settings
,
1943 vartree
=self
.vartree
))
1944 dest_root
= normalize_path(self
.vartree
.root
).rstrip(os
.path
.sep
) + \
1946 dest_root_len
= len(dest_root
) - 1
1948 conf_mem_file
= os
.path
.join(dest_root
, CONFIG_MEMORY_FILE
)
1949 cfgfiledict
= grabdict(conf_mem_file
)
1952 unmerge_orphans
= "unmerge-orphans" in self
.settings
.features
1955 self
.updateprotect()
1956 mykeys
= pkgfiles
.keys()
1960 #process symlinks second-to-last, directories last.
1962 ignored_unlink_errnos
= (
1963 errno
.EBUSY
, errno
.ENOENT
,
1964 errno
.ENOTDIR
, errno
.EISDIR
)
1965 ignored_rmdir_errnos
= (
1966 errno
.EEXIST
, errno
.ENOTEMPTY
,
1967 errno
.EBUSY
, errno
.ENOENT
,
1968 errno
.ENOTDIR
, errno
.EISDIR
)
1969 modprotect
= os
.path
.join(self
.vartree
.root
, "lib/modules/")
1971 def unlink(file_name
, lstatobj
):
1973 if lstatobj
.st_flags
!= 0:
1974 bsd_chflags
.lchflags(file_name
, 0)
1975 parent_name
= os
.path
.dirname(file_name
)
1976 # Use normal stat/chflags for the parent since we want to
1977 # follow any symlinks to the real parent directory.
1978 pflags
= os
.stat(parent_name
).st_flags
1980 bsd_chflags
.chflags(parent_name
, 0)
1982 if not stat
.S_ISLNK(lstatobj
.st_mode
):
1983 # Remove permissions to ensure that any hardlinks to
1984 # suid/sgid files are rendered harmless.
1985 os
.chmod(file_name
, 0)
1986 os
.unlink(file_name
)
1988 if bsd_chflags
and pflags
!= 0:
1989 # Restore the parent flags we saved before unlinking
1990 bsd_chflags
.chflags(parent_name
, pflags
)
1992 def show_unmerge(zing
, desc
, file_type
, file_name
):
1993 writemsg_stdout("%s %s %s %s\n" % \
1994 (zing
, desc
.ljust(8), file_type
, file_name
))
1995 for objkey
in mykeys
:
1996 obj
= normalize_path(objkey
)
1997 file_data
= pkgfiles
[objkey
]
1998 file_type
= file_data
[0]
2001 statobj
= os
.stat(obj
)
2006 lstatobj
= os
.lstat(obj
)
2007 except (OSError, AttributeError):
2009 islink
= lstatobj
is not None and stat
.S_ISLNK(lstatobj
.st_mode
)
2010 if lstatobj
is None:
2011 show_unmerge("---", "!found", file_type
, obj
)
2013 if obj
.startswith(dest_root
):
2014 relative_path
= obj
[dest_root_len
:]
2016 for dblnk
in others_in_slot
:
2017 if dblnk
.isowner(relative_path
, dest_root
):
2021 # A new instance of this package claims the file, so
2023 show_unmerge("---", "replaced", file_type
, obj
)
2025 elif relative_path
in cfgfiledict
:
2026 stale_confmem
.append(relative_path
)
2027 # next line includes a tweak to protect modules from being unmerged,
2028 # but we don't protect modules from being overwritten if they are
2029 # upgraded. We effectively only want one half of the config protection
2030 # functionality for /lib/modules. For portage-ng both capabilities
2031 # should be able to be independently specified.
2032 if obj
.startswith(modprotect
):
2033 show_unmerge("---", "cfgpro", file_type
, obj
)
2036 # Don't unlink symlinks to directories here since that can
2037 # remove /lib and /usr/lib symlinks.
2038 if unmerge_orphans
and \
2039 lstatobj
and not stat
.S_ISDIR(lstatobj
.st_mode
) and \
2040 not (islink
and statobj
and stat
.S_ISDIR(statobj
.st_mode
)) and \
2041 not self
.isprotected(obj
):
2043 unlink(obj
, lstatobj
)
2044 except EnvironmentError, e
:
2045 if e
.errno
not in ignored_unlink_errnos
:
2048 show_unmerge("<<<", "", file_type
, obj
)
2051 lmtime
= str(lstatobj
[stat
.ST_MTIME
])
2052 if (pkgfiles
[objkey
][0] not in ("dir", "fif", "dev")) and (lmtime
!= pkgfiles
[objkey
][1]):
2053 show_unmerge("---", "!mtime", file_type
, obj
)
2056 if pkgfiles
[objkey
][0] == "dir":
2057 if statobj
is None or not stat
.S_ISDIR(statobj
.st_mode
):
2058 show_unmerge("---", "!dir", file_type
, obj
)
2061 elif pkgfiles
[objkey
][0] == "sym":
2063 show_unmerge("---", "!sym", file_type
, obj
)
2065 # Go ahead and unlink symlinks to directories here when
2066 # they're actually recorded as symlinks in the contents.
2067 # Normally, symlinks such as /lib -> lib64 are not recorded
2068 # as symlinks in the contents of a package. If a package
2069 # installs something into ${D}/lib/, it is recorded in the
2070 # contents as a directory even if it happens to correspond
2071 # to a symlink when it's merged to the live filesystem.
2073 unlink(obj
, lstatobj
)
2074 show_unmerge("<<<", "", file_type
, obj
)
2075 except (OSError, IOError),e
:
2076 if e
.errno
not in ignored_unlink_errnos
:
2079 show_unmerge("!!!", "", file_type
, obj
)
2080 elif pkgfiles
[objkey
][0] == "obj":
2081 if statobj
is None or not stat
.S_ISREG(statobj
.st_mode
):
2082 show_unmerge("---", "!obj", file_type
, obj
)
2086 mymd5
= perform_md5(obj
, calc_prelink
=1)
2087 except FileNotFound
, e
:
2088 # the file has disappeared between now and our stat call
2089 show_unmerge("---", "!obj", file_type
, obj
)
2092 # string.lower is needed because db entries used to be in upper-case. The
2093 # string.lower allows for backwards compatibility.
2094 if mymd5
!= pkgfiles
[objkey
][2].lower():
2095 show_unmerge("---", "!md5", file_type
, obj
)
2098 unlink(obj
, lstatobj
)
2099 except (OSError, IOError), e
:
2100 if e
.errno
not in ignored_unlink_errnos
:
2103 show_unmerge("<<<", "", file_type
, obj
)
2104 elif pkgfiles
[objkey
][0] == "fif":
2105 if not stat
.S_ISFIFO(lstatobj
[stat
.ST_MODE
]):
2106 show_unmerge("---", "!fif", file_type
, obj
)
2108 show_unmerge("---", "", file_type
, obj
)
2109 elif pkgfiles
[objkey
][0] == "dev":
2110 show_unmerge("---", "", file_type
, obj
)
2118 lstatobj
= os
.lstat(obj
)
2119 if lstatobj
.st_flags
!= 0:
2120 bsd_chflags
.lchflags(obj
, 0)
2121 parent_name
= os
.path
.dirname(obj
)
2122 # Use normal stat/chflags for the parent since we want to
2123 # follow any symlinks to the real parent directory.
2124 pflags
= os
.stat(parent_name
).st_flags
2126 bsd_chflags
.chflags(parent_name
, 0)
2130 if bsd_chflags
and pflags
!= 0:
2131 # Restore the parent flags we saved before unlinking
2132 bsd_chflags
.chflags(parent_name
, pflags
)
2133 show_unmerge("<<<", "", "dir", obj
)
2134 except EnvironmentError, e
:
2135 if e
.errno
not in ignored_rmdir_errnos
:
2137 if e
.errno
!= errno
.ENOENT
:
2138 show_unmerge("---", "!empty", "dir", obj
)
2141 # Remove stale entries from config memory.
2143 for filename
in stale_confmem
:
2144 del cfgfiledict
[filename
]
2145 writedict(cfgfiledict
, conf_mem_file
)
2147 #remove self from vartree database so that our own virtual gets zapped if we're the last node
2148 self
.vartree
.zap(self
.mycpv
)
	def isowner(self, filename, destroot):
		"""
		Check if a file belongs to this package. This may
		result in a stat call for the parent directory of
		every installed file, since the inode numbers are
		used to work around the problem of ambiguous paths
		caused by symlinked directories. The results of
		stat calls are cached to optimize multiple calls
		made by the same process.

		@param filename: path of the file, relative to destroot
		@type filename: String (Path)
		@param destroot: root of the merged filesystem (usually ${ROOT})
		@type destroot: String (Path)
		@rtype: Boolean
		@returns:
		1. True if this package owns the file.
		2. False if this package does not own the file.
		"""
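		# Illustrative usage (hypothetical dblink instance and file):
		#
		#     if mydblink.isowner("/usr/bin/foo", "/"):
		#         print "owned by", mydblink.mycpv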
		destfile = normalize_path(
			os.path.join(destroot, filename.lstrip(os.path.sep)))

		pkgfiles = self.getcontents()
		if pkgfiles and destfile in pkgfiles:
			return True
		if pkgfiles:
			basename = os.path.basename(destfile)
			if self._contents_basenames is None:
				self._contents_basenames = set(
					os.path.basename(x) for x in pkgfiles)
			if basename not in self._contents_basenames:
				# This is a shortcut that, in most cases, allows us to
				# eliminate this package as an owner without the need
				# to examine inode numbers of parent directories.
				return False

			# Use stat rather than lstat since we want to follow
			# any symlinks to the real parent directory.
			parent_path = os.path.dirname(destfile)
			try:
				parent_stat = os.stat(parent_path)
			except EnvironmentError, e:
				if e.errno != errno.ENOENT:
					raise
				del e
				return False
			if self._contents_inodes is None:
				self._contents_inodes = {}
				parent_paths = set()
				for x in pkgfiles:
					p_path = os.path.dirname(x)
					if p_path in parent_paths:
						continue
					parent_paths.add(p_path)
					try:
						s = os.stat(p_path)
					except OSError:
						pass
					else:
						inode_key = (s.st_dev, s.st_ino)
						# Use lists of paths in case multiple
						# paths reference the same inode.
						p_path_list = self._contents_inodes.get(inode_key)
						if p_path_list is None:
							p_path_list = []
							self._contents_inodes[inode_key] = p_path_list
						if p_path not in p_path_list:
							p_path_list.append(p_path)
			p_path_list = self._contents_inodes.get(
				(parent_stat.st_dev, parent_stat.st_ino))
			if p_path_list:
				for p_path in p_path_list:
					x = os.path.join(p_path, basename)
					if x in pkgfiles:
						return True

		return False
	def _preserve_libs(self, srcroot, destroot, mycontents, counter, inforoot):
		import shutil
		# read global reverse NEEDED map
		linkmap = self.vartree.dbapi.linkmap
		linkmap.rebuild(include_file=os.path.join(inforoot, "NEEDED.ELF.2"))
		liblist = linkmap.listLibraryObjects()

		# get list of libraries from old package instance
		old_contents = self._installed_instance.getcontents().keys()
		old_libs = set(old_contents).intersection(liblist)

		# get list of libraries from new package instance
		mylibs = set([os.path.join(os.sep, x) for x in mycontents]).intersection(liblist)

		# check which libs are present in the old, but not the new package instance
		candidates = old_libs.difference(mylibs)

		for x in old_contents:
			if os.path.islink(x) and os.path.realpath(x) in candidates and x not in mycontents:
				candidates.add(x)

		# ignore any libs that are only internally used by the package
		def has_external_consumers(lib, contents, otherlibs):
			consumers = linkmap.findConsumers(lib)
			contents_without_libs = [x for x in contents if x not in otherlibs]

			# just used by objects that will be autocleaned
			if len(consumers.difference(contents_without_libs)) == 0:
				return False
			# used by objects that are referenced as well, need to check those
			# recursively to break any reference cycles
			elif len(consumers.difference(contents)) == 0:
				otherlibs = set(otherlibs)
				for ol in otherlibs.intersection(consumers):
					if has_external_consumers(ol, contents, otherlibs.difference([lib])):
						return True
				return False
			# used by external objects directly
			else:
				return True

		for lib in list(candidates):
			if not has_external_consumers(lib, old_contents, candidates):
				candidates.remove(lib)
				continue
			if linkmap.isMasterLink(lib):
				candidates.remove(lib)
				continue
			# only preserve the lib if there is no other copy to use for each consumer
			keep = False
			for c in linkmap.findConsumers(lib):
				localkeep = True
				providers = linkmap.findProviders(c)

				for soname in providers:
					if lib in providers[soname]:
						for p in providers[soname]:
							if p not in candidates or os.path.exists(os.path.join(srcroot, p.lstrip(os.sep))):
								localkeep = False
								break
						break
				if localkeep:
					keep = True
			if not keep:
				candidates.remove(lib)
				continue

		del mylibs, mycontents, old_contents, liblist

		# inject files that should be preserved into our image dir
		preserve_paths = []
		candidates_stack = list(candidates)
		while candidates_stack:
			x = candidates_stack.pop()
			# skip existing files so the 'new' libs aren't overwritten
			if os.path.exists(os.path.join(srcroot, x.lstrip(os.sep))):
				continue
			print "injecting %s into %s" % (x, srcroot)
			if not os.path.exists(os.path.join(destroot, x.lstrip(os.sep))):
				print "%s does not exist so can't be preserved" % x
				continue
			mydir = os.path.join(srcroot, os.path.dirname(x).lstrip(os.sep))
			if not os.path.exists(mydir):
				os.makedirs(mydir)

			# resolve symlinks and extend preserve list
			# NOTE: we're extending the list in the loop to emulate recursion to
			#       also get indirect symlinks
			if os.path.islink(x):
				linktarget = os.readlink(x)
				os.symlink(linktarget, os.path.join(srcroot, x.lstrip(os.sep)))
				if linktarget[0] != os.sep:
					linktarget = os.path.join(os.path.dirname(x), linktarget)
				candidates.add(linktarget)
				candidates_stack.append(linktarget)
			else:
				shutil.copy2(os.path.join(destroot, x.lstrip(os.sep)),
					os.path.join(srcroot, x.lstrip(os.sep)))
			preserve_paths.append(x)

		del candidates

		# keep track of the libs we preserved
		self.vartree.dbapi.plib_registry.register(self.mycpv,
			self.settings["SLOT"], counter, preserve_paths)

		del preserve_paths
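	# Illustrative register() call as made above (hypothetical values):
	# record that one library object of the old instance survived the
	# update:
	#
	#     plib_registry.register("app-misc/foo-1.1", "0", 42,
	#         ["/usr/lib/libfoo.so.0"])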
	def _collision_protect(self, srcroot, destroot, mypkglist, mycontents):
		collision_ignore = set([normalize_path(myignore) for myignore in \
			self.settings.get("COLLISION_IGNORE", "").split()])

		collisions = []
		destroot = normalize_path(destroot).rstrip(os.path.sep) + \
			os.path.sep
		writemsg_stdout("%s checking %d files for package collisions\n" % \
			(green("*"), len(mycontents)))
		i = 0
		for f in mycontents:
			i = i + 1
			if i % 1000 == 0:
				writemsg_stdout("%d files checked ...\n" % i)
			dest_path = normalize_path(
				os.path.join(destroot, f.lstrip(os.path.sep)))
			try:
				dest_lstat = os.lstat(dest_path)
			except EnvironmentError, e:
				if e.errno == errno.ENOENT:
					del e
					continue
				elif e.errno == errno.ENOTDIR:
					del e
					# A non-directory is in a location where this package
					# expects to have a directory.
					dest_lstat = None
					parent_path = dest_path
					while len(parent_path) > len(destroot):
						parent_path = os.path.dirname(parent_path)
						try:
							dest_lstat = os.lstat(parent_path)
							break
						except EnvironmentError, e:
							if e.errno != errno.ENOTDIR:
								raise
							del e
					if not dest_lstat:
						raise AssertionError(
							"unable to find non-directory " + \
							"parent for '%s'" % dest_path)
					dest_path = parent_path
					f = os.path.sep + dest_path[len(destroot):]
					if f in collisions:
						continue
				else:
					raise
			isowned = False
			for ver in [self] + mypkglist:
				if (ver.isowner(f, destroot) or ver.isprotected(f)):
					isowned = True
					break
			if not isowned:
				stopmerge = True
				if collision_ignore:
					if f in collision_ignore:
						stopmerge = False
					else:
						for myignore in collision_ignore:
							if f.startswith(myignore + os.path.sep):
								stopmerge = False
								break
				if stopmerge:
					collisions.append(f)
		return collisions
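	# Illustrative make.conf entry (hypothetical paths): any file under a
	# path listed in COLLISION_IGNORE is never reported as a collision:
	#
	#     COLLISION_IGNORE="/lib/modules /usr/share/fonts"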
	def _security_check(self, installed_instances):
		if not installed_instances:
			return 0
		file_paths = set()
		for dblnk in installed_instances:
			file_paths.update(dblnk.getcontents())
		inode_map = {}
		real_paths = set()
		for path in file_paths:
			try:
				s = os.lstat(path)
			except OSError, e:
				if e.errno not in (errno.ENOENT, errno.ENOTDIR):
					raise
				del e
				continue
			if not stat.S_ISREG(s.st_mode):
				continue
			path = os.path.realpath(path)
			if path in real_paths:
				continue
			real_paths.add(path)
			if s.st_nlink > 1 and \
				s.st_mode & (stat.S_ISUID | stat.S_ISGID):
				k = (s.st_dev, s.st_ino)
				inode_map.setdefault(k, []).append((path, s))
		suspicious_hardlinks = []
		for path_list in inode_map.itervalues():
			path, s = path_list[0]
			if len(path_list) == s.st_nlink:
				# All hardlinks seem to be owned by this package.
				continue
			suspicious_hardlinks.append(path_list)
		if not suspicious_hardlinks:
			return 0
		from portage.output import colorize
		prefix = colorize("SECURITY_WARN", "*") + " WARNING: "
		writemsg(prefix + "suid/sgid file(s) " + \
			"with suspicious hardlink(s):\n", noiselevel=-1)
		for path_list in suspicious_hardlinks:
			for path, s in path_list:
				writemsg(prefix + "  '%s'\n" % path, noiselevel=-1)
		writemsg(prefix + "See the Gentoo Security Handbook " + \
			"guide for advice on how to proceed.\n", noiselevel=-1)
		return 1
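	# Sketch of the heuristic above (hypothetical numbers): if a suid file
	# has s.st_nlink == 3 but the installed instances only account for two
	# paths to that inode, a third hardlink exists somewhere outside the
	# package, so the file is reported as suspicious.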
	def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
		mydbapi=None, prev_mtimes=None):
		"""

		This function does the following:

		calls self._preserve_libs if FEATURES=preserve-libs
		calls self._collision_protect if FEATURES=collision-protect
		calls doebuild(mydo=pkg_preinst)
		Merges the package to the livefs
		unmerges old version (if required)
		calls doebuild(mydo=pkg_postinst)
		calls env_update

		@param srcroot: Typically this is ${D}
		@type srcroot: String (Path)
		@param destroot: Path to merge to (usually ${ROOT})
		@type destroot: String (Path)
		@param inforoot: root of the vardb entry ?
		@type inforoot: String (Path)
		@param myebuild: path to the ebuild that we are processing
		@type myebuild: String (Path)
		@param mydbapi: dbapi which is handed to doebuild.
		@type mydbapi: portdbapi instance
		@param prev_mtimes: { Filename:mtime } mapping for env_update
		@type prev_mtimes: Dictionary
		@rtype: Boolean
		@returns:
		1. 0 on success
		2. 1 on failure

		secondhand is a list of symlinks that have been skipped due to their target
		not existing; we will merge these symlinks at a later time.
		"""
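		# Illustrative call (hypothetical paths; portage.portdb stands in
		# for a real portdbapi instance):
		#
		#     retval = mydblink.treewalk(
		#         "/var/tmp/portage/app-misc/foo-1.0/image/", "/",
		#         "/var/tmp/portage/app-misc/foo-1.0/build-info",
		#         "/usr/portage/app-misc/foo/foo-1.0.ebuild",
		#         mydbapi=portage.portdb, prev_mtimes={})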
		srcroot = normalize_path(srcroot).rstrip(os.path.sep) + os.path.sep
		destroot = normalize_path(destroot).rstrip(os.path.sep) + os.path.sep

		if not os.path.isdir(srcroot):
			writemsg("!!! Directory Not Found: D='%s'\n" % srcroot,
				noiselevel=-1)
			return 1

		inforoot_slot_file = os.path.join(inforoot, "SLOT")
		slot = None
		try:
			f = open(inforoot_slot_file)
			try:
				slot = f.read().strip()
			finally:
				f.close()
		except EnvironmentError, e:
			if e.errno != errno.ENOENT:
				raise
			del e
		if slot is None:
			slot = ""

		from portage.elog.messages import eerror as _eerror
		def eerror(lines):
			for l in lines:
				_eerror(l, phase="preinst", key=self.settings.mycpv)

		if slot != self.settings["SLOT"]:
			writemsg("!!! WARNING: Expected SLOT='%s', got '%s'\n" % \
				(self.settings["SLOT"], slot))

		if not os.path.exists(self.dbcatdir):
			os.makedirs(self.dbcatdir)

		otherversions = []
		for v in self.vartree.dbapi.cp_list(self.mysplit[0]):
			otherversions.append(v.split("/")[1])

		# filter any old-style virtual matches
		slot_matches = [cpv for cpv in self.vartree.dbapi.match(
			"%s:%s" % (cpv_getkey(self.mycpv), slot)) \
			if cpv_getkey(cpv) == cpv_getkey(self.mycpv)]

		if self.mycpv not in slot_matches and \
			self.vartree.dbapi.cpv_exists(self.mycpv):
			# handle multislot or unapplied slotmove
			slot_matches.append(self.mycpv)

		others_in_slot = []
		from portage import config
		for cur_cpv in slot_matches:
			# Clone the config in case one of these has to be unmerged since
			# we need it to have private ${T} etc... for things like elog.
			others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
				self.vartree.root, config(clone=self.settings),
				vartree=self.vartree))
		retval = self._security_check(others_in_slot)
		if retval:
			return retval

		if slot_matches:
			# Used by self.isprotected().
			max_dblnk = None
			max_counter = -1
			for dblnk in others_in_slot:
				cur_counter = self.vartree.dbapi.cpv_counter(dblnk.mycpv)
				if cur_counter > max_counter:
					max_counter = cur_counter
					max_dblnk = dblnk
			self._installed_instance = max_dblnk

		# get current counter value (counter_tick also takes care of incrementing it)
		# XXX Need to make this destroot, but it needs to be initialized first. XXX
		# XXX bis: leads to some invalidentry() call through cp_all().
		# Note: The counter is generated here but written later because preserve_libs
		#       needs the counter value but has to be before dbtmpdir is made (which
		#       has to be before the counter is written) - genone
		counter = self.vartree.dbapi.counter_tick(self.myroot, mycpv=self.mycpv)

		# Save this for unregistering preserved-libs if the merge fails.
		self.settings["COUNTER"] = str(counter)
		self.settings.backup_changes("COUNTER")
		myfilelist = []
		mylinklist = []
		def onerror(e):
			raise
		for parent, dirs, files in os.walk(srcroot, onerror=onerror):
			for f in files:
				file_path = os.path.join(parent, f)
				file_mode = os.lstat(file_path).st_mode
				if stat.S_ISREG(file_mode):
					myfilelist.append(file_path[len(srcroot):])
				elif stat.S_ISLNK(file_mode):
					# Note: os.walk puts symlinks to directories in the "dirs"
					# list and it does not traverse them since that could lead
					# to an infinite recursion loop.
					mylinklist.append(file_path[len(srcroot):])
		# If there are no files to merge, and an installed package in the same
		# slot has files, it probably means that something went wrong.
		if self.settings.get("PORTAGE_PACKAGE_EMPTY_ABORT") == "1" and \
			not myfilelist and not mylinklist and others_in_slot:
			installed_files = None
			for other_dblink in others_in_slot:
				installed_files = other_dblink.getcontents()
				if not installed_files:
					continue
				from textwrap import wrap
				wrap_width = 72
				msg = []
				d = (
					self.mycpv,
					other_dblink.mycpv
				)
				msg.extend(wrap(("The '%s' package will not install " + \
					"any files, but the currently installed '%s'" + \
					" package has the following files: ") % d, wrap_width))
				msg.append("")
				msg.extend(sorted(installed_files))
				msg.append("")
				msg.append("package %s NOT merged" % self.mycpv)
				msg.append("")
				msg.extend(wrap(
					("Manually run `emerge --unmerge =%s` " % \
					other_dblink.mycpv) + "if you really want to " + \
					"remove the above files. Set " + \
					"PORTAGE_PACKAGE_EMPTY_ABORT=\"0\" in " + \
					"/etc/make.conf if you do not want to " + \
					"abort in cases like this.",
					wrap_width))
				eerror(msg)
			if installed_files:
				return 1
		# Preserve old libs if they are still in use
		if slot_matches and "preserve-libs" in self.settings.features:
			self._preserve_libs(srcroot, destroot, myfilelist+mylinklist, counter, inforoot)

		# check for package collisions
		blockers = None
		if self._blockers is not None:
			# This is only supposed to be called when
			# the vdb is locked, like it is here.
			blockers = self._blockers()
		if blockers is None:
			blockers = []
		collisions = self._collision_protect(srcroot, destroot,
			others_in_slot + blockers, myfilelist + mylinklist)

		# Make sure the ebuild environment is initialized and that ${T}/elog
		# exists for logging of collision-protect eerror messages.
		if myebuild is None:
			myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
		doebuild_environment(myebuild, "preinst", destroot,
			self.settings, 0, 0, mydbapi)
		prepare_build_dirs(destroot, self.settings, cleanup)
		if collisions:
			collision_protect = "collision-protect" in self.settings.features
			msg = "This package will overwrite one or more files that" + \
			" may belong to other packages (see list below)."
			if not collision_protect:
				msg += " Add \"collision-protect\" to FEATURES in" + \
				" make.conf if you would like the merge to abort" + \
				" in cases like this."
			if self.settings.get("PORTAGE_QUIET") != "1":
				msg += " You can use a command such as" + \
				" `portageq owners / <filename>` to identify the" + \
				" installed package that owns a file. If portageq" + \
				" reports that only one package owns a file then do NOT" + \
				" file a bug report. A bug report is only useful if it" + \
				" identifies at least two or more packages that are known" + \
				" to install the same file(s)." + \
				" If a collision occurs and you" + \
				" can not explain where the file came from then you" + \
				" should simply ignore the collision since there is not" + \
				" enough information to determine if a real problem" + \
				" exists. Please do NOT file a bug report at" + \
				" http://bugs.gentoo.org unless you report exactly which" + \
				" two packages install the same file(s). Once again," + \
				" please do NOT file a bug report unless you have" + \
				" completely understood the above message."

			self.settings["EBUILD_PHASE"] = "preinst"
			from textwrap import wrap
			msg = wrap(msg, 70)
			if collision_protect:
				msg.append("")
				msg.append("package %s NOT merged" % self.settings.mycpv)
			msg.append("")
			msg.append("Detected file collision(s):")
			msg.append("")
			for f in collisions:
				msg.append("\t%s" % \
					os.path.join(destroot, f.lstrip(os.path.sep)))
			eerror(msg)

			msg = []
			msg.append("")
			msg.append("Searching all installed" + \
				" packages for file collisions...")
			msg.append("")
			msg.append("Press Ctrl-C to Stop")
			msg.append("")
			eerror(msg)

			owners = self.vartree.dbapi._owners.get_owners(collisions)
			self.vartree.dbapi.flush_cache()

			for pkg, owned_files in owners.iteritems():
				cpv = pkg.mycpv
				msg = []
				msg.append("%s" % cpv)
				for f in sorted(owned_files):
					msg.append("\t%s" % os.path.join(destroot,
						f.lstrip(os.path.sep)))
				eerror(msg)
			if not owners:
				eerror(["None of the installed" + \
					" packages claim the file(s)."])
			if collision_protect:
				return 1
2714 writemsg_stdout(">>> Merging %s to %s\n" % (self
.mycpv
, destroot
))
2716 # The merge process may move files out of the image directory,
2717 # which causes invalidation of the .installed flag.
2719 os
.unlink(os
.path
.join(
2720 os
.path
.dirname(normalize_path(srcroot
)), ".installed"))
2722 if e
.errno
!= errno
.ENOENT
:
2726 self
.dbdir
= self
.dbtmpdir
2728 ensure_dirs(self
.dbtmpdir
)
2730 # run preinst script
2731 a
= doebuild(myebuild
, "preinst", destroot
, self
.settings
,
2732 use_cache
=0, tree
=self
.treetype
, mydbapi
=mydbapi
,
2733 vartree
=self
.vartree
)
2735 # XXX: Decide how to handle failures here.
2737 writemsg("!!! FAILED preinst: "+str(a
)+"\n", noiselevel
=-1)
2740 # copy "info" files (like SLOT, CFLAGS, etc.) into the database
2741 for x
in listdir(inforoot
):
2742 self
.copyfile(inforoot
+"/"+x
)
2744 # write local package counter for recording
2745 lcfile
= open(os
.path
.join(self
.dbtmpdir
, "COUNTER"),"w")
2746 lcfile
.write(str(counter
))
2749 # open CONTENTS file (possibly overwriting old one) for recording
2750 outfile
= open(os
.path
.join(self
.dbtmpdir
, "CONTENTS"),"w")
2752 self
.updateprotect()
2754 #if we have a file containing previously-merged config file md5sums, grab it.
2755 conf_mem_file
= os
.path
.join(destroot
, CONFIG_MEMORY_FILE
)
2756 cfgfiledict
= grabdict(conf_mem_file
)
2757 if self
.settings
.has_key("NOCONFMEM"):
2758 cfgfiledict
["IGNORE"]=1
2760 cfgfiledict
["IGNORE"]=0
2762 # Always behave like --noconfmem is enabled for downgrades
2763 # so that people who don't know about this option are less
2764 # likely to get confused when doing upgrade/downgrade cycles.
2765 pv_split
= catpkgsplit(self
.mycpv
)[1:]
2766 for other
in others_in_slot
:
2767 if pkgcmp(pv_split
, catpkgsplit(other
.mycpv
)[1:]) < 0:
2768 cfgfiledict
["IGNORE"] = 1
		# Don't bump mtimes on merge since some applications require
		# preservation of timestamps. This means that the unmerge phase must
		# check to see if a file belongs to an installed instance in the same
		# slot.
		mymtime = None

		# set umask to 0 for merging; back up umask, save old one in prevmask (since this is a global change)
		prevmask = os.umask(0)
		secondhand = []

		# we do a first merge; this will recurse through all files in our srcroot but also build up a
		# "second hand" of symlinks to merge later
		if self.mergeme(srcroot, destroot, outfile, secondhand, "", cfgfiledict, mymtime):
			return 1

		# now, it's time for dealing our second hand; we'll loop until we can't merge anymore. The rest are
		# broken symlinks. We'll merge them too.
		lastlen = 0
		while len(secondhand) and len(secondhand) != lastlen:
			# clear the thirdhand. Anything from our second hand that
			# couldn't get merged will be added to thirdhand.
			thirdhand = []
			self.mergeme(srcroot, destroot, outfile, thirdhand, secondhand, cfgfiledict, mymtime)

			# swap hands
			lastlen = len(secondhand)

			# our thirdhand now becomes our secondhand. It's ok to throw
			# away secondhand since thirdhand contains all the stuff that
			# couldn't be merged.
			secondhand = thirdhand

		if len(secondhand):
			# force merge of remaining symlinks (broken or circular; oh well)
			self.mergeme(srcroot, destroot, outfile, None, secondhand, cfgfiledict, mymtime)

		# restore umask
		os.umask(prevmask)

		# if we opened it, close it
		outfile.flush()
		outfile.close()

		# write out our collection of md5sums
		cfgfiledict.pop("IGNORE", None)
		ensure_dirs(os.path.dirname(conf_mem_file),
			gid=portage_gid, mode=02750, mask=02)
		writedict(cfgfiledict, conf_mem_file)
		# These caches are populated during collision-protect and the data
		# they contain is now invalid. It's very important to invalidate
		# the contents_inodes cache so that FEATURES=unmerge-orphans
		# doesn't unmerge anything that belongs to this package that has
		# just been merged.
		others_in_slot.append(self)  # self has just been merged
		for dblnk in others_in_slot:
			dblnk.contentscache = None
			dblnk._contents_inodes = None
			dblnk._contents_basenames = None

		# If portage is reinstalling itself, remove the old
		# version now since we want to use the temporary
		# PORTAGE_BIN_PATH that will be removed when we return.
		reinstall_self = False
		if self.myroot == "/" and \
			"sys-apps" == self.cat and \
			"portage" == pkgsplit(self.pkg)[0]:
			reinstall_self = True

		autoclean = self.settings.get("AUTOCLEAN", "yes") == "yes"
		for dblnk in list(others_in_slot):
			if dblnk is self:
				continue
			if not (autoclean or dblnk.mycpv == self.mycpv or reinstall_self):
				continue
			writemsg_stdout(">>> Safely unmerging already-installed instance...\n")
			others_in_slot.remove(dblnk) # dblnk will unmerge itself now
			dblnk.unmerge(trimworld=0, ldpath_mtimes=prev_mtimes,
				others_in_slot=others_in_slot)
			# TODO: Check status and abort if necessary.
			dblnk.delete()
			writemsg_stdout(">>> Original instance of package unmerged safely.\n")

		if len(others_in_slot) > 1:
			from portage.output import colorize
			writemsg_stdout(colorize("WARN", "WARNING:")
				+ " AUTOCLEAN is disabled. This can cause serious"
				+ " problems due to overlapping packages.\n")
		# We hold both directory locks.
		self.dbdir = self.dbpkgdir
		self.delete()
		_movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)

		# Check for file collisions with blocking packages
		# and remove any colliding files from their CONTENTS
		# since they now belong to this package.
		self._clear_contents_cache()
		contents = self.getcontents()
		destroot_len = len(destroot) - 1
		for blocker in blockers:
			blocker_contents = blocker.getcontents()
			collisions = []
			for filename in blocker_contents:
				relative_filename = filename[destroot_len:]
				if self.isowner(relative_filename, destroot):
					collisions.append(filename)
			if not collisions:
				continue
			for filename in collisions:
				del blocker_contents[filename]
			f = atomic_ofstream(os.path.join(blocker.dbdir, "CONTENTS"))
			for filename in sorted(blocker_contents):
				entry_data = blocker_contents[filename]
				entry_type = entry_data[0]
				relative_filename = filename[destroot_len:]
				if entry_type == "obj":
					entry_type, mtime, md5sum = entry_data
					line = "%s %s %s %s\n" % \
						(entry_type, relative_filename, md5sum, mtime)
				elif entry_type == "sym":
					entry_type, mtime, link = entry_data
					line = "%s %s -> %s %s\n" % \
						(entry_type, relative_filename, link, mtime)
				else: # dir, dev, fif
					line = "%s %s\n" % (entry_type, relative_filename)
				f.write(line)
			f.close()

		self.vartree.dbapi._add(self)
		contents = self.getcontents()

		# regenerate reverse NEEDED map
		self.vartree.dbapi.linkmap.rebuild()
		# run postinst script
		self.settings["PORTAGE_UPDATE_ENV"] = \
			os.path.join(self.dbpkgdir, "environment.bz2")
		self.settings.backup_changes("PORTAGE_UPDATE_ENV")
		a = doebuild(myebuild, "postinst", destroot, self.settings, use_cache=0,
			tree=self.treetype, mydbapi=mydbapi, vartree=self.vartree)
		self.settings.pop("PORTAGE_UPDATE_ENV", None)

		# XXX: Decide how to handle failures here.
		if a != os.EX_OK:
			writemsg("!!! FAILED postinst: "+str(a)+"\n", noiselevel=-1)
			return a

		downgrade = False
		for v in otherversions:
			if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
				downgrade = True

		# update environment settings, library paths. DO NOT change symlinks.
		env_update(makelinks=(not downgrade),
			target_root=self.settings["ROOT"], prev_mtimes=prev_mtimes,
			contents=contents, env=self.settings.environ())

		writemsg_stdout(">>> %s %s\n" % (self.mycpv, "merged."))
		return os.EX_OK
	def mergeme(self, srcroot, destroot, outfile, secondhand, stufftomerge, cfgfiledict, thismtime):
		"""

		This function handles actual merging of the package contents to the livefs.
		It also handles config protection.

		@param srcroot: Where are we copying files from (usually ${D})
		@type srcroot: String (Path)
		@param destroot: Typically ${ROOT}
		@type destroot: String (Path)
		@param outfile: File to log operations to
		@type outfile: File Object
		@param secondhand: A set of items to merge in pass two (usually
		symlinks that point to non-existing files and may get merged later)
		@type secondhand: List
		@param stufftomerge: Either a directory to merge, or a list of items.
		@type stufftomerge: String or List
		@param cfgfiledict: { File:mtime } mapping for config_protected files
		@type cfgfiledict: Dictionary
		@param thismtime: The current time (typically long(time.time()))
		@type thismtime: Long
		@rtype: None or Boolean
		@returns:
		1. True on failure
		2. None otherwise
		"""
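		# Illustrative CONTENTS lines written to outfile below (hypothetical
		# paths and values):
		#
		#     dir /usr/bin
		#     obj /usr/bin/foo d41d8cd98f00b204e9800998ecf8427e 1213724733
		#     sym /usr/lib/libfoo.so -> libfoo.so.1 1213724733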
		from os.path import sep, join
		srcroot = normalize_path(srcroot).rstrip(sep) + sep
		destroot = normalize_path(destroot).rstrip(sep) + sep

		# this is supposed to merge a list of files. There will be 2 forms of argument passing.
		if isinstance(stufftomerge, basestring):
			# A directory is specified. Figure out protection paths, listdir() it and process it.
			mergelist = os.listdir(join(srcroot, stufftomerge))
			offset = stufftomerge
		else:
			mergelist = stufftomerge
			offset = ""
		for x in mergelist:
			mysrc = join(srcroot, offset, x)
			mydest = join(destroot, offset, x)
			# myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
			myrealdest = join(sep, offset, x)
			# stat file once, test using S_* macros many times (faster that way)
			try:
				mystat = os.lstat(mysrc)
			except OSError, e:
				writemsg("\n")
				writemsg(red("!!! ERROR: There appears to be ")+bold("FILE SYSTEM CORRUPTION.")+red(" A file that is listed\n"))
				writemsg(red("!!!        as existing is not capable of being stat'd. If you are using an\n"))
				writemsg(red("!!!        experimental kernel, please boot into a stable one, force an fsck,\n"))
				writemsg(red("!!!        and ensure your filesystem is in a sane state. ")+bold("'shutdown -Fr now'\n"))
				writemsg(red("!!!        File:  ")+str(mysrc)+"\n", noiselevel=-1)
				writemsg(red("!!!        Error: ")+str(e)+"\n", noiselevel=-1)
				sys.exit(1)
			except Exception, e:
				writemsg("\n")
				writemsg(red("!!! ERROR: An unknown error has occurred during the merge process.\n"))
				writemsg(red("!!! A stat call returned the following error for the following file:\n"))
				writemsg( "!!! Please ensure that your filesystem is intact, otherwise report\n")
				writemsg( "!!! this as a portage bug at bugs.gentoo.org. Append 'emerge info'.\n")
				writemsg( "!!!        File:  "+str(mysrc)+"\n", noiselevel=-1)
				writemsg( "!!!        Error: "+str(e)+"\n", noiselevel=-1)
				sys.exit(1)

			mymode = mystat[stat.ST_MODE]
			# handy variables; mydest is the target object on the live filesystems;
			# mysrc is the source object in the temporary install dir
			try:
				mydstat = os.lstat(mydest)
				mydmode = mydstat.st_mode
			except OSError, e:
				if e.errno != errno.ENOENT:
					raise
				del e
				# dest file doesn't exist
				mydstat = None
				mydmode = None

			if stat.S_ISLNK(mymode):
				# we are merging a symbolic link
				myabsto = abssymlink(mysrc)
				if myabsto.startswith(srcroot):
					myabsto = myabsto[len(srcroot):]
				myabsto = myabsto.lstrip(sep)
				myto = os.readlink(mysrc)
				if self.settings and self.settings["D"]:
					if myto.startswith(self.settings["D"]):
						myto = myto[len(self.settings["D"]):]
				# myrealto contains the path of the real file to which this symlink points.
				# we can simply test for existence of this file to see if the target has been merged yet
				myrealto = normalize_path(os.path.join(destroot, myabsto))
				if mydmode != None:
					# destination exists
					if not stat.S_ISLNK(mydmode):
						if stat.S_ISDIR(mydmode):
							# directory in the way: we can't merge a symlink over a directory
							# we won't merge this, continue with next file...
							continue

						if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
							# Kill file blocking installation of symlink to dir #71787
							pass
						elif self.isprotected(mydest):
							# Use md5 of the target in ${D} if it exists...
							try:
								newmd5 = perform_md5(join(srcroot, myabsto))
							except FileNotFound:
								# Maybe the target is merged already.
								try:
									newmd5 = perform_md5(myrealto)
								except FileNotFound:
									newmd5 = None
							mydest = new_protect_filename(mydest, newmd5=newmd5)

				# if secondhand is None it means we're operating in "force" mode and should not create a second hand.
				if (secondhand != None) and (not os.path.exists(myrealto)):
					# either the target directory doesn't exist yet or the target file doesn't exist -- or
					# the target is a broken symlink. We will add this file to our "second hand" and merge
					# it later.
					secondhand.append(mysrc[len(srcroot):])
					continue
				# unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
				mymtime = movefile(mysrc, mydest, newmtime=thismtime, sstat=mystat, mysettings=self.settings)
				if mymtime != None:
					writemsg_stdout(">>> %s -> %s\n" % (mydest, myto))
					outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
				else:
					print "!!! Failed to move file."
					print "!!!", mydest, "->", myto
					sys.exit(1)
			elif stat.S_ISDIR(mymode):
				# we are merging a directory
				if mydmode != None:
					# destination exists

					if bsd_chflags:
						# Save then clear flags on dest.
						dflags = mydstat.st_flags
						if dflags != 0:
							bsd_chflags.lchflags(mydest, 0)

					if not os.access(mydest, os.W_OK):
						pkgstuff = pkgsplit(self.pkg)
						writemsg("\n!!! Cannot write to '"+mydest+"'.\n", noiselevel=-1)
						writemsg("!!! Please check permissions and directories for broken symlinks.\n")
						writemsg("!!! You may start the merge process again by using ebuild:\n")
						writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
						writemsg("!!! And finish by running this: env-update\n\n")
						return 1

					if stat.S_ISLNK(mydmode) or stat.S_ISDIR(mydmode):
						# a symlink to an existing directory will work for us; keep it:
						writemsg_stdout("--- %s/\n" % mydest)
						if bsd_chflags:
							bsd_chflags.lchflags(mydest, dflags)
					else:
						# a non-directory and non-symlink-to-directory. Won't work for us. Move out of the way.
						if movefile(mydest, mydest+".backup", mysettings=self.settings) is None:
							return 1
						print "bak", mydest, mydest+".backup"
						# now create our directory
						if self.settings.selinux_enabled():
							sid = selinux.get_sid(mysrc)
							selinux.secure_mkdir(mydest, sid)
						else:
							os.mkdir(mydest)
						if bsd_chflags:
							bsd_chflags.lchflags(mydest, dflags)
						os.chmod(mydest, mystat[0])
						os.chown(mydest, mystat[4], mystat[5])
						writemsg_stdout(">>> %s/\n" % mydest)
				else:
					# destination doesn't exist
					if self.settings.selinux_enabled():
						sid = selinux.get_sid(mysrc)
						selinux.secure_mkdir(mydest, sid)
					else:
						os.mkdir(mydest)
					os.chmod(mydest, mystat[0])
					os.chown(mydest, mystat[4], mystat[5])
					writemsg_stdout(">>> %s/\n" % mydest)
				outfile.write("dir "+myrealdest+"\n")
				# recurse and merge this directory
				if self.mergeme(srcroot, destroot, outfile, secondhand,
					join(offset, x), cfgfiledict, thismtime):
					return 1
			elif stat.S_ISREG(mymode):
				# we are merging a regular file
				mymd5 = perform_md5(mysrc, calc_prelink=1)
				# calculate config file protection stuff
				mydestdir = os.path.dirname(mydest)
				moveme = 1
				mymtime = None
				zing = "!!!"
				if mydmode != None:
					# destination file exists
					if stat.S_ISDIR(mydmode):
						# install of destination is blocked by an existing directory with the same name
						moveme = 0
						writemsg_stdout("!!! %s\n" % mydest)
					elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
						cfgprot = 0
						# install of destination is blocked by an existing regular file,
						# or by a symlink to an existing regular file;
						# now, config file management may come into play.
						# we only need to tweak mydest if cfg file management is in play.
						if self.isprotected(mydest):
							# we have a protection path; enable config file management.
							destmd5 = perform_md5(mydest, calc_prelink=1)
							if mymd5 == destmd5:
								# file already in place; simply update mtimes of destination
								moveme = 1
							else:
								if mymd5 == cfgfiledict.get(myrealdest, [None])[0]:
									""" An identical update has previously been
									merged. Skip it unless the user has chosen
									--noconfmem."""
									moveme = cfgfiledict["IGNORE"]
									cfgprot = cfgfiledict["IGNORE"]
									if not moveme:
										zing = "---"
										mymtime = long(mystat.st_mtime)
								else:
									moveme = 1
									cfgprot = 1
							if moveme:
								# Merging a new file, so update confmem.
								cfgfiledict[myrealdest] = [mymd5]
							elif destmd5 == cfgfiledict.get(myrealdest, [None])[0]:
								"""A previously remembered update has been
								accepted, so it is removed from confmem."""
								del cfgfiledict[myrealdest]
						if cfgprot:
							mydest = new_protect_filename(mydest, newmd5=mymd5)

				# whether config protection or not, we merge the new file the
				# same way. Unless moveme=0 (blocking directory)
				if moveme:
					mymtime = movefile(mysrc, mydest, newmtime=thismtime, sstat=mystat, mysettings=self.settings)
					if mymtime is None:
						sys.exit(1)
					zing = ">>>"

				if mymtime != None:
					outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
				writemsg_stdout("%s %s\n" % (zing, mydest))
			else:
				# we are merging a fifo or device node
				zing = "!!!"
				if mydmode is None:
					# destination doesn't exist
					if movefile(mysrc, mydest, newmtime=thismtime, sstat=mystat, mysettings=self.settings) != None:
						zing = ">>>"
					else:
						sys.exit(1)
				if stat.S_ISFIFO(mymode):
					outfile.write("fif %s\n" % myrealdest)
				else:
					outfile.write("dev %s\n" % myrealdest)
				writemsg_stdout(zing + " " + mydest + "\n")
	def merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0,
		mydbapi=None, prev_mtimes=None):
		"""
		If portage is reinstalling itself, create temporary
		copies of PORTAGE_BIN_PATH and PORTAGE_PYM_PATH in order
		to avoid relying on the new versions which may be
		incompatible. Register an atexit hook to clean up the
		temporary directories. Pre-load elog modules here since
		we won't be able to later if they get unmerged (happens
		when namespace changes).
		"""
		if self.vartree.dbapi._categories is not None:
			self.vartree.dbapi._categories = None
		if self.myroot == "/" and \
			"sys-apps" == self.cat and \
			"portage" == pkgsplit(self.pkg)[0]:
			settings = self.settings
			base_path_orig = os.path.dirname(settings["PORTAGE_BIN_PATH"])
			from tempfile import mkdtemp
			import shutil
			# Make the temp directory inside PORTAGE_TMPDIR since, unlike
			# /tmp, it can't be mounted with the "noexec" option.
			base_path_tmp = mkdtemp("", "._portage_reinstall_.",
				settings["PORTAGE_TMPDIR"])
			from portage.process import atexit_register
			atexit_register(shutil.rmtree, base_path_tmp)
			dir_perms = 0755
			for subdir in "bin", "pym":
				var_name = "PORTAGE_%s_PATH" % subdir.upper()
				var_orig = settings[var_name]
				var_new = os.path.join(base_path_tmp, subdir)
				settings[var_name] = var_new
				settings.backup_changes(var_name)
				shutil.copytree(var_orig, var_new, symlinks=True)
				os.chmod(var_new, dir_perms)
			os.chmod(base_path_tmp, dir_perms)
			# This serves to pre-load the modules.
			elog_process(self.mycpv, self.settings,
				phasefilter=filter_mergephases)

		return self._merge(mergeroot, inforoot,
			myroot, myebuild=myebuild, cleanup=cleanup,
			mydbapi=mydbapi, prev_mtimes=prev_mtimes)
	def _merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0,
		mydbapi=None, prev_mtimes=None):
		retval = -1
		self.lockdb()
		try:
			retval = self.treewalk(mergeroot, myroot, inforoot, myebuild,
				cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes)
			# undo registrations of preserved libraries, bug #210501
			if retval != os.EX_OK:
				self.vartree.dbapi.plib_registry.unregister(self.mycpv,
					self.settings["SLOT"], self.settings["COUNTER"])
			# Process ebuild logfiles
			elog_process(self.mycpv, self.settings, phasefilter=filter_mergephases)
			if retval == os.EX_OK and "noclean" not in self.settings.features:
				if myebuild is None:
					myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
				doebuild(myebuild, "clean", myroot, self.settings,
					tree=self.treetype, mydbapi=mydbapi, vartree=self.vartree)
		finally:
			self.unlockdb()
		return retval
	def getstring(self,name):
		"returns contents of a file with whitespace converted to spaces"
		if not os.path.exists(self.dbdir+"/"+name):
			return ""
		myfile = open(self.dbdir+"/"+name,"r")
		mydata = myfile.read().split()
		myfile.close()
		return " ".join(mydata)

	def copyfile(self,fname):
		import shutil
		shutil.copyfile(fname,self.dbdir+"/"+os.path.basename(fname))

	def getfile(self,fname):
		if not os.path.exists(self.dbdir+"/"+fname):
			return ""
		myfile = open(self.dbdir+"/"+fname,"r")
		mydata = myfile.read()
		myfile.close()
		return mydata

	def setfile(self,fname,data):
		write_atomic(os.path.join(self.dbdir, fname), data)

	def getelements(self,ename):
		if not os.path.exists(self.dbdir+"/"+ename):
			return []
		myelement = open(self.dbdir+"/"+ename,"r")
		mylines = myelement.readlines()
		myelement.close()
		mylist = []
		for x in mylines:
			for y in x[:-1].split():
				mylist.append(y)
		return mylist

	def setelements(self,mylist,ename):
		myelement = open(self.dbdir+"/"+ename,"w")
		for x in mylist:
			myelement.write(x+"\n")
		myelement.close()

	def isregular(self):
		"Is this a regular package (does it have a CATEGORY file? A dblink can be virtual *and* regular)"
		return os.path.exists(os.path.join(self.dbdir, "CATEGORY"))
def tar_contents(contents, root, tar, protect=None, onProgress=None):
	from portage.util import normalize_path
	import tarfile
	root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
	id_strings = {}
	maxval = len(contents)
	curval = 0
	if onProgress:
		onProgress(maxval, 0)
	paths = contents.keys()
	paths.sort()
	for path in paths:
		curval += 1
		try:
			lst = os.lstat(path)
		except OSError, e:
			if e.errno != errno.ENOENT:
				raise
			del e
			if onProgress:
				onProgress(maxval, curval)
			continue
		contents_type = contents[path][0]
		if path.startswith(root):
			arcname = path[len(root):]
		else:
			raise ValueError("invalid root argument: '%s'" % root)
		live_path = path
		if 'dir' == contents_type and \
			not stat.S_ISDIR(lst.st_mode) and \
			os.path.isdir(live_path):
			# Even though this was a directory in the original ${D}, it exists
			# as a symlink to a directory in the live filesystem. It must be
			# recorded as a real directory in the tar file to ensure that tar
			# can properly extract its children.
			live_path = os.path.realpath(live_path)
		tarinfo = tar.gettarinfo(live_path, arcname)
		# store numbers instead of real names like tar's --numeric-owner
		tarinfo.uname = id_strings.setdefault(tarinfo.uid, str(tarinfo.uid))
		tarinfo.gname = id_strings.setdefault(tarinfo.gid, str(tarinfo.gid))

		if stat.S_ISREG(lst.st_mode):
			# break hardlinks due to bug #185305
			tarinfo.type = tarfile.REGTYPE
			if protect and protect(path):
				# Create an empty file as a place holder in order to avoid
				# potential collision-protect issues.
				tarinfo.size = 0
				tar.addfile(tarinfo)
			else:
				f = open(path)
				try:
					tar.addfile(tarinfo, f)
				finally:
					f.close()
		else:
			tar.addfile(tarinfo)
		if onProgress:
			onProgress(maxval, curval)