1 """Manage shelves of pickled objects.
3 A "shelf" is a persistent, dictionary-like object. The difference
4 with dbm databases is that the values (not the keys!) in a shelf can
5 be essentially arbitrary Python objects -- anything that the "pickle"
6 module can handle. This includes most class instances, recursive data
7 types, and objects containing lots of shared sub-objects. The keys

To summarize the interface (key is a string, data is an arbitrary
object):

        import shelve
        d = shelve.open(filename) # open, with (g)dbm filename -- no suffix

        d[key] = data   # store data at key (overwrites old data if
                        # using an existing key)
        data = d[key]   # retrieve a COPY of the data at key (raise
                        # KeyError if no such key) -- NOTE that this
                        # access returns a *copy* of the entry!
        del d[key]      # delete data stored at key (raises KeyError
                        # if no such key)
        flag = d.has_key(key)   # true if the key exists; same as "key in d"
        list = d.keys() # a list of all existing keys (slow!)

        d.close()       # close it

Depending on the implementation, closing a persistent dictionary may
or may not be necessary to flush changes to disk.

Normally, d[key] returns a COPY of the entry.  This needs care when
mutable entries are mutated: for example, if d[key] is a list,
        d[key].append(anitem)
does NOT modify the entry d[key] itself, as stored in the persistent
mapping -- it only modifies the copy, which is then immediately
discarded, so that the append has NO effect whatsoever.  To append an
item to d[key] in a way that will affect the persistent mapping, use:
        data = d[key]
        data.append(anitem)
        d[key] = data

To avoid the problem with mutable entries, you may pass the keyword
argument writeback=True in the call to shelve.open.  When you use:
        d = shelve.open(filename, writeback=True)
then d keeps a cache of all entries you access, and writes them all back
to the persistent mapping when you call d.close().  This ensures that
such usage as d[key].append(anitem) works as intended.

However, using keyword argument writeback=True may consume vast amounts
of memory for the cache, and it may make d.close() very slow, if you
access many of d's entries after opening it in this way: d has no way to
check which of the entries you access are mutable and/or which ones you
actually mutate, so it must cache, and write back at close, all of the
entries that you access.  You can call d.sync() to write back all the
entries in the cache, and empty the cache (d.sync() also synchronizes
the persistent dictionary on disk, if feasible).
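
For example, a sketch of writeback usage (the database name here is
illustrative):

        d = shelve.open('spam_db', writeback=True)
        d['languages'] = ['python']
        d['languages'].append('c')  # affects the shelf, thanks to writeback
        d.sync()                    # write back cached entries, empty the cache
        d.close()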
"""

# Try using cPickle and cStringIO if available.

try:
    from cPickle import Pickler, Unpickler
except ImportError:
    from pickle import Pickler, Unpickler

try:
    from cStringIO import StringIO
except ImportError:
    from StringIO import StringIO

import UserDict

__all__ = ["Shelf", "BsdDbShelf", "DbfilenameShelf", "open"]

class Shelf(UserDict.DictMixin):
77 """Base class for shelf implementations.
79 This is initialized with a dictionary-like object.
80 See the module's __doc__ string for an overview of the interface.
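
    A minimal sketch, using an ordinary in-memory dict as the backing
    mapping (values are stored in it as pickled strings):

        s = Shelf({})
        s['numbers'] = [1, 2, 3]
        print s['numbers']      # -> [1, 2, 3], unpickled on access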
    """

    def __init__(self, dict, protocol=None, writeback=False):
        self.dict = dict
        if protocol is None:
            protocol = 0
        self._protocol = protocol
        self.writeback = writeback
        self.cache = {}

    def keys(self):
        return self.dict.keys()

    def has_key(self, key):
        return self.dict.has_key(key)

    def __contains__(self, key):
        return self.dict.has_key(key)

    def get(self, key, default=None):
        if self.dict.has_key(key):
            return self[key]
        return default

    def __getitem__(self, key):
        # Serve from the writeback cache when possible; otherwise
        # unpickle the value from the underlying mapping.
        try:
            value = self.cache[key]
        except KeyError:
            f = StringIO(self.dict[key])
            value = Unpickler(f).load()
            if self.writeback:
                self.cache[key] = value
        return value

    def __setitem__(self, key, value):
        if self.writeback:
            self.cache[key] = value
        f = StringIO()
        p = Pickler(f, self._protocol)
        p.dump(value)
        self.dict[key] = f.getvalue()

    def __delitem__(self, key):
        del self.dict[key]
        try:
            del self.cache[key]
        except KeyError:
            pass

    def close(self):
        self.sync()
        try:
            self.dict.close()
        except AttributeError:
            pass
        self.dict = 0

    def __del__(self):
        if not hasattr(self, 'writeback'):
            # __init__ didn't succeed, so don't bother closing
            return
        self.close()

    def sync(self):
        if self.writeback and self.cache:
            # Temporarily turn writeback off so that writing the cached
            # entries back does not re-populate the cache.
            self.writeback = False
            for key, entry in self.cache.iteritems():
                self[key] = entry
            self.writeback = True
            self.cache = {}
        if hasattr(self.dict, 'sync'):
            self.dict.sync()


class BsdDbShelf(Shelf):
159 """Shelf implementation using the "BSD" db interface.
161 This adds methods first(), next(), previous(), last() and
162 set_location() that have no counterpart in [g]dbm databases.
164 The actual database must be opened using one of the "bsddb"
165 modules "open" routines (i.e. bsddb.hashopen, bsddb.btopen or
166 bsddb.rnopen) and passed to the constructor.
168 See the module's __doc__ string for an overview of the interface.
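
    A sketch of typical usage (the filename is illustrative):

        import bsddb
        db = bsddb.btopen('spam.db', 'c')
        d = BsdDbShelf(db)
        d['key'] = 'value'
        print d.first()     # first (key, value) pair in btree order
        d.close()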
    """

    def __init__(self, dict, protocol=None, writeback=False):
        Shelf.__init__(self, dict, protocol, writeback)

    def set_location(self, key):
        (key, value) = self.dict.set_location(key)
        f = StringIO(value)
        return (key, Unpickler(f).load())

    def next(self):
        (key, value) = self.dict.next()
        f = StringIO(value)
        return (key, Unpickler(f).load())

    def previous(self):
        (key, value) = self.dict.previous()
        f = StringIO(value)
        return (key, Unpickler(f).load())

    def first(self):
        (key, value) = self.dict.first()
        f = StringIO(value)
        return (key, Unpickler(f).load())

    def last(self):
        (key, value) = self.dict.last()
        f = StringIO(value)
        return (key, Unpickler(f).load())


class DbfilenameShelf(Shelf):
201 """Shelf implementation using the "anydbm" generic dbm interface.
203 This is initialized with the filename for the dbm database.
204 See the module's __doc__ string for an overview of the interface.
    """

    def __init__(self, filename, flag='c', protocol=None, writeback=False):
        import anydbm
        Shelf.__init__(self, anydbm.open(filename, flag), protocol, writeback)


def open(filename, flag='c', protocol=None, writeback=False):
213 """Open a persistent dictionary for reading and writing.
215 The filename parameter is the base filename for the underlying
216 database. As a side-effect, an extension may be added to the
217 filename and more than one file may be created. The optional flag
218 parameter has the same interpretation as the flag parameter of
219 anydbm.open(). The optional protocol parameter specifies the
220 version of the pickle protocol (0, 1, or 2).
222 See the module's __doc__ string for an overview of the interface.
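
    A brief sketch (the database name here is illustrative):

        import shelve
        d = shelve.open('spam_db', flag='c', protocol=2)
        d['eggs'] = {'count': 12}
        d.close()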
    """

    return DbfilenameShelf(filename, flag, protocol, writeback)