Whitespace normalization.
[python/dscho.git] / Lib / dumbdbm.py
blobb85844dc611a8c86245b402f4d1a42c902082ed0
"""A dumb and slow but simple dbm clone.

For database spam, spam.dir contains the index (a text file),
spam.bak *may* contain a backup of the index (also a text file),
while spam.dat contains the data (a binary file).

XXX TO DO:

- seems to contain a bug when updating...

- reclaim free space (currently, space once occupied by deleted or expanded
items is never reused)

- support concurrent access (currently, if two processes take turns making
updates, they can mess up the index)

- support efficient access to large databases (currently, the whole index
is read when the database is opened, and some updates rewrite the whole index)

- support opening for read-only (flag = 'm')

"""
import os as _os
import __builtin__
import UserDict

# Keep a private reference to the real built-in open(): the module-level
# open() defined at the bottom of this file shadows it, and _Database needs
# the original to read/write its files.
_open = __builtin__.open

# Values in the .dat file start at _BLOCKSIZE-aligned offsets; an updated
# value may be written in place only while it still fits in the same number
# of blocks (see __setitem__).
_BLOCKSIZE = 512

error = IOError                         # exception class exported for anydbm
class _Database(UserDict.DictMixin):
    """Dict-like database: in-memory index over an append-mostly data file.

    self._index maps string keys to (pos, siz) pairs locating each value in
    the .dat file; the .dir file is a text mirror of that index.
    """

    # The on-disk directory and data files can remain in mutually
    # inconsistent states for an arbitrarily long time (see comments
    # at the end of __setitem__).  This is only repaired when _commit()
    # gets called.  One place _commit() gets called is from __del__(),
    # and if that occurs at program shutdown time, module globals may
    # already have gotten rebound to None.  Since it's crucial that
    # _commit() finish successfully, we can't ignore shutdown races
    # here, and _commit() must not reference any globals.
    _os = _os       # for _commit()
    _open = _open   # for _commit()

    def __init__(self, filebasename, mode):
        # mode is the UNIX permission mode used when files are created.
        self._mode = mode

        # The directory file is a text file.  Each line looks like
        #    "%r, (%d, %d)\n" % (key, pos, siz)
        # where key is the string key, pos is the offset into the dat
        # file of the associated value's first byte, and siz is the number
        # of bytes in the associated value.
        self._dirfile = filebasename + _os.extsep + 'dir'

        # The data file is a binary file pointed into by the directory
        # file, and holds the values associated with keys.  Each value
        # begins at a _BLOCKSIZE-aligned byte offset, and is a raw
        # binary 8-bit string value.
        self._datfile = filebasename + _os.extsep + 'dat'
        self._bakfile = filebasename + _os.extsep + 'bak'

        # The index is an in-memory dict, mirroring the directory file.
        self._index = None      # maps keys to (pos, siz) pairs

        # Mod by Jack: create data file if needed
        try:
            f = _open(self._datfile, 'r')
        except IOError:
            f = _open(self._datfile, 'w', self._mode)
        f.close()
        self._update()

    # Read directory file into the in-memory index dict.
    def _update(self):
        self._index = {}
        try:
            f = _open(self._dirfile)
        except IOError:
            # No directory file yet: treat as an empty database.
            pass
        else:
            for line in f:
                # NOTE(security): eval() of each index line executes
                # arbitrary Python if the .dir file is tampered with; safe
                # only because the file is assumed to be written solely by
                # _commit()/_addkey() below.
                key, pos_and_siz_pair = eval(line)
                self._index[key] = pos_and_siz_pair
            f.close()

    # Write the index dict to the directory file.  The original directory
    # file (if any) is renamed with a .bak extension first.  If a .bak
    # file currently exists, it's deleted.
    def _commit(self):
        # CAUTION: It's vital that _commit() succeed, and _commit() can
        # be called from __del__().  Therefore we must never reference a
        # global in this routine.
        if self._index is None:
            return  # nothing to do: already closed (see close())

        try:
            self._os.unlink(self._bakfile)
        except self._os.error:
            # .bak may not exist; best-effort removal.
            pass

        try:
            self._os.rename(self._dirfile, self._bakfile)
        except self._os.error:
            # .dir may not exist yet; best-effort backup.
            pass

        # Rewrite the directory file from scratch; each line must stay
        # eval()-able by _update().
        f = self._open(self._dirfile, 'w', self._mode)
        for key, pos_and_siz_pair in self._index.iteritems():
            f.write("%r, %r\n" % (key, pos_and_siz_pair))
        f.close()

    sync = _commit

    def __getitem__(self, key):
        """Return the value stored for key, reading it from the data file."""
        pos, siz = self._index[key]     # may raise KeyError
        f = _open(self._datfile, 'rb')
        f.seek(pos)
        dat = f.read(siz)
        f.close()
        return dat

    # Append val to the data file, starting at a _BLOCKSIZE-aligned
    # offset.  The data file is first padded with NUL bytes (if needed)
    # to get to an aligned offset.  Return pair
    #     (starting offset of val, len(val))
    def _addval(self, val):
        f = _open(self._datfile, 'rb+')
        f.seek(0, 2)                    # seek to end of file
        pos = int(f.tell())
        # Round pos up to the next _BLOCKSIZE boundary.
        npos = ((pos + _BLOCKSIZE - 1) // _BLOCKSIZE) * _BLOCKSIZE
        f.write('\0'*(npos-pos))
        pos = npos
        f.write(val)
        f.close()
        return (pos, len(val))

    # Write val to the data file, starting at offset pos.  The caller
    # is responsible for ensuring that there's enough room starting at
    # pos to hold val, without overwriting some other value.  Return
    # pair (pos, len(val)).
    def _setval(self, pos, val):
        f = _open(self._datfile, 'rb+')
        f.seek(pos)
        f.write(val)
        f.close()
        return (pos, len(val))

    # key is a new key whose associated value starts in the data file
    # at offset pos and with length siz.  Add an index record to
    # the in-memory index dict, and append one to the directory file.
    def _addkey(self, key, pos_and_siz_pair):
        self._index[key] = pos_and_siz_pair
        # Append-only update; format must match what _commit() writes.
        f = _open(self._dirfile, 'a', self._mode)
        f.write("%r, %r\n" % (key, pos_and_siz_pair))
        f.close()

    def __setitem__(self, key, val):
        """Store string val under string key, overwriting any old value."""
        if not type(key) == type('') == type(val):
            raise TypeError, "keys and values must be strings"
        if key not in self._index:
            self._addkey(key, self._addval(val))
        else:
            # See whether the new value is small enough to fit in the
            # (padded) space currently occupied by the old value.
            pos, siz = self._index[key]
            oldblocks = (siz + _BLOCKSIZE - 1) // _BLOCKSIZE
            newblocks = (len(val) + _BLOCKSIZE - 1) // _BLOCKSIZE
            if newblocks <= oldblocks:
                self._index[key] = self._setval(pos, val)
            else:
                # The new value doesn't fit in the (padded) space used
                # by the old value.  The blocks used by the old value are
                # forever lost.
                self._index[key] = self._addval(val)

        # Note that _index may be out of synch with the directory
        # file now:  _setval() and _addval() don't update the directory
        # file.  This also means that the on-disk directory and data
        # files are in a mutually inconsistent state, and they'll
        # remain that way until _commit() is called.  Note that this
        # is a disaster (for the database) if the program crashes
        # (so that _commit() never gets called).

    def __delitem__(self, key):
        """Remove key from the index; its data-file blocks are lost."""
        # The blocks used by the associated value are lost.
        del self._index[key]
        # XXX It's unclear why we do a _commit() here (the code always
        # XXX has, so I'm not changing it).  __setitem__ doesn't try to
        # XXX keep the directory file in synch.  Why should we?  Or
        # XXX why shouldn't __setitem__?
        self._commit()

    def keys(self):
        return self._index.keys()

    def has_key(self, key):
        return key in self._index

    def __contains__(self, key):
        return key in self._index

    def iterkeys(self):
        return self._index.iterkeys()
    __iter__ = iterkeys

    def __len__(self):
        return len(self._index)

    def close(self):
        """Flush the index to disk and disable further use of this object."""
        self._commit()
        # Dropping these also makes a second close()/_commit() a no-op.
        self._index = self._datfile = self._dirfile = self._bakfile = None

    __del__ = close
218 def open(file, flag=None, mode=0666):
219 """Open the database file, filename, and return corresponding object.
221 The flag argument, used to control how the database is opened in the
222 other DBM implementations, is ignored in the dumbdbm module; the
223 database is always opened for update, and will be created if it does
224 not exist.
226 The optional mode argument is the UNIX mode of the file, used only when
227 the database has to be created. It defaults to octal code 0666 (and
228 will be modified by the prevailing umask).
231 # flag argument is currently ignored
232 return _Database(file, mode)