On Tue, Nov 06, 2007 at 02:33:53AM -0800, akpm@linux-foundation.org wrote:
[mmotm.git] / fs / reiser4 / blocknrset.c
blobbf57c174ba7387324c06a812031170e0109da72b
1 /* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by
2 reiser4/README */
4 /* This file contains code for various block number sets used by the atom to
5 track the deleted set and wandered block mappings. */
7 #include "debug.h"
8 #include "dformat.h"
9 #include "txnmgr.h"
10 #include "context.h"
12 #include <linux/slab.h>
14 /* The proposed data structure for storing unordered block number sets is a
15 list of elements, each of which contains an array of block number or/and
16 array of block number pairs. That element called blocknr_set_entry is used
17 to store block numbers from the beginning and for extents from the end of
18 the data field (char data[...]). The ->nr_blocks and ->nr_pairs fields
19 count numbers of blocks and extents.
21 +------------------- blocknr_set_entry->data ------------------+
22 |block1|block2| ... <free space> ... |pair3|pair2|pair1|
23 +------------------------------------------------------------+
25 When current blocknr_set_entry is full, allocate a new one. */
27 /* Usage examples: blocknr sets are used in reiser4 for storing atom's delete
28 * set (single blocks and block extents), in that case blocknr pair represent an
29 * extent; atom's wandered map is also stored as a blocknr set, blocknr pairs
30 * there represent a (real block) -> (wandered block) mapping. */
32 /* Protection: blocknr sets belong to reiser4 atom, and
33 * their modifications are performed with the atom lock held */
/* The total size of a blocknr_set_entry. */
#define BLOCKNR_SET_ENTRY_SIZE 128

/* The number of blocks that can fit the blocknr data area: the entry size
   minus the two counters and the list linkage, divided by the size of one
   block number slot. */
#define BLOCKNR_SET_ENTRIES_NUMBER		\
	((BLOCKNR_SET_ENTRY_SIZE -		\
	  2 * sizeof(unsigned) -		\
	  sizeof(struct list_head)) /		\
	 sizeof(reiser4_block_nr))
45 /* An entry of the blocknr_set */
46 struct blocknr_set_entry {
47 unsigned nr_singles;
48 unsigned nr_pairs;
49 struct list_head link;
50 reiser4_block_nr entries[BLOCKNR_SET_ENTRIES_NUMBER];
53 /* A pair of blocks as recorded in the blocknr_set_entry data. */
54 struct blocknr_pair {
55 reiser4_block_nr a;
56 reiser4_block_nr b;
59 /* Return the number of blocknr slots available in a blocknr_set_entry. */
60 /* Audited by: green(2002.06.11) */
61 static unsigned bse_avail(blocknr_set_entry * bse)
63 unsigned used = bse->nr_singles + 2 * bse->nr_pairs;
65 assert("jmacd-5088", BLOCKNR_SET_ENTRIES_NUMBER >= used);
66 cassert(sizeof(blocknr_set_entry) == BLOCKNR_SET_ENTRY_SIZE);
68 return BLOCKNR_SET_ENTRIES_NUMBER - used;
71 /* Initialize a blocknr_set_entry. */
72 static void bse_init(blocknr_set_entry *bse)
74 bse->nr_singles = 0;
75 bse->nr_pairs = 0;
76 INIT_LIST_HEAD(&bse->link);
79 /* Allocate and initialize a blocknr_set_entry. */
80 /* Audited by: green(2002.06.11) */
81 static blocknr_set_entry *bse_alloc(void)
83 blocknr_set_entry *e;
85 if ((e = (blocknr_set_entry *) kmalloc(sizeof(blocknr_set_entry),
86 reiser4_ctx_gfp_mask_get())) == NULL)
87 return NULL;
89 bse_init(e);
91 return e;
94 /* Free a blocknr_set_entry. */
95 /* Audited by: green(2002.06.11) */
96 static void bse_free(blocknr_set_entry * bse)
98 kfree(bse);
101 /* Add a block number to a blocknr_set_entry */
102 /* Audited by: green(2002.06.11) */
103 static void
104 bse_put_single(blocknr_set_entry * bse, const reiser4_block_nr * block)
106 assert("jmacd-5099", bse_avail(bse) >= 1);
108 bse->entries[bse->nr_singles++] = *block;
111 /* Get a pair of block numbers */
112 /* Audited by: green(2002.06.11) */
113 static inline struct blocknr_pair *bse_get_pair(blocknr_set_entry * bse,
114 unsigned pno)
116 assert("green-1", BLOCKNR_SET_ENTRIES_NUMBER >= 2 * (pno + 1));
118 return (struct blocknr_pair *) (bse->entries +
119 BLOCKNR_SET_ENTRIES_NUMBER -
120 2 * (pno + 1));
123 /* Add a pair of block numbers to a blocknr_set_entry */
124 /* Audited by: green(2002.06.11) */
125 static void
126 bse_put_pair(blocknr_set_entry * bse, const reiser4_block_nr * a,
127 const reiser4_block_nr * b)
129 struct blocknr_pair *pair;
131 assert("jmacd-5100", bse_avail(bse) >= 2 && a != NULL && b != NULL);
133 pair = bse_get_pair(bse, bse->nr_pairs++);
135 pair->a = *a;
136 pair->b = *b;
139 /* Add either a block or pair of blocks to the block number set. The first
140 blocknr (@a) must be non-NULL. If @b is NULL a single blocknr is added, if
141 @b is non-NULL a pair is added. The block number set belongs to atom, and
142 the call is made with the atom lock held. There may not be enough space in
143 the current blocknr_set_entry. If new_bsep points to a non-NULL
144 blocknr_set_entry then it will be added to the blocknr_set and new_bsep
145 will be set to NULL. If new_bsep contains NULL then the atom lock will be
146 released and a new bse will be allocated in new_bsep. E_REPEAT will be
147 returned with the atom unlocked for the operation to be tried again. If
148 the operation succeeds, 0 is returned. If new_bsep is non-NULL and not
149 used during the call, it will be freed automatically. */
150 static int blocknr_set_add(txn_atom *atom, struct list_head *bset,
151 blocknr_set_entry **new_bsep, const reiser4_block_nr *a,
152 const reiser4_block_nr *b)
154 blocknr_set_entry *bse;
155 unsigned entries_needed;
157 assert("jmacd-5101", a != NULL);
159 entries_needed = (b == NULL) ? 1 : 2;
160 if (list_empty(bset) ||
161 bse_avail(list_entry(bset->next, blocknr_set_entry, link)) < entries_needed) {
162 /* See if a bse was previously allocated. */
163 if (*new_bsep == NULL) {
164 spin_unlock_atom(atom);
165 *new_bsep = bse_alloc();
166 return (*new_bsep != NULL) ? -E_REPEAT :
167 RETERR(-ENOMEM);
170 /* Put it on the head of the list. */
171 list_add(&((*new_bsep)->link), bset);
173 *new_bsep = NULL;
176 /* Add the single or pair. */
177 bse = list_entry(bset->next, blocknr_set_entry, link);
178 if (b == NULL) {
179 bse_put_single(bse, a);
180 } else {
181 bse_put_pair(bse, a, b);
184 /* If new_bsep is non-NULL then there was an allocation race, free this
185 copy. */
186 if (*new_bsep != NULL) {
187 bse_free(*new_bsep);
188 *new_bsep = NULL;
191 return 0;
194 /* Add an extent to the block set. If the length is 1, it is treated as a
195 single block (e.g., reiser4_set_add_block). */
196 /* Audited by: green(2002.06.11) */
197 /* Auditor note: Entire call chain cannot hold any spinlocks, because
198 kmalloc might schedule. The only exception is atom spinlock, which is
199 properly freed. */
201 blocknr_set_add_extent(txn_atom * atom,
202 struct list_head *bset,
203 blocknr_set_entry ** new_bsep,
204 const reiser4_block_nr * start,
205 const reiser4_block_nr * len)
207 assert("jmacd-5102", start != NULL && len != NULL && *len > 0);
208 return blocknr_set_add(atom, bset, new_bsep, start,
209 *len == 1 ? NULL : len);
212 /* Add a block pair to the block set. It adds exactly a pair, which is checked
213 * by an assertion that both arguments are not null.*/
214 /* Audited by: green(2002.06.11) */
215 /* Auditor note: Entire call chain cannot hold any spinlocks, because
216 kmalloc might schedule. The only exception is atom spinlock, which is
217 properly freed. */
219 blocknr_set_add_pair(txn_atom * atom,
220 struct list_head *bset,
221 blocknr_set_entry ** new_bsep, const reiser4_block_nr * a,
222 const reiser4_block_nr * b)
224 assert("jmacd-5103", a != NULL && b != NULL);
225 return blocknr_set_add(atom, bset, new_bsep, a, b);
/* Initialize a blocknr_set: an empty list of entries. */
void blocknr_set_init(struct list_head *bset)
{
	INIT_LIST_HEAD(bset);
}
234 /* Release the entries of a blocknr_set. */
235 void blocknr_set_destroy(struct list_head *bset)
237 blocknr_set_entry *bse;
239 while (!list_empty(bset)) {
240 bse = list_entry(bset->next, blocknr_set_entry, link);
241 list_del_init(&bse->link);
242 bse_free(bse);
246 /* Merge blocknr_set entries out of @from into @into. */
247 /* Audited by: green(2002.06.11) */
248 /* Auditor comments: This merge does not know if merged sets contain
249 blocks pairs (As for wandered sets) or extents, so it cannot really merge
250 overlapping ranges if there is some. So I believe it may lead to
251 some blocks being presented several times in one blocknr_set. To help
252 debugging such problems it might help to check for duplicate entries on
253 actual processing of this set. Testing this kind of stuff right here is
254 also complicated by the fact that these sets are not sorted and going
255 through whole set on each element addition is going to be CPU-heavy task */
256 void blocknr_set_merge(struct list_head *from, struct list_head *into)
258 blocknr_set_entry *bse_into = NULL;
260 /* If @from is empty, no work to perform. */
261 if (list_empty(from))
262 return;
263 /* If @into is not empty, try merging partial-entries. */
264 if (!list_empty(into)) {
266 /* Neither set is empty, pop the front to members and try to
267 combine them. */
268 blocknr_set_entry *bse_from;
269 unsigned into_avail;
271 bse_into = list_entry(into->next, blocknr_set_entry, link);
272 list_del_init(&bse_into->link);
273 bse_from = list_entry(from->next, blocknr_set_entry, link);
274 list_del_init(&bse_from->link);
276 /* Combine singles. */
277 for (into_avail = bse_avail(bse_into);
278 into_avail != 0 && bse_from->nr_singles != 0;
279 into_avail -= 1) {
280 bse_put_single(bse_into,
281 &bse_from->entries[--bse_from->
282 nr_singles]);
285 /* Combine pairs. */
286 for (; into_avail > 1 && bse_from->nr_pairs != 0;
287 into_avail -= 2) {
288 struct blocknr_pair *pair =
289 bse_get_pair(bse_from, --bse_from->nr_pairs);
290 bse_put_pair(bse_into, &pair->a, &pair->b);
293 /* If bse_from is empty, delete it now. */
294 if (bse_avail(bse_from) == BLOCKNR_SET_ENTRIES_NUMBER) {
295 bse_free(bse_from);
296 } else {
297 /* Otherwise, bse_into is full or nearly full (e.g.,
298 it could have one slot avail and bse_from has one
299 pair left). Push it back onto the list. bse_from
300 becomes bse_into, which will be the new partial. */
301 list_add(&bse_into->link, into);
302 bse_into = bse_from;
306 /* Splice lists together. */
307 list_splice_init(from, into->prev);
309 /* Add the partial entry back to the head of the list. */
310 if (bse_into != NULL)
311 list_add(&bse_into->link, into);
314 /* Iterate over all blocknr set elements. */
315 int blocknr_set_iterator(txn_atom *atom, struct list_head *bset,
316 blocknr_set_actor_f actor, void *data, int delete)
319 blocknr_set_entry *entry;
321 assert("zam-429", atom != NULL);
322 assert("zam-430", atom_is_protected(atom));
323 assert("zam-431", bset != 0);
324 assert("zam-432", actor != NULL);
326 entry = list_entry(bset->next, blocknr_set_entry, link);
327 while (bset != &entry->link) {
328 blocknr_set_entry *tmp = list_entry(entry->link.next, blocknr_set_entry, link);
329 unsigned int i;
330 int ret;
332 for (i = 0; i < entry->nr_singles; i++) {
333 ret = actor(atom, &entry->entries[i], NULL, data);
335 /* We can't break a loop if delete flag is set. */
336 if (ret != 0 && !delete)
337 return ret;
340 for (i = 0; i < entry->nr_pairs; i++) {
341 struct blocknr_pair *ab;
343 ab = bse_get_pair(entry, i);
345 ret = actor(atom, &ab->a, &ab->b, data);
347 if (ret != 0 && !delete)
348 return ret;
351 if (delete) {
352 list_del(&entry->link);
353 bse_free(entry);
356 entry = tmp;
359 return 0;
/*
 * Local variables:
 * c-indentation-style: "K&R"
 * mode-name: "LC"
 * c-basic-offset: 8
 * tab-width: 8
 * fill-column: 79
 * scroll-step: 1
 * End:
 */