1 /* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */
3 /* This file contains code for various block number sets used by the atom to
4 track the deleted set and wandered block mappings. */
11 #include <linux/slab.h>
/* The proposed data structure for storing unordered block number sets is a
   list of elements, each of which contains an array of block numbers and/or
   an array of block number pairs.  Such an element, called blocknr_set_entry,
   stores single block numbers from the beginning of its data field and block
   pairs from the end of it (char data[...]).  The ->nr_singles and ->nr_pairs
   fields count the numbers of stored blocks and pairs respectively.

   +------------------ blocknr_set_entry->data ------------------+
   |block1|block2| ...      <free space>      ... |pair3|pair2|pair1|
   +--------------------------------------------------------------+

   When the current blocknr_set_entry is full, a new one is allocated. */
/* Usage examples: blocknr sets are used in reiser4 for storing the atom's
 * delete set (single blocks and block extents); in that case a blocknr pair
 * represents an extent.  The atom's wandered map is also stored as a blocknr
 * set; blocknr pairs there represent a (real block) -> (wandered block)
 * mapping. */
31 /* Protection: blocknr sets belong to reiser4 atom, and
32 * their modifications are performed with the atom lock held */
/* The total size of a blocknr_set_entry. */
#define BLOCKNR_SET_ENTRY_SIZE 128

/* The number of blocks that can fit the blocknr data area.  This is the
   entry size minus the two unsigned counters (nr_singles/nr_pairs) and the
   list linkage, divided by the size of one block number. */
#define BLOCKNR_SET_ENTRIES_NUMBER \
	((BLOCKNR_SET_ENTRY_SIZE - \
	  2 * sizeof (unsigned) - \
	  sizeof(struct list_head)) / \
	 sizeof(reiser4_block_nr))
44 /* An entry of the blocknr_set */
45 struct blocknr_set_entry
{
48 struct list_head link
;
49 reiser4_block_nr entries
[BLOCKNR_SET_ENTRIES_NUMBER
];
52 /* A pair of blocks as recorded in the blocknr_set_entry data. */
58 /* Return the number of blocknr slots available in a blocknr_set_entry. */
59 /* Audited by: green(2002.06.11) */
60 static unsigned bse_avail(blocknr_set_entry
* bse
)
62 unsigned used
= bse
->nr_singles
+ 2 * bse
->nr_pairs
;
64 assert("jmacd-5088", BLOCKNR_SET_ENTRIES_NUMBER
>= used
);
65 cassert(sizeof(blocknr_set_entry
) == BLOCKNR_SET_ENTRY_SIZE
);
67 return BLOCKNR_SET_ENTRIES_NUMBER
- used
;
70 /* Initialize a blocknr_set_entry. */
71 static void bse_init(blocknr_set_entry
*bse
)
75 INIT_LIST_HEAD(&bse
->link
);
78 /* Allocate and initialize a blocknr_set_entry. */
79 /* Audited by: green(2002.06.11) */
80 static blocknr_set_entry
*bse_alloc(void)
84 if ((e
= (blocknr_set_entry
*) kmalloc(sizeof(blocknr_set_entry
),
85 reiser4_ctx_gfp_mask_get())) == NULL
)
93 /* Free a blocknr_set_entry. */
94 /* Audited by: green(2002.06.11) */
95 static void bse_free(blocknr_set_entry
* bse
)
100 /* Add a block number to a blocknr_set_entry */
101 /* Audited by: green(2002.06.11) */
103 bse_put_single(blocknr_set_entry
* bse
, const reiser4_block_nr
* block
)
105 assert("jmacd-5099", bse_avail(bse
) >= 1);
107 bse
->entries
[bse
->nr_singles
++] = *block
;
110 /* Get a pair of block numbers */
111 /* Audited by: green(2002.06.11) */
112 static inline struct blocknr_pair
*bse_get_pair(blocknr_set_entry
* bse
,
115 assert("green-1", BLOCKNR_SET_ENTRIES_NUMBER
>= 2 * (pno
+ 1));
117 return (struct blocknr_pair
*) (bse
->entries
+
118 BLOCKNR_SET_ENTRIES_NUMBER
-
122 /* Add a pair of block numbers to a blocknr_set_entry */
123 /* Audited by: green(2002.06.11) */
125 bse_put_pair(blocknr_set_entry
* bse
, const reiser4_block_nr
* a
,
126 const reiser4_block_nr
* b
)
128 struct blocknr_pair
*pair
;
130 assert("jmacd-5100", bse_avail(bse
) >= 2 && a
!= NULL
&& b
!= NULL
);
132 pair
= bse_get_pair(bse
, bse
->nr_pairs
++);
138 /* Add either a block or pair of blocks to the block number set. The first
139 blocknr (@a) must be non-NULL. If @b is NULL a single blocknr is added, if
140 @b is non-NULL a pair is added. The block number set belongs to atom, and
141 the call is made with the atom lock held. There may not be enough space in
142 the current blocknr_set_entry. If new_bsep points to a non-NULL
143 blocknr_set_entry then it will be added to the blocknr_set and new_bsep
144 will be set to NULL. If new_bsep contains NULL then the atom lock will be
145 released and a new bse will be allocated in new_bsep. E_REPEAT will be
146 returned with the atom unlocked for the operation to be tried again. If
147 the operation succeeds, 0 is returned. If new_bsep is non-NULL and not
148 used during the call, it will be freed automatically. */
149 static int blocknr_set_add(txn_atom
*atom
, struct list_head
*bset
,
150 blocknr_set_entry
**new_bsep
, const reiser4_block_nr
*a
,
151 const reiser4_block_nr
*b
)
153 blocknr_set_entry
*bse
;
154 unsigned entries_needed
;
156 assert("jmacd-5101", a
!= NULL
);
158 entries_needed
= (b
== NULL
) ? 1 : 2;
159 if (list_empty(bset
) ||
160 bse_avail(list_entry(bset
->next
, blocknr_set_entry
, link
)) < entries_needed
) {
161 /* See if a bse was previously allocated. */
162 if (*new_bsep
== NULL
) {
163 spin_unlock_atom(atom
);
164 *new_bsep
= bse_alloc();
165 return (*new_bsep
!= NULL
) ? -E_REPEAT
:
169 /* Put it on the head of the list. */
170 list_add(&((*new_bsep
)->link
), bset
);
175 /* Add the single or pair. */
176 bse
= list_entry(bset
->next
, blocknr_set_entry
, link
);
178 bse_put_single(bse
, a
);
180 bse_put_pair(bse
, a
, b
);
183 /* If new_bsep is non-NULL then there was an allocation race, free this copy. */
184 if (*new_bsep
!= NULL
) {
192 /* Add an extent to the block set. If the length is 1, it is treated as a
193 single block (e.g., reiser4_set_add_block). */
194 /* Audited by: green(2002.06.11) */
195 /* Auditor note: Entire call chain cannot hold any spinlocks, because
196 kmalloc might schedule. The only exception is atom spinlock, which is
199 blocknr_set_add_extent(txn_atom
* atom
,
200 struct list_head
* bset
,
201 blocknr_set_entry
** new_bsep
,
202 const reiser4_block_nr
* start
,
203 const reiser4_block_nr
* len
)
205 assert("jmacd-5102", start
!= NULL
&& len
!= NULL
&& *len
> 0);
206 return blocknr_set_add(atom
, bset
, new_bsep
, start
,
207 *len
== 1 ? NULL
: len
);
210 /* Add a block pair to the block set. It adds exactly a pair, which is checked
211 * by an assertion that both arguments are not null.*/
212 /* Audited by: green(2002.06.11) */
213 /* Auditor note: Entire call chain cannot hold any spinlocks, because
214 kmalloc might schedule. The only exception is atom spinlock, which is
217 blocknr_set_add_pair(txn_atom
* atom
,
218 struct list_head
* bset
,
219 blocknr_set_entry
** new_bsep
, const reiser4_block_nr
* a
,
220 const reiser4_block_nr
* b
)
222 assert("jmacd-5103", a
!= NULL
&& b
!= NULL
);
223 return blocknr_set_add(atom
, bset
, new_bsep
, a
, b
);
/* Initialize a blocknr_set: an empty list head. */
void blocknr_set_init(struct list_head *bset)
{
	INIT_LIST_HEAD(bset);
}
232 /* Release the entries of a blocknr_set. */
233 void blocknr_set_destroy(struct list_head
*bset
)
235 blocknr_set_entry
*bse
;
237 while (!list_empty(bset
)) {
238 bse
= list_entry(bset
->next
, blocknr_set_entry
, link
);
239 list_del_init(&bse
->link
);
244 /* Merge blocknr_set entries out of @from into @into. */
245 /* Audited by: green(2002.06.11) */
246 /* Auditor comments: This merge does not know if merged sets contain
247 blocks pairs (As for wandered sets) or extents, so it cannot really merge
248 overlapping ranges if there is some. So I believe it may lead to
249 some blocks being presented several times in one blocknr_set. To help
250 debugging such problems it might help to check for duplicate entries on
251 actual processing of this set. Testing this kind of stuff right here is
252 also complicated by the fact that these sets are not sorted and going
253 through whole set on each element addition is going to be CPU-heavy task */
254 void blocknr_set_merge(struct list_head
* from
, struct list_head
* into
)
256 blocknr_set_entry
*bse_into
= NULL
;
258 /* If @from is empty, no work to perform. */
259 if (list_empty(from
))
261 /* If @into is not empty, try merging partial-entries. */
262 if (!list_empty(into
)) {
264 /* Neither set is empty, pop the front to members and try to combine them. */
265 blocknr_set_entry
*bse_from
;
268 bse_into
= list_entry(into
->next
, blocknr_set_entry
, link
);
269 list_del_init(&bse_into
->link
);
270 bse_from
= list_entry(from
->next
, blocknr_set_entry
, link
);
271 list_del_init(&bse_from
->link
);
273 /* Combine singles. */
274 for (into_avail
= bse_avail(bse_into
);
275 into_avail
!= 0 && bse_from
->nr_singles
!= 0;
277 bse_put_single(bse_into
,
278 &bse_from
->entries
[--bse_from
->
283 for (; into_avail
> 1 && bse_from
->nr_pairs
!= 0;
285 struct blocknr_pair
*pair
=
286 bse_get_pair(bse_from
, --bse_from
->nr_pairs
);
287 bse_put_pair(bse_into
, &pair
->a
, &pair
->b
);
290 /* If bse_from is empty, delete it now. */
291 if (bse_avail(bse_from
) == BLOCKNR_SET_ENTRIES_NUMBER
) {
294 /* Otherwise, bse_into is full or nearly full (e.g.,
295 it could have one slot avail and bse_from has one
296 pair left). Push it back onto the list. bse_from
297 becomes bse_into, which will be the new partial. */
298 list_add(&bse_into
->link
, into
);
303 /* Splice lists together. */
304 list_splice_init(from
, into
->prev
);
306 /* Add the partial entry back to the head of the list. */
307 if (bse_into
!= NULL
)
308 list_add(&bse_into
->link
, into
);
311 /* Iterate over all blocknr set elements. */
312 int blocknr_set_iterator(txn_atom
*atom
, struct list_head
*bset
,
313 blocknr_set_actor_f actor
, void *data
, int delete)
316 blocknr_set_entry
*entry
;
318 assert("zam-429", atom
!= NULL
);
319 assert("zam-430", atom_is_protected(atom
));
320 assert("zam-431", bset
!= 0);
321 assert("zam-432", actor
!= NULL
);
323 entry
= list_entry(bset
->next
, blocknr_set_entry
, link
);
324 while (bset
!= &entry
->link
) {
325 blocknr_set_entry
*tmp
= list_entry(entry
->link
.next
, blocknr_set_entry
, link
);
329 for (i
= 0; i
< entry
->nr_singles
; i
++) {
330 ret
= actor(atom
, &entry
->entries
[i
], NULL
, data
);
332 /* We can't break a loop if delete flag is set. */
333 if (ret
!= 0 && !delete)
337 for (i
= 0; i
< entry
->nr_pairs
; i
++) {
338 struct blocknr_pair
*ab
;
340 ab
= bse_get_pair(entry
, i
);
342 ret
= actor(atom
, &ab
->a
, &ab
->b
, data
);
344 if (ret
!= 0 && !delete)
349 list_del(&entry
->link
);
361 * c-indentation-style: "K&R"