/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

/* Reiserfs block (de)allocator, bitmap-based. */

#include <linux/time.h>
#include "reiserfs.h"
#include <linux/errno.h>
#include <linux/buffer_head.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>

#define PREALLOCATION_SIZE 9

/* different reiserfs block allocator options */

#define SB_ALLOC_OPTS(s) (REISERFS_SB(s)->s_alloc_options.bits)

#define _ALLOC_concentrating_formatted_nodes 0
#define _ALLOC_displacing_large_files 1
#define _ALLOC_displacing_new_packing_localities 2
#define _ALLOC_old_hashed_relocation 3
#define _ALLOC_new_hashed_relocation 4
#define _ALLOC_skip_busy 5
#define _ALLOC_displace_based_on_dirid 6
#define _ALLOC_hashed_formatted_nodes 7
#define _ALLOC_old_way 8
#define _ALLOC_hundredth_slices 9
#define _ALLOC_dirid_groups 10
#define _ALLOC_oid_groups 11
#define _ALLOC_packing_groups 12

#define concentrating_formatted_nodes(s)	test_bit(_ALLOC_concentrating_formatted_nodes, &SB_ALLOC_OPTS(s))
#define displacing_large_files(s)	test_bit(_ALLOC_displacing_large_files, &SB_ALLOC_OPTS(s))
#define displacing_new_packing_localities(s)	test_bit(_ALLOC_displacing_new_packing_localities, &SB_ALLOC_OPTS(s))

#define SET_OPTION(optname) \
	do { \
		reiserfs_info(s, "block allocator option \"%s\" is set", #optname); \
		set_bit(_ALLOC_ ## optname, &SB_ALLOC_OPTS(s)); \
	} while (0)
#define TEST_OPTION(optname, s) \
	test_bit(_ALLOC_ ## optname, &SB_ALLOC_OPTS(s))
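
/*
 * For example, TEST_OPTION(skip_busy, s) expands to
 * test_bit(_ALLOC_skip_busy, &SB_ALLOC_OPTS(s)), and SET_OPTION(skip_busy)
 * logs the option name and sets that same bit.
 */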

static inline void get_bit_address(struct super_block *s,
				   b_blocknr_t block,
				   unsigned int *bmap_nr,
				   unsigned int *offset)
{
	/* The block is in the bitmap block whose number equals the block
	 * number divided by the number of bits in a block. */
	*bmap_nr = block >> (s->s_blocksize_bits + 3);
	/* Within that bitmap block it is located at bit offset *offset. */
	*offset = block & ((s->s_blocksize << 3) - 1);
}
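
/*
 * For example, with 4 KiB blocks s_blocksize_bits is 12, so one bitmap block
 * covers 4096 * 8 = 32768 blocks; block 100000 then maps to
 * bmap_nr = 100000 >> 15 = 3 and offset = 100000 & 32767 = 1696.
 */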

int is_reusable(struct super_block *s, b_blocknr_t block, int bit_value)
{
	unsigned int bmap, offset;
	unsigned int bmap_count = reiserfs_bmap_count(s);

	if (block == 0 || block >= SB_BLOCK_COUNT(s)) {
		reiserfs_error(s, "vs-4010",
			       "block number is out of range %lu (%u)",
			       block, SB_BLOCK_COUNT(s));
		return 0;
	}

	get_bit_address(s, block, &bmap, &offset);

	/* Old format filesystem? Unlikely, but the bitmaps are all up front so
	 * we need to account for it. */
	if (unlikely(test_bit(REISERFS_OLD_FORMAT,
			      &(REISERFS_SB(s)->s_properties)))) {
		b_blocknr_t bmap1 = REISERFS_SB(s)->s_sbh->b_blocknr + 1;
		if (block >= bmap1 &&
		    block <= bmap1 + bmap_count) {
			reiserfs_error(s, "vs-4019", "bitmap block %lu(%u) "
				       "can't be freed or reused",
				       block, bmap_count);
			return 0;
		}
	} else {
		if (offset == 0) {
			reiserfs_error(s, "vs-4020", "bitmap block %lu(%u) "
				       "can't be freed or reused",
				       block, bmap_count);
			return 0;
		}
	}

	if (bmap >= bmap_count) {
		reiserfs_error(s, "vs-4030", "bitmap for requested block "
			       "is out of range: block=%lu, bitmap_nr=%u",
			       block, bmap);
		return 0;
	}

	if (bit_value == 0 && block == SB_ROOT_BLOCK(s)) {
		reiserfs_error(s, "vs-4050", "this is root block (%u), "
			       "it must be busy", SB_ROOT_BLOCK(s));
		return 0;
	}

	return 1;
}

/* Searches journal structures for a given block number (bmap, off).  If the
   block is found in the reiserfs journal, it suggests the next free block
   candidate to test. */
static inline int is_block_in_journal(struct super_block *s, unsigned int bmap,
				      int off, int *next)
{
	b_blocknr_t tmp;

	if (reiserfs_in_journal(s, bmap, off, 1, &tmp)) {
		if (tmp) {	/* hint supplied */
			*next = tmp;
			PROC_INFO_INC(s, scan_bitmap.in_journal_hint);
		} else {
			(*next) = off + 1;	/* inc offset to avoid looping. */
			PROC_INFO_INC(s, scan_bitmap.in_journal_nohint);
		}
		PROC_INFO_INC(s, scan_bitmap.retry);
		return 1;
	}
	return 0;
}

/* Searches for a window of zero bits with given minimum and maximum lengths
 * in one bitmap block */
static int scan_bitmap_block(struct reiserfs_transaction_handle *th,
			     unsigned int bmap_n, int *beg, int boundary,
			     int min, int max, int unfm)
{
	struct super_block *s = th->t_super;
	struct reiserfs_bitmap_info *bi = &SB_AP_BITMAP(s)[bmap_n];
	struct buffer_head *bh;
	int end, next;
	int org = *beg;

	BUG_ON(!th->t_trans_id);

	RFALSE(bmap_n >= reiserfs_bmap_count(s), "Bitmap %u is out of "
	       "range (0..%u)", bmap_n, reiserfs_bmap_count(s) - 1);
	PROC_INFO_INC(s, scan_bitmap.bmap);
/* this is unclear and lacks comments, explain how journal bitmaps
   work here for the reader.  Convey a sense of the design here.  What
   is a window? */
/* - I mean `a window of zero bits' as in description of this function - Zam. */

	if (!bi) {
		reiserfs_error(s, "jdm-4055", "NULL bitmap info pointer "
			       "for bitmap %d", bmap_n);
		return 0;
	}

	bh = reiserfs_read_bitmap_block(s, bmap_n);
	if (bh == NULL)
		return 0;

	while (1) {
	      cont:
		if (bi->free_count < min) {
			brelse(bh);
			return 0;	/* No free blocks in this bitmap */
		}

		/* search for a first zero bit -- beginning of a window */
		*beg = reiserfs_find_next_zero_le_bit
		    ((unsigned long *)(bh->b_data), boundary, *beg);

		if (*beg + min > boundary) {	/* search for a zero bit fails or the rest of bitmap block
						 * cannot contain a zero window of minimum size */
			brelse(bh);
			return 0;
		}

		if (unfm && is_block_in_journal(s, bmap_n, *beg, beg))
			continue;
		/* first zero bit found; we check next bits */
		for (end = *beg + 1;; end++) {
			if (end >= *beg + max || end >= boundary
			    || reiserfs_test_le_bit(end, bh->b_data)) {
				next = end;
				break;
			}
			/* finding the other end of zero bit window requires looking into journal
			 * structures (in case of searching for free blocks for unformatted nodes) */
			if (unfm && is_block_in_journal(s, bmap_n, end, &next))
				break;
		}

		/* now (*beg) points to beginning of zero bits window,
		 * (end) points to one bit after the window end */
		if (end - *beg >= min) {	/* it seems we have found window of proper size */
			int i;
			reiserfs_prepare_for_journal(s, bh, 1);
			/* try to set all blocks used, checking whether they are still free */
			for (i = *beg; i < end; i++) {
				/* It seems that we should not check in journal again. */
				if (reiserfs_test_and_set_le_bit
				    (i, bh->b_data)) {
					/* bit was set by another process
					 * while we slept in prepare_for_journal() */
					PROC_INFO_INC(s, scan_bitmap.stolen);
					if (i >= *beg + min) {	/* we can continue with a smaller set of
								 * allocated blocks, if its length is
								 * greater than or equal to `min' */
						end = i;
						break;
					}
					/* otherwise we clear all bits that were set ... */
					while (--i >= *beg)
						reiserfs_clear_le_bit
						    (i, bh->b_data);
					reiserfs_restore_prepared_buffer(s, bh);
					*beg = org;
					/* ... and search again in current block from beginning */
					goto cont;
				}
			}
			bi->free_count -= (end - *beg);
			journal_mark_dirty(th, s, bh);
			brelse(bh);

			/* free block count calculation */
			reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s),
						     1);
			PUT_SB_FREE_BLOCKS(s, SB_FREE_BLOCKS(s) - (end - *beg));
			journal_mark_dirty(th, s, SB_BUFFER_WITH_SB(s));

			return end - (*beg);
		} else {
			*beg = next;
		}
	}
}

static int bmap_hash_id(struct super_block *s, u32 id)
{
	char *hash_in = NULL;
	unsigned long hash;
	unsigned bm;

	if (id <= 2) {
		bm = 1;
	} else {
		hash_in = (char *)(&id);
		hash = keyed_hash(hash_in, 4);
		bm = hash % reiserfs_bmap_count(s);
		if (!bm)
			bm = 1;
	}
	/* this can only be true when SB_BMAP_NR = 1 */
	if (bm >= reiserfs_bmap_count(s))
		bm = 0;
	return bm;
}
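
/*
 * Note: a "block group" in the hints below is simply the range of blocks
 * described by one bitmap block, i.e. s_blocksize * 8 blocks (32768 blocks,
 * or 128 MiB, with 4 KiB blocks).  Ids 0..2 are pinned to group 1 so that
 * the root directory's neighbourhood stays near the start of the disk.
 */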

/*
 * hashes the id and then returns > 0 if the block group for the
 * corresponding hash is full
 */
static inline int block_group_used(struct super_block *s, u32 id)
{
	int bm = bmap_hash_id(s, id);
	struct reiserfs_bitmap_info *info = &SB_AP_BITMAP(s)[bm];

	/* If we don't have cached information on this bitmap block, we're
	 * going to have to load it later anyway. Loading it here allows us
	 * to make a better decision. This favors long-term performance gain
	 * with a better on-disk layout vs. a short term gain of skipping the
	 * read and potentially having a bad placement. */
	if (info->free_count == UINT_MAX) {
		struct buffer_head *bh = reiserfs_read_bitmap_block(s, bm);
		brelse(bh);
	}

	if (info->free_count > ((s->s_blocksize << 3) * 60 / 100)) {
		return 0;
	}
	return 1;
}

/*
 * the packing is returned in disk byte order
 */
__le32 reiserfs_choose_packing(struct inode * dir)
{
	__le32 packing;
	if (TEST_OPTION(packing_groups, dir->i_sb)) {
		u32 parent_dir = le32_to_cpu(INODE_PKEY(dir)->k_dir_id);
		/*
		 * some versions of reiserfsck expect packing locality 1 to be
		 * special
		 */
		if (parent_dir == 1 || block_group_used(dir->i_sb, parent_dir))
			packing = INODE_PKEY(dir)->k_objectid;
		else
			packing = INODE_PKEY(dir)->k_dir_id;
	} else
		packing = INODE_PKEY(dir)->k_objectid;
	return packing;
}

/* Tries to find contiguous zero bit window (given size) in given region of
 * bitmap and place new blocks there. Returns number of allocated blocks. */
static int scan_bitmap(struct reiserfs_transaction_handle *th,
		       b_blocknr_t * start, b_blocknr_t finish,
		       int min, int max, int unfm, sector_t file_block)
{
	int nr_allocated = 0;
	struct super_block *s = th->t_super;
	/* find every bm and bmap and bmap_nr in this file, and change them all to bitmap_blocknr
	 * - Hans, it is not a block number - Zam. */

	unsigned int bm, off;
	unsigned int end_bm, end_off;
	unsigned int off_max = s->s_blocksize << 3;

	BUG_ON(!th->t_trans_id);

	PROC_INFO_INC(s, scan_bitmap.call);
	if (SB_FREE_BLOCKS(s) <= 0)
		return 0;	/* No point in looking for more free blocks */

	get_bit_address(s, *start, &bm, &off);
	get_bit_address(s, finish, &end_bm, &end_off);
	if (bm > reiserfs_bmap_count(s))
		return 0;
	if (end_bm > reiserfs_bmap_count(s))
		end_bm = reiserfs_bmap_count(s);

	/* When the bitmap is more than 10% free, anyone can allocate.
	 * When it's less than 10% free, only files that already use the
	 * bitmap are allowed. Once we pass 80% full, this restriction
	 * is lifted.
	 *
	 * We do this so that files that grow later still have space close to
	 * their original allocation. This improves locality, and presumably
	 * performance as a result.
	 *
	 * This is only an allocation policy and does not make up for getting a
	 * bad hint. Decent hinting must be implemented for this to work well.
	 */
	if (TEST_OPTION(skip_busy, s)
	    && SB_FREE_BLOCKS(s) > SB_BLOCK_COUNT(s) / 20) {
		for (; bm < end_bm; bm++, off = 0) {
			if ((off && (!unfm || (file_block != 0)))
			    || SB_AP_BITMAP(s)[bm].free_count >
			    (s->s_blocksize << 3) / 10)
				nr_allocated =
				    scan_bitmap_block(th, bm, &off, off_max,
						      min, max, unfm);
			if (nr_allocated)
				goto ret;
		}
		/* we know from above that start is a reasonable number */
		get_bit_address(s, *start, &bm, &off);
	}

	for (; bm < end_bm; bm++, off = 0) {
		nr_allocated =
		    scan_bitmap_block(th, bm, &off, off_max, min, max, unfm);
		if (nr_allocated)
			goto ret;
	}

	nr_allocated =
	    scan_bitmap_block(th, bm, &off, end_off + 1, min, max, unfm);

      ret:
	*start = bm * off_max + off;
	return nr_allocated;
}
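
/*
 * Example of the skip_busy fast path above, with 4 KiB blocks: a bitmap is
 * treated as "busy" and skipped unless it still has more than
 * 32768 / 10 = 3276 free blocks (or the search is already positioned inside
 * it), and that pass is only taken while the filesystem as a whole still has
 * more than SB_BLOCK_COUNT(s) / 20, i.e. 5%, of its blocks free.
 */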

static void _reiserfs_free_block(struct reiserfs_transaction_handle *th,
				 struct inode *inode, b_blocknr_t block,
				 int for_unformatted)
{
	struct super_block *s = th->t_super;
	struct reiserfs_super_block *rs;
	struct buffer_head *sbh, *bmbh;
	struct reiserfs_bitmap_info *apbi;
	unsigned int nr, offset;

	BUG_ON(!th->t_trans_id);

	PROC_INFO_INC(s, free_block);

	rs = SB_DISK_SUPER_BLOCK(s);
	sbh = SB_BUFFER_WITH_SB(s);
	apbi = SB_AP_BITMAP(s);

	get_bit_address(s, block, &nr, &offset);

	if (nr >= reiserfs_bmap_count(s)) {
		reiserfs_error(s, "vs-4075", "block %lu is out of range",
			       block);
		return;
	}

	bmbh = reiserfs_read_bitmap_block(s, nr);
	if (!bmbh)
		return;

	reiserfs_prepare_for_journal(s, bmbh, 1);

	/* clear bit for the given block in bit map */
	if (!reiserfs_test_and_clear_le_bit(offset, bmbh->b_data)) {
		reiserfs_error(s, "vs-4080",
			       "block %lu: bit already cleared", block);
	}
	apbi[nr].free_count++;
	journal_mark_dirty(th, s, bmbh);
	brelse(bmbh);

	reiserfs_prepare_for_journal(s, sbh, 1);
	/* update super block */
	set_sb_free_blocks(rs, sb_free_blocks(rs) + 1);

	journal_mark_dirty(th, s, sbh);
	if (for_unformatted) {
		int depth = reiserfs_write_unlock_nested(s);
		dquot_free_block_nodirty(inode, 1);
		reiserfs_write_lock_nested(s, depth);
	}
}

void reiserfs_free_block(struct reiserfs_transaction_handle *th,
			 struct inode *inode, b_blocknr_t block,
			 int for_unformatted)
{
	struct super_block *s = th->t_super;
	BUG_ON(!th->t_trans_id);

	RFALSE(!s, "vs-4061: trying to free block on nonexistent device");
	if (!is_reusable(s, block, 1))
		return;

	if (block > sb_block_count(REISERFS_SB(s)->s_rs)) {
		reiserfs_error(th->t_super, "bitmap-4072",
			       "Trying to free block outside file system "
			       "boundaries (%lu > %lu)",
			       block, sb_block_count(REISERFS_SB(s)->s_rs));
		return;
	}
	/* mark it before we clear it, just in case */
	journal_mark_freed(th, s, block);
	_reiserfs_free_block(th, inode, block, for_unformatted);
}

/* preallocated blocks don't need to be run through journal_mark_freed */
static void reiserfs_free_prealloc_block(struct reiserfs_transaction_handle *th,
					 struct inode *inode, b_blocknr_t block)
{
	BUG_ON(!th->t_trans_id);
	RFALSE(!th->t_super,
	       "vs-4060: trying to free block on nonexistent device");
	if (!is_reusable(th->t_super, block, 1))
		return;
	_reiserfs_free_block(th, inode, block, 1);
}

static void __discard_prealloc(struct reiserfs_transaction_handle *th,
			       struct reiserfs_inode_info *ei)
{
	unsigned long save = ei->i_prealloc_block;
	int dirty = 0;
	struct inode *inode = &ei->vfs_inode;
	BUG_ON(!th->t_trans_id);
#ifdef CONFIG_REISERFS_CHECK
	if (ei->i_prealloc_count < 0)
		reiserfs_error(th->t_super, "zam-4001",
			       "inode has negative prealloc blocks count.");
#endif
	while (ei->i_prealloc_count > 0) {
		reiserfs_free_prealloc_block(th, inode, ei->i_prealloc_block);
		ei->i_prealloc_block++;
		ei->i_prealloc_count--;
		dirty = 1;
	}
	if (dirty)
		reiserfs_update_sd(th, inode);
	ei->i_prealloc_block = save;
	list_del_init(&(ei->i_prealloc_list));
}

/* FIXME: It should be an inline function */
void reiserfs_discard_prealloc(struct reiserfs_transaction_handle *th,
			       struct inode *inode)
{
	struct reiserfs_inode_info *ei = REISERFS_I(inode);
	BUG_ON(!th->t_trans_id);
	if (ei->i_prealloc_count)
		__discard_prealloc(th, ei);
}

void reiserfs_discard_all_prealloc(struct reiserfs_transaction_handle *th)
{
	struct list_head *plist = &SB_JOURNAL(th->t_super)->j_prealloc_list;

	BUG_ON(!th->t_trans_id);

	while (!list_empty(plist)) {
		struct reiserfs_inode_info *ei;
		ei = list_entry(plist->next, struct reiserfs_inode_info,
				i_prealloc_list);
#ifdef CONFIG_REISERFS_CHECK
		if (!ei->i_prealloc_count) {
			reiserfs_error(th->t_super, "zam-4001",
				       "inode is in prealloc list but has "
				       "no preallocated blocks.");
		}
#endif
		__discard_prealloc(th, ei);
	}
}

void reiserfs_init_alloc_options(struct super_block *s)
{
	set_bit(_ALLOC_skip_busy, &SB_ALLOC_OPTS(s));
	set_bit(_ALLOC_dirid_groups, &SB_ALLOC_OPTS(s));
	set_bit(_ALLOC_packing_groups, &SB_ALLOC_OPTS(s));
}

/* block allocator related options are parsed here */
int reiserfs_parse_alloc_options(struct super_block *s, char *options)
{
	char *this_char, *value;

	REISERFS_SB(s)->s_alloc_options.bits = 0;	/* clear default settings */

	while ((this_char = strsep(&options, ":")) != NULL) {
		if ((value = strchr(this_char, '=')) != NULL)
			*value++ = 0;

		if (!strcmp(this_char, "concentrating_formatted_nodes")) {
			int temp;
			SET_OPTION(concentrating_formatted_nodes);
			temp = (value
				&& *value) ? simple_strtoul(value, &value,
							    0) : 10;
			if (temp <= 0 || temp > 100) {
				REISERFS_SB(s)->s_alloc_options.border = 10;
			} else {
				REISERFS_SB(s)->s_alloc_options.border =
				    100 / temp;
			}
			continue;
		}
		if (!strcmp(this_char, "displacing_large_files")) {
			SET_OPTION(displacing_large_files);
			REISERFS_SB(s)->s_alloc_options.large_file_size =
			    (value
			     && *value) ? simple_strtoul(value, &value, 0) : 16;
			continue;
		}
		if (!strcmp(this_char, "displacing_new_packing_localities")) {
			SET_OPTION(displacing_new_packing_localities);
			continue;
		}

		if (!strcmp(this_char, "old_hashed_relocation")) {
			SET_OPTION(old_hashed_relocation);
			continue;
		}

		if (!strcmp(this_char, "new_hashed_relocation")) {
			SET_OPTION(new_hashed_relocation);
			continue;
		}

		if (!strcmp(this_char, "dirid_groups")) {
			SET_OPTION(dirid_groups);
			continue;
		}
		if (!strcmp(this_char, "oid_groups")) {
			SET_OPTION(oid_groups);
			continue;
		}
		if (!strcmp(this_char, "packing_groups")) {
			SET_OPTION(packing_groups);
			continue;
		}
		if (!strcmp(this_char, "hashed_formatted_nodes")) {
			SET_OPTION(hashed_formatted_nodes);
			continue;
		}

		if (!strcmp(this_char, "skip_busy")) {
			SET_OPTION(skip_busy);
			continue;
		}

		if (!strcmp(this_char, "hundredth_slices")) {
			SET_OPTION(hundredth_slices);
			continue;
		}

		if (!strcmp(this_char, "old_way")) {
			SET_OPTION(old_way);
			continue;
		}

		if (!strcmp(this_char, "displace_based_on_dirid")) {
			SET_OPTION(displace_based_on_dirid);
			continue;
		}

		if (!strcmp(this_char, "preallocmin")) {
			REISERFS_SB(s)->s_alloc_options.preallocmin =
			    (value
			     && *value) ? simple_strtoul(value, &value, 0) : 4;
			continue;
		}

		if (!strcmp(this_char, "preallocsize")) {
			REISERFS_SB(s)->s_alloc_options.preallocsize =
			    (value
			     && *value) ? simple_strtoul(value, &value,
							 0) :
			    PREALLOCATION_SIZE;
			continue;
		}

		reiserfs_warning(s, "zam-4001", "unknown option - %s",
				 this_char);
		return 1;
	}

	reiserfs_info(s, "allocator options = [%08x]\n", SB_ALLOC_OPTS(s));
	return 0;
}
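
/*
 * Example: the option string is colon-separated and is the part that follows
 * "alloc=" on the mount command line (see show_alloc_options() below), e.g.
 *
 *	alloc=skip_busy:dirid_groups:preallocsize=17
 *
 * sets the skip_busy and dirid_groups bits and a 17-block preallocation size.
 */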

static void print_sep(struct seq_file *seq, int *first)
{
	if (!*first)
		seq_puts(seq, ":");
	else
		*first = 0;
}

void show_alloc_options(struct seq_file *seq, struct super_block *s)
{
	int first = 1;

	if (SB_ALLOC_OPTS(s) == ((1 << _ALLOC_skip_busy) |
		(1 << _ALLOC_dirid_groups) | (1 << _ALLOC_packing_groups)))
		return;

	seq_puts(seq, ",alloc=");

	if (TEST_OPTION(concentrating_formatted_nodes, s)) {
		print_sep(seq, &first);
		if (REISERFS_SB(s)->s_alloc_options.border != 10) {
			seq_printf(seq, "concentrating_formatted_nodes=%d",
				100 / REISERFS_SB(s)->s_alloc_options.border);
		} else
			seq_puts(seq, "concentrating_formatted_nodes");
	}
	if (TEST_OPTION(displacing_large_files, s)) {
		print_sep(seq, &first);
		if (REISERFS_SB(s)->s_alloc_options.large_file_size != 16) {
			seq_printf(seq, "displacing_large_files=%lu",
			    REISERFS_SB(s)->s_alloc_options.large_file_size);
		} else
			seq_puts(seq, "displacing_large_files");
	}
	if (TEST_OPTION(displacing_new_packing_localities, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "displacing_new_packing_localities");
	}
	if (TEST_OPTION(old_hashed_relocation, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "old_hashed_relocation");
	}
	if (TEST_OPTION(new_hashed_relocation, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "new_hashed_relocation");
	}
	if (TEST_OPTION(dirid_groups, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "dirid_groups");
	}
	if (TEST_OPTION(oid_groups, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "oid_groups");
	}
	if (TEST_OPTION(packing_groups, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "packing_groups");
	}
	if (TEST_OPTION(hashed_formatted_nodes, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "hashed_formatted_nodes");
	}
	if (TEST_OPTION(skip_busy, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "skip_busy");
	}
	if (TEST_OPTION(hundredth_slices, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "hundredth_slices");
	}
	if (TEST_OPTION(old_way, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "old_way");
	}
	if (TEST_OPTION(displace_based_on_dirid, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "displace_based_on_dirid");
	}
	if (REISERFS_SB(s)->s_alloc_options.preallocmin != 0) {
		print_sep(seq, &first);
		seq_printf(seq, "preallocmin=%d",
				REISERFS_SB(s)->s_alloc_options.preallocmin);
	}
	if (REISERFS_SB(s)->s_alloc_options.preallocsize != 17) {
		print_sep(seq, &first);
		seq_printf(seq, "preallocsize=%d",
				REISERFS_SB(s)->s_alloc_options.preallocsize);
	}
}

static inline void new_hashed_relocation(reiserfs_blocknr_hint_t * hint)
{
	char *hash_in;
	if (hint->formatted_node) {
		hash_in = (char *)&hint->key.k_dir_id;
	} else {
		if (!hint->inode) {
			/* hint->search_start = hint->beg; */
			hash_in = (char *)&hint->key.k_dir_id;
		} else
		    if (TEST_OPTION(displace_based_on_dirid, hint->th->t_super))
			hash_in = (char *)(&INODE_PKEY(hint->inode)->k_dir_id);
		else
			hash_in =
			    (char *)(&INODE_PKEY(hint->inode)->k_objectid);
	}

	hint->search_start =
	    hint->beg + keyed_hash(hash_in, 4) % (hint->end - hint->beg);
}

/*
 * Relocation based on dirid: files are hashed by their directory id into a
 * given bitmap block group.  Formatted nodes are unaffected, a separate
 * policy covers them.
 */
static void dirid_groups(reiserfs_blocknr_hint_t * hint)
{
	unsigned long hash;
	__u32 dirid = 0;
	int bm = 0;
	struct super_block *sb = hint->th->t_super;
	if (hint->inode)
		dirid = le32_to_cpu(INODE_PKEY(hint->inode)->k_dir_id);
	else if (hint->formatted_node)
		dirid = hint->key.k_dir_id;

	if (dirid) {
		bm = bmap_hash_id(sb, dirid);
		hash = bm * (sb->s_blocksize << 3);
		/* give a portion of the block group to metadata */
		if (hint->inode)
			hash += sb->s_blocksize / 2;
		hint->search_start = hash;
	}
}
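
/*
 * Example: with 4 KiB blocks a bitmap group spans 32768 blocks, so a dirid
 * hashing to group 3 gives search_start = 3 * 32768 = 98304; for data blocks
 * (hint->inode != NULL) another s_blocksize / 2 = 2048 blocks are skipped,
 * leaving the front of the group to metadata.
 */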

/*
 * Relocation based on oid: files are hashed by their object id into a given
 * bitmap block group.  Formatted nodes are unaffected, a separate policy
 * covers them.
 */
static void oid_groups(reiserfs_blocknr_hint_t * hint)
{
	if (hint->inode) {
		unsigned long hash;
		__u32 oid;
		__u32 dirid;
		int bm;

		dirid = le32_to_cpu(INODE_PKEY(hint->inode)->k_dir_id);

		/* keep the root dir and its first set of subdirs close to
		 * the start of the disk */
		if (dirid <= 2)
			hash = (hint->inode->i_sb->s_blocksize << 3);
		else {
			oid = le32_to_cpu(INODE_PKEY(hint->inode)->k_objectid);
			bm = bmap_hash_id(hint->inode->i_sb, oid);
			hash = bm * (hint->inode->i_sb->s_blocksize << 3);
		}
		hint->search_start = hash;
	}
}

/* returns 1 if it finds an indirect item and gets valid hint info
 * from it, otherwise 0
 */
static int get_left_neighbor(reiserfs_blocknr_hint_t * hint)
{
	struct treepath *path;
	struct buffer_head *bh;
	struct item_head *ih;
	int pos_in_item;
	__le32 *item;
	int ret = 0;

	if (!hint->path)	/* reiserfs code can call this function w/o pointer to path
				 * structure supplied; then we rely on supplied search_start */
		return 0;

	path = hint->path;
	bh = get_last_bh(path);
	RFALSE(!bh, "green-4002: Illegal path specified to get_left_neighbor");
	ih = get_ih(path);
	pos_in_item = path->pos_in_item;
	item = get_item(path);

	hint->search_start = bh->b_blocknr;

	if (!hint->formatted_node && is_indirect_le_ih(ih)) {
		/* for indirect item: go to left and look for the first non-hole entry
		   in the indirect item */
		if (pos_in_item == I_UNFM_NUM(ih))
			pos_in_item--;
		/* pos_in_item = I_UNFM_NUM (ih) - 1; */
		while (pos_in_item >= 0) {
			int t = get_block_num(item, pos_in_item);
			if (t) {
				hint->search_start = t;
				ret = 1;
				break;
			}
			pos_in_item--;
		}
	}

	/* does result value fit into specified region? */
	return ret;
}

/* Should be: if this is a formatted node, try to put it on the first part of
   the device (specified as a percentage via a mount option); else try to put
   it on the last part of the device.  This is not to say it is good code to
   do so, but the effect should be measured. */
static inline void set_border_in_hint(struct super_block *s,
				      reiserfs_blocknr_hint_t * hint)
{
	b_blocknr_t border =
	    SB_BLOCK_COUNT(s) / REISERFS_SB(s)->s_alloc_options.border;

	if (hint->formatted_node)
		hint->end = border - 1;
	else
		hint->beg = border;
}

static inline void displace_large_file(reiserfs_blocknr_hint_t * hint)
{
	if (TEST_OPTION(displace_based_on_dirid, hint->th->t_super))
		hint->search_start =
		    hint->beg +
		    keyed_hash((char *)(&INODE_PKEY(hint->inode)->k_dir_id),
			       4) % (hint->end - hint->beg);
	else
		hint->search_start =
		    hint->beg +
		    keyed_hash((char *)(&INODE_PKEY(hint->inode)->k_objectid),
			       4) % (hint->end - hint->beg);
}

static inline void hash_formatted_node(reiserfs_blocknr_hint_t * hint)
{
	char *hash_in;

	if (!hint->inode)
		hash_in = (char *)&hint->key.k_dir_id;
	else if (TEST_OPTION(displace_based_on_dirid, hint->th->t_super))
		hash_in = (char *)(&INODE_PKEY(hint->inode)->k_dir_id);
	else
		hash_in = (char *)(&INODE_PKEY(hint->inode)->k_objectid);

	hint->search_start =
	    hint->beg + keyed_hash(hash_in, 4) % (hint->end - hint->beg);
}

static inline int
this_blocknr_allocation_would_make_it_a_large_file(reiserfs_blocknr_hint_t *
						   hint)
{
	return hint->block ==
	    REISERFS_SB(hint->th->t_super)->s_alloc_options.large_file_size;
}

#ifdef DISPLACE_NEW_PACKING_LOCALITIES
static inline void displace_new_packing_locality(reiserfs_blocknr_hint_t * hint)
{
	struct in_core_key *key = &hint->key;

	hint->th->displace_new_blocks = 0;
	hint->search_start =
	    hint->beg + keyed_hash((char *)(&key->k_objectid),
				   4) % (hint->end - hint->beg);
}
#endif

static inline int old_hashed_relocation(reiserfs_blocknr_hint_t * hint)
{
	b_blocknr_t border;
	u32 hash_in;

	if (hint->formatted_node || hint->inode == NULL) {
		return 0;
	}

	hash_in = le32_to_cpu((INODE_PKEY(hint->inode))->k_dir_id);
	border =
	    hint->beg + (u32) keyed_hash(((char *)(&hash_in)),
					 4) % (hint->end - hint->beg - 1);
	if (border > hint->search_start)
		hint->search_start = border;

	return 1;
}

static inline int old_way(reiserfs_blocknr_hint_t * hint)
{
	b_blocknr_t border;

	if (hint->formatted_node || hint->inode == NULL) {
		return 0;
	}

	border =
	    hint->beg +
	    le32_to_cpu(INODE_PKEY(hint->inode)->k_dir_id) % (hint->end -
							      hint->beg);
	if (border > hint->search_start)
		hint->search_start = border;

	return 1;
}

static inline void hundredth_slices(reiserfs_blocknr_hint_t * hint)
{
	struct in_core_key *key = &hint->key;
	b_blocknr_t slice_start;

	slice_start =
	    (keyed_hash((char *)(&key->k_dir_id), 4) % 100) * (hint->end / 100);
	if (slice_start > hint->search_start
	    || slice_start + (hint->end / 100) <= hint->search_start) {
		hint->search_start = slice_start;
	}
}
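
/*
 * Example: the dirid hash modulo 100 picks one of 100 equal slices of the
 * device; if hint->end is 1000000, each slice is 10000 blocks and a hash of
 * 42 moves search_start to 420000, unless it already lies inside that slice.
 */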

static void determine_search_start(reiserfs_blocknr_hint_t * hint,
				   int amount_needed)
{
	struct super_block *s = hint->th->t_super;
	int unfm_hint;

	hint->beg = 0;
	hint->end = SB_BLOCK_COUNT(s) - 1;

	/* This is former border algorithm. Now with tunable border offset */
	if (concentrating_formatted_nodes(s))
		set_border_in_hint(s, hint);

#ifdef DISPLACE_NEW_PACKING_LOCALITIES
	/* whenever we create a new directory, we displace it.  At first we will
	   hash for location, later we might look for a moderately empty place for
	   it */
	if (displacing_new_packing_localities(s)
	    && hint->th->displace_new_blocks) {
		displace_new_packing_locality(hint);

		/* we do not continue determine_search_start,
		 * if new packing locality is being displaced */
		return;
	}
#endif

	/* all persons should feel encouraged to add more special cases here and
	 * test them */

	if (displacing_large_files(s) && !hint->formatted_node
	    && this_blocknr_allocation_would_make_it_a_large_file(hint)) {
		displace_large_file(hint);
		return;
	}

	/* if none of our special cases is relevant, use the left neighbor in the
	   tree order of the new node we are allocating for */
	if (hint->formatted_node && TEST_OPTION(hashed_formatted_nodes, s)) {
		hash_formatted_node(hint);
		return;
	}

	unfm_hint = get_left_neighbor(hint);

	/* Mimic old block allocator behaviour, that is if VFS allowed for preallocation,
	   new blocks are displaced based on directory ID. Also, if suggested search_start
	   is less than last preallocated block, we start searching from it, assuming that
	   HDD dataflow is faster in forward direction */
	if (TEST_OPTION(old_way, s)) {
		if (!hint->formatted_node) {
			if (!reiserfs_hashed_relocation(s))
				old_way(hint);
			else if (!reiserfs_no_unhashed_relocation(s))
				old_hashed_relocation(hint);

			if (hint->inode
			    && hint->search_start <
			    REISERFS_I(hint->inode)->i_prealloc_block)
				hint->search_start =
				    REISERFS_I(hint->inode)->i_prealloc_block;
		}
		return;
	}

	/* This is an approach proposed by Hans */
	if (TEST_OPTION(hundredth_slices, s)
	    && !(displacing_large_files(s) && !hint->formatted_node)) {
		hundredth_slices(hint);
		return;
	}

	/* old_hashed_relocation only works on unformatted */
	if (!unfm_hint && !hint->formatted_node &&
	    TEST_OPTION(old_hashed_relocation, s)) {
		old_hashed_relocation(hint);
	}
	/* new_hashed_relocation works with both formatted/unformatted nodes */
	if ((!unfm_hint || hint->formatted_node) &&
	    TEST_OPTION(new_hashed_relocation, s)) {
		new_hashed_relocation(hint);
	}
	/* dirid grouping works only on unformatted nodes */
	if (!unfm_hint && !hint->formatted_node && TEST_OPTION(dirid_groups, s)) {
		dirid_groups(hint);
	}
#ifdef DISPLACE_NEW_PACKING_LOCALITIES
	if (hint->formatted_node && TEST_OPTION(dirid_groups, s)) {
		dirid_groups(hint);
	}
#endif

	/* oid grouping works only on unformatted nodes */
	if (!unfm_hint && !hint->formatted_node && TEST_OPTION(oid_groups, s)) {
		oid_groups(hint);
	}
	return;
}

static int determine_prealloc_size(reiserfs_blocknr_hint_t * hint)
{
	/* make minimum size a mount option and benchmark both ways */
	/* we preallocate blocks only for regular files, specific size */
	/* benchmark preallocating always and see what happens */

	hint->prealloc_size = 0;

	if (!hint->formatted_node && hint->preallocate) {
		if (S_ISREG(hint->inode->i_mode)
		    && hint->inode->i_size >=
		    REISERFS_SB(hint->th->t_super)->s_alloc_options.
		    preallocmin * hint->inode->i_sb->s_blocksize)
			hint->prealloc_size =
			    REISERFS_SB(hint->th->t_super)->s_alloc_options.
			    preallocsize - 1;
	}
	return CARRY_ON;
}
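
/*
 * Example, using the fallback values accepted by reiserfs_parse_alloc_options()
 * above ("preallocmin" defaults to 4 and "preallocsize" to
 * PREALLOCATION_SIZE = 9 when given without a value): a regular file of at
 * least 4 blocks then gets prealloc_size = 8 extra blocks per request.
 */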

/* XXX I know it could be merged with the upper-level function;
   but maybe the resulting function would be too complex. */
static inline int allocate_without_wrapping_disk(reiserfs_blocknr_hint_t * hint,
						 b_blocknr_t * new_blocknrs,
						 b_blocknr_t start,
						 b_blocknr_t finish, int min,
						 int amount_needed,
						 int prealloc_size)
{
	int rest = amount_needed;
	int nr_allocated;

	while (rest > 0 && start <= finish) {
		nr_allocated = scan_bitmap(hint->th, &start, finish, min,
					   rest + prealloc_size,
					   !hint->formatted_node, hint->block);

		if (nr_allocated == 0)	/* no new blocks allocated, return */
			break;

		/* fill free_blocknrs array first */
		while (rest > 0 && nr_allocated > 0) {
			*new_blocknrs++ = start++;
			rest--;
			nr_allocated--;
		}

		/* do we have something to fill prealloc. array also ? */
		if (nr_allocated > 0) {
			/* it means prealloc_size was greater than 0 and we do preallocation */
			list_add(&REISERFS_I(hint->inode)->i_prealloc_list,
				 &SB_JOURNAL(hint->th->t_super)->
				 j_prealloc_list);
			REISERFS_I(hint->inode)->i_prealloc_block = start;
			REISERFS_I(hint->inode)->i_prealloc_count =
			    nr_allocated;
			break;
		}
	}

	return (amount_needed - rest);
}

static inline int blocknrs_and_prealloc_arrays_from_search_start
    (reiserfs_blocknr_hint_t * hint, b_blocknr_t * new_blocknrs,
     int amount_needed) {
	struct super_block *s = hint->th->t_super;
	b_blocknr_t start = hint->search_start;
	b_blocknr_t finish = SB_BLOCK_COUNT(s) - 1;
	int passno = 0;
	int nr_allocated = 0;
	int depth;

	determine_prealloc_size(hint);
	if (!hint->formatted_node) {
		int quota_ret;
#ifdef REISERQUOTA_DEBUG
		reiserfs_debug(s, REISERFS_DEBUG_CODE,
			       "reiserquota: allocating %d blocks id=%u",
			       amount_needed, hint->inode->i_uid);
#endif
		depth = reiserfs_write_unlock_nested(s);
		quota_ret =
		    dquot_alloc_block_nodirty(hint->inode, amount_needed);
		if (quota_ret) {	/* Quota exceeded? */
			reiserfs_write_lock_nested(s, depth);
			return QUOTA_EXCEEDED;
		}
		if (hint->preallocate && hint->prealloc_size) {
#ifdef REISERQUOTA_DEBUG
			reiserfs_debug(s, REISERFS_DEBUG_CODE,
				       "reiserquota: allocating (prealloc) %d blocks id=%u",
				       hint->prealloc_size, hint->inode->i_uid);
#endif
			quota_ret = dquot_prealloc_block_nodirty(hint->inode,
							 hint->prealloc_size);
			if (quota_ret)
				hint->preallocate = hint->prealloc_size = 0;
		}
		/* for unformatted nodes, force large allocations */
		reiserfs_write_lock_nested(s, depth);
	}

	do {
		switch (passno++) {
		case 0:	/* Search from hint->search_start to end of disk */
			start = hint->search_start;
			finish = SB_BLOCK_COUNT(s) - 1;
			break;
		case 1:	/* Search from hint->beg to hint->search_start */
			start = hint->beg;
			finish = hint->search_start;
			break;
		case 2:	/* Last chance: Search from 0 to hint->beg */
			start = 0;
			finish = hint->beg;
			break;
		default:	/* We've tried searching everywhere, not enough space */
			/* Free the blocks */
			if (!hint->formatted_node) {
#ifdef REISERQUOTA_DEBUG
				reiserfs_debug(s, REISERFS_DEBUG_CODE,
					       "reiserquota: freeing (nospace) %d blocks id=%u",
					       amount_needed +
					       hint->prealloc_size -
					       nr_allocated,
					       hint->inode->i_uid);
#endif
				/* Free not allocated blocks */
				depth = reiserfs_write_unlock_nested(s);
				dquot_free_block_nodirty(hint->inode,
					amount_needed + hint->prealloc_size -
					nr_allocated);
				reiserfs_write_lock_nested(s, depth);
			}
			while (nr_allocated--)
				reiserfs_free_block(hint->th, hint->inode,
						    new_blocknrs[nr_allocated],
						    !hint->formatted_node);

			return NO_DISK_SPACE;
		}
	} while ((nr_allocated += allocate_without_wrapping_disk(hint,
								  new_blocknrs +
								  nr_allocated,
								  start, finish,
								  1,
								  amount_needed -
								  nr_allocated,
								  hint->
								  prealloc_size))
		 < amount_needed);

	if (!hint->formatted_node &&
	    amount_needed + hint->prealloc_size >
	    nr_allocated + REISERFS_I(hint->inode)->i_prealloc_count) {
		/* Some of the preallocated blocks were not allocated */
#ifdef REISERQUOTA_DEBUG
		reiserfs_debug(s, REISERFS_DEBUG_CODE,
			       "reiserquota: freeing (failed prealloc) %d blocks id=%u",
			       amount_needed + hint->prealloc_size -
			       nr_allocated -
			       REISERFS_I(hint->inode)->i_prealloc_count,
			       hint->inode->i_uid);
#endif

		depth = reiserfs_write_unlock_nested(s);
		dquot_free_block_nodirty(hint->inode, amount_needed +
					 hint->prealloc_size - nr_allocated -
					 REISERFS_I(hint->inode)->
					 i_prealloc_count);
		reiserfs_write_lock_nested(s, depth);
	}

	return CARRY_ON;
}

/* grab new blocknrs from preallocated list */
/* return amount still needed after using them */
static int use_preallocated_list_if_available(reiserfs_blocknr_hint_t * hint,
					      b_blocknr_t * new_blocknrs,
					      int amount_needed)
{
	struct inode *inode = hint->inode;

	if (REISERFS_I(inode)->i_prealloc_count > 0) {
		while (amount_needed) {

			*new_blocknrs++ = REISERFS_I(inode)->i_prealloc_block++;
			REISERFS_I(inode)->i_prealloc_count--;

			amount_needed--;

			if (REISERFS_I(inode)->i_prealloc_count <= 0) {
				list_del(&REISERFS_I(inode)->i_prealloc_list);
				break;
			}
		}
	}
	/* return amount still needed after using preallocated blocks */
	return amount_needed;
}

int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t * hint,
			       b_blocknr_t * new_blocknrs,
			       int amount_needed,
			       /* amount of blocks we have already reserved */
			       int reserved_by_us)
{
	int initial_amount_needed = amount_needed;
	int ret;
	struct super_block *s = hint->th->t_super;

	/* Check if there is enough space, taking into account reserved space */
	if (SB_FREE_BLOCKS(s) - REISERFS_SB(s)->reserved_blocks <
	    amount_needed - reserved_by_us)
		return NO_DISK_SPACE;
	/* should this be if !hint->inode && hint->preallocate? */
	/* do you mean hint->formatted_node can be removed ? - Zam */
	/* hint->formatted_node cannot be removed because we try to access
	   inode information here, and there is often no inode associated with
	   metadata allocations - green */

	if (!hint->formatted_node && hint->preallocate) {
		amount_needed = use_preallocated_list_if_available
		    (hint, new_blocknrs, amount_needed);
		if (amount_needed == 0)	/* all blocknrs we need we got from
					   prealloc. list */
			return CARRY_ON;
		new_blocknrs += (initial_amount_needed - amount_needed);
	}

	/* find search start and save it in hint structure */
	determine_search_start(hint, amount_needed);
	if (hint->search_start >= SB_BLOCK_COUNT(s))
		hint->search_start = SB_BLOCK_COUNT(s) - 1;

	/* allocation itself; fill new_blocknrs and preallocation arrays */
	ret = blocknrs_and_prealloc_arrays_from_search_start
	    (hint, new_blocknrs, amount_needed);

	/* we used prealloc. list to fill (partially) new_blocknrs array. If final allocation fails we
	 * need to return blocks back to prealloc. list or just free them. -- Zam (I chose second
	 * variant) */

	if (ret != CARRY_ON) {
		while (amount_needed++ < initial_amount_needed) {
			reiserfs_free_block(hint->th, hint->inode,
					    *(--new_blocknrs), 1);
		}
	}
	return ret;
}

void reiserfs_cache_bitmap_metadata(struct super_block *sb,
				    struct buffer_head *bh,
				    struct reiserfs_bitmap_info *info)
{
	unsigned long *cur = (unsigned long *)(bh->b_data + bh->b_size);

	/* The first bit must ALWAYS be 1 */
	if (!reiserfs_test_le_bit(0, (unsigned long *)bh->b_data))
		reiserfs_error(sb, "reiserfs-2025", "bitmap block %lu is "
			       "corrupted: first bit must be 1", bh->b_blocknr);

	info->free_count = 0;

	while (--cur >= (unsigned long *)bh->b_data) {
		/* 0 and ~0 are special, we can optimize for them */
		if (*cur == 0)
			info->free_count += BITS_PER_LONG;
		else if (*cur != ~0L)	/* A mix, investigate */
			info->free_count += BITS_PER_LONG - hweight_long(*cur);
	}
}
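
/*
 * Example: on a 64-bit machine a word of 0 adds 64 free blocks, a word of ~0
 * adds none, and a mixed word such as 0x00000000ffffffffUL (hweight 32) adds
 * 64 - 32 = 32 blocks to info->free_count.
 */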

struct buffer_head *reiserfs_read_bitmap_block(struct super_block *sb,
					       unsigned int bitmap)
{
	b_blocknr_t block = (sb->s_blocksize << 3) * bitmap;
	struct reiserfs_bitmap_info *info = SB_AP_BITMAP(sb) + bitmap;
	struct buffer_head *bh;

	/* Way old format filesystems had the bitmaps packed up front.
	 * I doubt there are any of these left, but just in case... */
	if (unlikely(test_bit(REISERFS_OLD_FORMAT,
			      &(REISERFS_SB(sb)->s_properties))))
		block = REISERFS_SB(sb)->s_sbh->b_blocknr + 1 + bitmap;
	else if (bitmap == 0)
		block = (REISERFS_DISK_OFFSET_IN_BYTES >> sb->s_blocksize_bits) + 1;

	bh = sb_bread(sb, block);
	if (bh == NULL)
		reiserfs_warning(sb, "sh-2029: %s: bitmap block (#%u) "
				 "reading failed", __func__, block);
	else {
		if (buffer_locked(bh)) {
			int depth;
			PROC_INFO_INC(sb, scan_bitmap.wait);
			depth = reiserfs_write_unlock_nested(sb);
			__wait_on_buffer(bh);
			reiserfs_write_lock_nested(sb, depth);
		}
		BUG_ON(!buffer_uptodate(bh));
		BUG_ON(atomic_read(&bh->b_count) == 0);

		if (info->free_count == UINT_MAX)
			reiserfs_cache_bitmap_metadata(sb, bh, info);
	}

	return bh;
}
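
/*
 * Example layout on a new-format filesystem with 4 KiB blocks: bitmap n
 * normally sits at block n * 32768, at the start of the group it describes;
 * bitmap 0 is the exception and sits right after the super block, i.e.
 * (assuming the usual 64 KiB REISERFS_DISK_OFFSET_IN_BYTES) at block
 * 65536 / 4096 + 1 = 17.
 */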

int reiserfs_init_bitmap_cache(struct super_block *sb)
{
	struct reiserfs_bitmap_info *bitmap;
	unsigned int bmap_nr = reiserfs_bmap_count(sb);

	bitmap = vmalloc(sizeof(*bitmap) * bmap_nr);
	if (bitmap == NULL)
		return -ENOMEM;

	memset(bitmap, 0xff, sizeof(*bitmap) * bmap_nr);
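	/*
	 * Filling with 0xff sets every free_count to UINT_MAX, the
	 * "not yet cached" sentinel tested in reiserfs_read_bitmap_block()
	 * and block_group_used().
	 */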

	SB_AP_BITMAP(sb) = bitmap;

	return 0;
}

void reiserfs_free_bitmap_cache(struct super_block *sb)
{
	if (SB_AP_BITMAP(sb)) {
		vfree(SB_AP_BITMAP(sb));
		SB_AP_BITMAP(sb) = NULL;
	}
}