/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

/* Reiserfs block (de)allocator, bitmap-based. */
6 #include <linux/time.h>
8 #include <linux/errno.h>
9 #include <linux/buffer_head.h>
10 #include <linux/kernel.h>
11 #include <linux/pagemap.h>
12 #include <linux/vmalloc.h>
13 #include <linux/quotaops.h>
14 #include <linux/seq_file.h>
/* Default preallocation window size (blocks), as a mount-option default. */
#define PREALLOCATION_SIZE 9

/* different reiserfs block allocator options */

#define SB_ALLOC_OPTS(s) (REISERFS_SB(s)->s_alloc_options.bits)

/* Bit numbers inside s_alloc_options.bits selecting allocator policies. */
#define  _ALLOC_concentrating_formatted_nodes 0
#define  _ALLOC_displacing_large_files 1
#define  _ALLOC_displacing_new_packing_localities 2
#define  _ALLOC_old_hashed_relocation 3
#define  _ALLOC_new_hashed_relocation 4
#define  _ALLOC_skip_busy 5
#define  _ALLOC_displace_based_on_dirid 6
#define  _ALLOC_hashed_formatted_nodes 7
#define  _ALLOC_old_way 8
#define  _ALLOC_hundredth_slices 9
#define  _ALLOC_dirid_groups 10
#define  _ALLOC_oid_groups 11
#define  _ALLOC_packing_groups 12

#define  concentrating_formatted_nodes(s)	test_bit(_ALLOC_concentrating_formatted_nodes, &SB_ALLOC_OPTS(s))
#define  displacing_large_files(s)		test_bit(_ALLOC_displacing_large_files, &SB_ALLOC_OPTS(s))
#define  displacing_new_packing_localities(s)	test_bit(_ALLOC_displacing_new_packing_localities, &SB_ALLOC_OPTS(s))

/*
 * Set one allocator option bit and log it; wrapped in do/while(0) so the
 * macro behaves as a single statement (expects a super_block `s` in scope).
 */
#define SET_OPTION(optname) \
	do { \
		reiserfs_info(s, "block allocator option \"%s\" is set", #optname); \
		set_bit(_ALLOC_ ## optname, &SB_ALLOC_OPTS(s)); \
	} while (0)
#define TEST_OPTION(optname, s) \
	test_bit(_ALLOC_ ## optname, &SB_ALLOC_OPTS(s))
48 static inline void get_bit_address(struct super_block
*s
,
50 unsigned int *bmap_nr
,
54 * It is in the bitmap block number equal to the block
55 * number divided by the number of bits in a block.
57 *bmap_nr
= block
>> (s
->s_blocksize_bits
+ 3);
58 /* Within that bitmap block it is located at bit offset *offset. */
59 *offset
= block
& ((s
->s_blocksize
<< 3) - 1);
62 int is_reusable(struct super_block
*s
, b_blocknr_t block
, int bit_value
)
64 unsigned int bmap
, offset
;
65 unsigned int bmap_count
= reiserfs_bmap_count(s
);
67 if (block
== 0 || block
>= SB_BLOCK_COUNT(s
)) {
68 reiserfs_error(s
, "vs-4010",
69 "block number is out of range %lu (%u)",
70 block
, SB_BLOCK_COUNT(s
));
74 get_bit_address(s
, block
, &bmap
, &offset
);
77 * Old format filesystem? Unlikely, but the bitmaps are all
78 * up front so we need to account for it.
80 if (unlikely(test_bit(REISERFS_OLD_FORMAT
,
81 &REISERFS_SB(s
)->s_properties
))) {
82 b_blocknr_t bmap1
= REISERFS_SB(s
)->s_sbh
->b_blocknr
+ 1;
84 block
<= bmap1
+ bmap_count
) {
85 reiserfs_error(s
, "vs-4019", "bitmap block %lu(%u) "
86 "can't be freed or reused",
92 reiserfs_error(s
, "vs-4020", "bitmap block %lu(%u) "
93 "can't be freed or reused",
99 if (bmap
>= bmap_count
) {
100 reiserfs_error(s
, "vs-4030", "bitmap for requested block "
101 "is out of range: block=%lu, bitmap_nr=%u",
106 if (bit_value
== 0 && block
== SB_ROOT_BLOCK(s
)) {
107 reiserfs_error(s
, "vs-4050", "this is root block (%u), "
108 "it must be busy", SB_ROOT_BLOCK(s
));
116 * Searches in journal structures for a given block number (bmap, off).
117 * If block is found in reiserfs journal it suggests next free block
120 static inline int is_block_in_journal(struct super_block
*s
, unsigned int bmap
,
125 if (reiserfs_in_journal(s
, bmap
, off
, 1, &tmp
)) {
126 if (tmp
) { /* hint supplied */
128 PROC_INFO_INC(s
, scan_bitmap
.in_journal_hint
);
130 (*next
) = off
+ 1; /* inc offset to avoid looping. */
131 PROC_INFO_INC(s
, scan_bitmap
.in_journal_nohint
);
133 PROC_INFO_INC(s
, scan_bitmap
.retry
);
140 * Searches for a window of zero bits with given minimum and maximum
141 * lengths in one bitmap block
143 static int scan_bitmap_block(struct reiserfs_transaction_handle
*th
,
144 unsigned int bmap_n
, int *beg
, int boundary
,
145 int min
, int max
, int unfm
)
147 struct super_block
*s
= th
->t_super
;
148 struct reiserfs_bitmap_info
*bi
= &SB_AP_BITMAP(s
)[bmap_n
];
149 struct buffer_head
*bh
;
153 BUG_ON(!th
->t_trans_id
);
154 RFALSE(bmap_n
>= reiserfs_bmap_count(s
), "Bitmap %u is out of "
155 "range (0..%u)", bmap_n
, reiserfs_bmap_count(s
) - 1);
156 PROC_INFO_INC(s
, scan_bitmap
.bmap
);
159 reiserfs_error(s
, "jdm-4055", "NULL bitmap info pointer "
160 "for bitmap %d", bmap_n
);
164 bh
= reiserfs_read_bitmap_block(s
, bmap_n
);
170 if (bi
->free_count
< min
) {
172 return 0; /* No free blocks in this bitmap */
175 /* search for a first zero bit -- beginning of a window */
176 *beg
= reiserfs_find_next_zero_le_bit
177 ((unsigned long *)(bh
->b_data
), boundary
, *beg
);
180 * search for a zero bit fails or the rest of bitmap block
181 * cannot contain a zero window of minimum size
183 if (*beg
+ min
> boundary
) {
188 if (unfm
&& is_block_in_journal(s
, bmap_n
, *beg
, beg
))
190 /* first zero bit found; we check next bits */
191 for (end
= *beg
+ 1;; end
++) {
192 if (end
>= *beg
+ max
|| end
>= boundary
193 || reiserfs_test_le_bit(end
, bh
->b_data
)) {
199 * finding the other end of zero bit window requires
200 * looking into journal structures (in case of
201 * searching for free blocks for unformatted nodes)
203 if (unfm
&& is_block_in_journal(s
, bmap_n
, end
, &next
))
208 * now (*beg) points to beginning of zero bits window,
209 * (end) points to one bit after the window end
212 /* found window of proper size */
213 if (end
- *beg
>= min
) {
215 reiserfs_prepare_for_journal(s
, bh
, 1);
217 * try to set all blocks used checking are
220 for (i
= *beg
; i
< end
; i
++) {
221 /* Don't check in journal again. */
222 if (reiserfs_test_and_set_le_bit
225 * bit was set by another process while
226 * we slept in prepare_for_journal()
228 PROC_INFO_INC(s
, scan_bitmap
.stolen
);
231 * we can continue with smaller set
232 * of allocated blocks, if length of
233 * this set is more or equal to `min'
235 if (i
>= *beg
+ min
) {
241 * otherwise we clear all bit
245 reiserfs_clear_le_bit
247 reiserfs_restore_prepared_buffer(s
, bh
);
251 * Search again in current block
257 bi
->free_count
-= (end
- *beg
);
258 journal_mark_dirty(th
, bh
);
261 /* free block count calculation */
262 reiserfs_prepare_for_journal(s
, SB_BUFFER_WITH_SB(s
),
264 PUT_SB_FREE_BLOCKS(s
, SB_FREE_BLOCKS(s
) - (end
- *beg
));
265 journal_mark_dirty(th
, SB_BUFFER_WITH_SB(s
));
274 static int bmap_hash_id(struct super_block
*s
, u32 id
)
276 char *hash_in
= NULL
;
283 hash_in
= (char *)(&id
);
284 hash
= keyed_hash(hash_in
, 4);
285 bm
= hash
% reiserfs_bmap_count(s
);
289 /* this can only be true when SB_BMAP_NR = 1 */
290 if (bm
>= reiserfs_bmap_count(s
))
296 * hashes the id and then returns > 0 if the block group for the
297 * corresponding hash is full
299 static inline int block_group_used(struct super_block
*s
, u32 id
)
301 int bm
= bmap_hash_id(s
, id
);
302 struct reiserfs_bitmap_info
*info
= &SB_AP_BITMAP(s
)[bm
];
305 * If we don't have cached information on this bitmap block, we're
306 * going to have to load it later anyway. Loading it here allows us
307 * to make a better decision. This favors long-term performance gain
308 * with a better on-disk layout vs. a short term gain of skipping the
309 * read and potentially having a bad placement.
311 if (info
->free_count
== UINT_MAX
) {
312 struct buffer_head
*bh
= reiserfs_read_bitmap_block(s
, bm
);
316 if (info
->free_count
> ((s
->s_blocksize
<< 3) * 60 / 100)) {
323 * the packing is returned in disk byte order
325 __le32
reiserfs_choose_packing(struct inode
* dir
)
328 if (TEST_OPTION(packing_groups
, dir
->i_sb
)) {
329 u32 parent_dir
= le32_to_cpu(INODE_PKEY(dir
)->k_dir_id
);
331 * some versions of reiserfsck expect packing locality 1 to be
334 if (parent_dir
== 1 || block_group_used(dir
->i_sb
, parent_dir
))
335 packing
= INODE_PKEY(dir
)->k_objectid
;
337 packing
= INODE_PKEY(dir
)->k_dir_id
;
339 packing
= INODE_PKEY(dir
)->k_objectid
;
344 * Tries to find contiguous zero bit window (given size) in given region of
345 * bitmap and place new blocks there. Returns number of allocated blocks.
347 static int scan_bitmap(struct reiserfs_transaction_handle
*th
,
348 b_blocknr_t
* start
, b_blocknr_t finish
,
349 int min
, int max
, int unfm
, sector_t file_block
)
351 int nr_allocated
= 0;
352 struct super_block
*s
= th
->t_super
;
353 unsigned int bm
, off
;
354 unsigned int end_bm
, end_off
;
355 unsigned int off_max
= s
->s_blocksize
<< 3;
357 BUG_ON(!th
->t_trans_id
);
358 PROC_INFO_INC(s
, scan_bitmap
.call
);
360 /* No point in looking for more free blocks */
361 if (SB_FREE_BLOCKS(s
) <= 0)
364 get_bit_address(s
, *start
, &bm
, &off
);
365 get_bit_address(s
, finish
, &end_bm
, &end_off
);
366 if (bm
> reiserfs_bmap_count(s
))
368 if (end_bm
> reiserfs_bmap_count(s
))
369 end_bm
= reiserfs_bmap_count(s
);
372 * When the bitmap is more than 10% free, anyone can allocate.
373 * When it's less than 10% free, only files that already use the
374 * bitmap are allowed. Once we pass 80% full, this restriction
377 * We do this so that files that grow later still have space close to
378 * their original allocation. This improves locality, and presumably
379 * performance as a result.
381 * This is only an allocation policy and does not make up for getting a
382 * bad hint. Decent hinting must be implemented for this to work well.
384 if (TEST_OPTION(skip_busy
, s
)
385 && SB_FREE_BLOCKS(s
) > SB_BLOCK_COUNT(s
) / 20) {
386 for (; bm
< end_bm
; bm
++, off
= 0) {
387 if ((off
&& (!unfm
|| (file_block
!= 0)))
388 || SB_AP_BITMAP(s
)[bm
].free_count
>
389 (s
->s_blocksize
<< 3) / 10)
391 scan_bitmap_block(th
, bm
, &off
, off_max
,
396 /* we know from above that start is a reasonable number */
397 get_bit_address(s
, *start
, &bm
, &off
);
400 for (; bm
< end_bm
; bm
++, off
= 0) {
402 scan_bitmap_block(th
, bm
, &off
, off_max
, min
, max
, unfm
);
408 scan_bitmap_block(th
, bm
, &off
, end_off
+ 1, min
, max
, unfm
);
411 *start
= bm
* off_max
+ off
;
416 static void _reiserfs_free_block(struct reiserfs_transaction_handle
*th
,
417 struct inode
*inode
, b_blocknr_t block
,
420 struct super_block
*s
= th
->t_super
;
421 struct reiserfs_super_block
*rs
;
422 struct buffer_head
*sbh
, *bmbh
;
423 struct reiserfs_bitmap_info
*apbi
;
424 unsigned int nr
, offset
;
426 BUG_ON(!th
->t_trans_id
);
427 PROC_INFO_INC(s
, free_block
);
428 rs
= SB_DISK_SUPER_BLOCK(s
);
429 sbh
= SB_BUFFER_WITH_SB(s
);
430 apbi
= SB_AP_BITMAP(s
);
432 get_bit_address(s
, block
, &nr
, &offset
);
434 if (nr
>= reiserfs_bmap_count(s
)) {
435 reiserfs_error(s
, "vs-4075", "block %lu is out of range",
440 bmbh
= reiserfs_read_bitmap_block(s
, nr
);
444 reiserfs_prepare_for_journal(s
, bmbh
, 1);
446 /* clear bit for the given block in bit map */
447 if (!reiserfs_test_and_clear_le_bit(offset
, bmbh
->b_data
)) {
448 reiserfs_error(s
, "vs-4080",
449 "block %lu: bit already cleared", block
);
451 apbi
[nr
].free_count
++;
452 journal_mark_dirty(th
, bmbh
);
455 reiserfs_prepare_for_journal(s
, sbh
, 1);
456 /* update super block */
457 set_sb_free_blocks(rs
, sb_free_blocks(rs
) + 1);
459 journal_mark_dirty(th
, sbh
);
460 if (for_unformatted
) {
461 int depth
= reiserfs_write_unlock_nested(s
);
462 dquot_free_block_nodirty(inode
, 1);
463 reiserfs_write_lock_nested(s
, depth
);
467 void reiserfs_free_block(struct reiserfs_transaction_handle
*th
,
468 struct inode
*inode
, b_blocknr_t block
,
471 struct super_block
*s
= th
->t_super
;
473 BUG_ON(!th
->t_trans_id
);
474 RFALSE(!s
, "vs-4061: trying to free block on nonexistent device");
475 if (!is_reusable(s
, block
, 1))
478 if (block
> sb_block_count(REISERFS_SB(s
)->s_rs
)) {
479 reiserfs_error(th
->t_super
, "bitmap-4072",
480 "Trying to free block outside file system "
481 "boundaries (%lu > %lu)",
482 block
, sb_block_count(REISERFS_SB(s
)->s_rs
));
485 /* mark it before we clear it, just in case */
486 journal_mark_freed(th
, s
, block
);
487 _reiserfs_free_block(th
, inode
, block
, for_unformatted
);
490 /* preallocated blocks don't need to be run through journal_mark_freed */
491 static void reiserfs_free_prealloc_block(struct reiserfs_transaction_handle
*th
,
492 struct inode
*inode
, b_blocknr_t block
)
494 BUG_ON(!th
->t_trans_id
);
496 "vs-4060: trying to free block on nonexistent device");
497 if (!is_reusable(th
->t_super
, block
, 1))
499 _reiserfs_free_block(th
, inode
, block
, 1);
502 static void __discard_prealloc(struct reiserfs_transaction_handle
*th
,
503 struct reiserfs_inode_info
*ei
)
505 unsigned long save
= ei
->i_prealloc_block
;
507 struct inode
*inode
= &ei
->vfs_inode
;
509 BUG_ON(!th
->t_trans_id
);
510 #ifdef CONFIG_REISERFS_CHECK
511 if (ei
->i_prealloc_count
< 0)
512 reiserfs_error(th
->t_super
, "zam-4001",
513 "inode has negative prealloc blocks count.");
515 while (ei
->i_prealloc_count
> 0) {
516 reiserfs_free_prealloc_block(th
, inode
, ei
->i_prealloc_block
);
517 ei
->i_prealloc_block
++;
518 ei
->i_prealloc_count
--;
522 reiserfs_update_sd(th
, inode
);
523 ei
->i_prealloc_block
= save
;
524 list_del_init(&ei
->i_prealloc_list
);
527 /* FIXME: It should be inline function */
528 void reiserfs_discard_prealloc(struct reiserfs_transaction_handle
*th
,
531 struct reiserfs_inode_info
*ei
= REISERFS_I(inode
);
533 BUG_ON(!th
->t_trans_id
);
534 if (ei
->i_prealloc_count
)
535 __discard_prealloc(th
, ei
);
538 void reiserfs_discard_all_prealloc(struct reiserfs_transaction_handle
*th
)
540 struct list_head
*plist
= &SB_JOURNAL(th
->t_super
)->j_prealloc_list
;
542 BUG_ON(!th
->t_trans_id
);
543 while (!list_empty(plist
)) {
544 struct reiserfs_inode_info
*ei
;
545 ei
= list_entry(plist
->next
, struct reiserfs_inode_info
,
547 #ifdef CONFIG_REISERFS_CHECK
548 if (!ei
->i_prealloc_count
) {
549 reiserfs_error(th
->t_super
, "zam-4001",
550 "inode is in prealloc list but has "
551 "no preallocated blocks.");
554 __discard_prealloc(th
, ei
);
558 void reiserfs_init_alloc_options(struct super_block
*s
)
560 set_bit(_ALLOC_skip_busy
, &SB_ALLOC_OPTS(s
));
561 set_bit(_ALLOC_dirid_groups
, &SB_ALLOC_OPTS(s
));
562 set_bit(_ALLOC_packing_groups
, &SB_ALLOC_OPTS(s
));
565 /* block allocator related options are parsed here */
566 int reiserfs_parse_alloc_options(struct super_block
*s
, char *options
)
568 char *this_char
, *value
;
570 /* clear default settings */
571 REISERFS_SB(s
)->s_alloc_options
.bits
= 0;
573 while ((this_char
= strsep(&options
, ":")) != NULL
) {
574 if ((value
= strchr(this_char
, '=')) != NULL
)
577 if (!strcmp(this_char
, "concentrating_formatted_nodes")) {
579 SET_OPTION(concentrating_formatted_nodes
);
581 && *value
) ? simple_strtoul(value
, &value
,
583 if (temp
<= 0 || temp
> 100) {
584 REISERFS_SB(s
)->s_alloc_options
.border
= 10;
586 REISERFS_SB(s
)->s_alloc_options
.border
=
591 if (!strcmp(this_char
, "displacing_large_files")) {
592 SET_OPTION(displacing_large_files
);
593 REISERFS_SB(s
)->s_alloc_options
.large_file_size
=
595 && *value
) ? simple_strtoul(value
, &value
, 0) : 16;
598 if (!strcmp(this_char
, "displacing_new_packing_localities")) {
599 SET_OPTION(displacing_new_packing_localities
);
603 if (!strcmp(this_char
, "old_hashed_relocation")) {
604 SET_OPTION(old_hashed_relocation
);
608 if (!strcmp(this_char
, "new_hashed_relocation")) {
609 SET_OPTION(new_hashed_relocation
);
613 if (!strcmp(this_char
, "dirid_groups")) {
614 SET_OPTION(dirid_groups
);
617 if (!strcmp(this_char
, "oid_groups")) {
618 SET_OPTION(oid_groups
);
621 if (!strcmp(this_char
, "packing_groups")) {
622 SET_OPTION(packing_groups
);
625 if (!strcmp(this_char
, "hashed_formatted_nodes")) {
626 SET_OPTION(hashed_formatted_nodes
);
630 if (!strcmp(this_char
, "skip_busy")) {
631 SET_OPTION(skip_busy
);
635 if (!strcmp(this_char
, "hundredth_slices")) {
636 SET_OPTION(hundredth_slices
);
640 if (!strcmp(this_char
, "old_way")) {
645 if (!strcmp(this_char
, "displace_based_on_dirid")) {
646 SET_OPTION(displace_based_on_dirid
);
650 if (!strcmp(this_char
, "preallocmin")) {
651 REISERFS_SB(s
)->s_alloc_options
.preallocmin
=
653 && *value
) ? simple_strtoul(value
, &value
, 0) : 4;
657 if (!strcmp(this_char
, "preallocsize")) {
658 REISERFS_SB(s
)->s_alloc_options
.preallocsize
=
660 && *value
) ? simple_strtoul(value
, &value
,
666 reiserfs_warning(s
, "zam-4001", "unknown option - %s",
671 reiserfs_info(s
, "allocator options = [%08x]\n", SB_ALLOC_OPTS(s
));
/* Emit a ':' separator between options; suppressed before the first one. */
static void print_sep(struct seq_file *seq, int *first)
{
	if (!*first)
		seq_puts(seq, ":");
	else
		*first = 0;
}
683 void show_alloc_options(struct seq_file
*seq
, struct super_block
*s
)
687 if (SB_ALLOC_OPTS(s
) == ((1 << _ALLOC_skip_busy
) |
688 (1 << _ALLOC_dirid_groups
) | (1 << _ALLOC_packing_groups
)))
691 seq_puts(seq
, ",alloc=");
693 if (TEST_OPTION(concentrating_formatted_nodes
, s
)) {
694 print_sep(seq
, &first
);
695 if (REISERFS_SB(s
)->s_alloc_options
.border
!= 10) {
696 seq_printf(seq
, "concentrating_formatted_nodes=%d",
697 100 / REISERFS_SB(s
)->s_alloc_options
.border
);
699 seq_puts(seq
, "concentrating_formatted_nodes");
701 if (TEST_OPTION(displacing_large_files
, s
)) {
702 print_sep(seq
, &first
);
703 if (REISERFS_SB(s
)->s_alloc_options
.large_file_size
!= 16) {
704 seq_printf(seq
, "displacing_large_files=%lu",
705 REISERFS_SB(s
)->s_alloc_options
.large_file_size
);
707 seq_puts(seq
, "displacing_large_files");
709 if (TEST_OPTION(displacing_new_packing_localities
, s
)) {
710 print_sep(seq
, &first
);
711 seq_puts(seq
, "displacing_new_packing_localities");
713 if (TEST_OPTION(old_hashed_relocation
, s
)) {
714 print_sep(seq
, &first
);
715 seq_puts(seq
, "old_hashed_relocation");
717 if (TEST_OPTION(new_hashed_relocation
, s
)) {
718 print_sep(seq
, &first
);
719 seq_puts(seq
, "new_hashed_relocation");
721 if (TEST_OPTION(dirid_groups
, s
)) {
722 print_sep(seq
, &first
);
723 seq_puts(seq
, "dirid_groups");
725 if (TEST_OPTION(oid_groups
, s
)) {
726 print_sep(seq
, &first
);
727 seq_puts(seq
, "oid_groups");
729 if (TEST_OPTION(packing_groups
, s
)) {
730 print_sep(seq
, &first
);
731 seq_puts(seq
, "packing_groups");
733 if (TEST_OPTION(hashed_formatted_nodes
, s
)) {
734 print_sep(seq
, &first
);
735 seq_puts(seq
, "hashed_formatted_nodes");
737 if (TEST_OPTION(skip_busy
, s
)) {
738 print_sep(seq
, &first
);
739 seq_puts(seq
, "skip_busy");
741 if (TEST_OPTION(hundredth_slices
, s
)) {
742 print_sep(seq
, &first
);
743 seq_puts(seq
, "hundredth_slices");
745 if (TEST_OPTION(old_way
, s
)) {
746 print_sep(seq
, &first
);
747 seq_puts(seq
, "old_way");
749 if (TEST_OPTION(displace_based_on_dirid
, s
)) {
750 print_sep(seq
, &first
);
751 seq_puts(seq
, "displace_based_on_dirid");
753 if (REISERFS_SB(s
)->s_alloc_options
.preallocmin
!= 0) {
754 print_sep(seq
, &first
);
755 seq_printf(seq
, "preallocmin=%d",
756 REISERFS_SB(s
)->s_alloc_options
.preallocmin
);
758 if (REISERFS_SB(s
)->s_alloc_options
.preallocsize
!= 17) {
759 print_sep(seq
, &first
);
760 seq_printf(seq
, "preallocsize=%d",
761 REISERFS_SB(s
)->s_alloc_options
.preallocsize
);
765 static inline void new_hashed_relocation(reiserfs_blocknr_hint_t
* hint
)
769 if (hint
->formatted_node
) {
770 hash_in
= (char *)&hint
->key
.k_dir_id
;
773 /*hint->search_start = hint->beg;*/
774 hash_in
= (char *)&hint
->key
.k_dir_id
;
776 if (TEST_OPTION(displace_based_on_dirid
, hint
->th
->t_super
))
777 hash_in
= (char *)(&INODE_PKEY(hint
->inode
)->k_dir_id
);
780 (char *)(&INODE_PKEY(hint
->inode
)->k_objectid
);
784 hint
->beg
+ keyed_hash(hash_in
, 4) % (hint
->end
- hint
->beg
);
788 * Relocation based on dirid, hashing them into a given bitmap block
789 * files. Formatted nodes are unaffected, a separate policy covers them
791 static void dirid_groups(reiserfs_blocknr_hint_t
* hint
)
796 struct super_block
*sb
= hint
->th
->t_super
;
799 dirid
= le32_to_cpu(INODE_PKEY(hint
->inode
)->k_dir_id
);
800 else if (hint
->formatted_node
)
801 dirid
= hint
->key
.k_dir_id
;
804 bm
= bmap_hash_id(sb
, dirid
);
805 hash
= bm
* (sb
->s_blocksize
<< 3);
806 /* give a portion of the block group to metadata */
808 hash
+= sb
->s_blocksize
/ 2;
809 hint
->search_start
= hash
;
814 * Relocation based on oid, hashing them into a given bitmap block
815 * files. Formatted nodes are unaffected, a separate policy covers them
817 static void oid_groups(reiserfs_blocknr_hint_t
* hint
)
825 dirid
= le32_to_cpu(INODE_PKEY(hint
->inode
)->k_dir_id
);
828 * keep the root dir and it's first set of subdirs close to
829 * the start of the disk
832 hash
= (hint
->inode
->i_sb
->s_blocksize
<< 3);
834 oid
= le32_to_cpu(INODE_PKEY(hint
->inode
)->k_objectid
);
835 bm
= bmap_hash_id(hint
->inode
->i_sb
, oid
);
836 hash
= bm
* (hint
->inode
->i_sb
->s_blocksize
<< 3);
838 hint
->search_start
= hash
;
843 * returns 1 if it finds an indirect item and gets valid hint info
844 * from it, otherwise 0
846 static int get_left_neighbor(reiserfs_blocknr_hint_t
* hint
)
848 struct treepath
*path
;
849 struct buffer_head
*bh
;
850 struct item_head
*ih
;
856 * reiserfs code can call this function w/o pointer to path
857 * structure supplied; then we rely on supplied search_start
863 bh
= get_last_bh(path
);
864 RFALSE(!bh
, "green-4002: Illegal path specified to get_left_neighbor");
865 ih
= tp_item_head(path
);
866 pos_in_item
= path
->pos_in_item
;
867 item
= tp_item_body(path
);
869 hint
->search_start
= bh
->b_blocknr
;
872 * for indirect item: go to left and look for the first non-hole entry
873 * in the indirect item
875 if (!hint
->formatted_node
&& is_indirect_le_ih(ih
)) {
876 if (pos_in_item
== I_UNFM_NUM(ih
))
878 while (pos_in_item
>= 0) {
879 int t
= get_block_num(item
, pos_in_item
);
881 hint
->search_start
= t
;
889 /* does result value fit into specified region? */
894 * should be, if formatted node, then try to put on first part of the device
895 * specified as number of percent with mount option device, else try to put
896 * on last of device. This is not to say it is good code to do so,
897 * but the effect should be measured.
899 static inline void set_border_in_hint(struct super_block
*s
,
900 reiserfs_blocknr_hint_t
* hint
)
903 SB_BLOCK_COUNT(s
) / REISERFS_SB(s
)->s_alloc_options
.border
;
905 if (hint
->formatted_node
)
906 hint
->end
= border
- 1;
911 static inline void displace_large_file(reiserfs_blocknr_hint_t
* hint
)
913 if (TEST_OPTION(displace_based_on_dirid
, hint
->th
->t_super
))
916 keyed_hash((char *)(&INODE_PKEY(hint
->inode
)->k_dir_id
),
917 4) % (hint
->end
- hint
->beg
);
921 keyed_hash((char *)(&INODE_PKEY(hint
->inode
)->k_objectid
),
922 4) % (hint
->end
- hint
->beg
);
925 static inline void hash_formatted_node(reiserfs_blocknr_hint_t
* hint
)
930 hash_in
= (char *)&hint
->key
.k_dir_id
;
931 else if (TEST_OPTION(displace_based_on_dirid
, hint
->th
->t_super
))
932 hash_in
= (char *)(&INODE_PKEY(hint
->inode
)->k_dir_id
);
934 hash_in
= (char *)(&INODE_PKEY(hint
->inode
)->k_objectid
);
937 hint
->beg
+ keyed_hash(hash_in
, 4) % (hint
->end
- hint
->beg
);
941 this_blocknr_allocation_would_make_it_a_large_file(reiserfs_blocknr_hint_t
*
944 return hint
->block
==
945 REISERFS_SB(hint
->th
->t_super
)->s_alloc_options
.large_file_size
;
#ifdef DISPLACE_NEW_PACKING_LOCALITIES
/*
 * Displace a newly created packing locality: clear the one-shot flag on
 * the transaction and hash the objectid into the [beg, end) region.
 */
static inline void displace_new_packing_locality(reiserfs_blocknr_hint_t * hint)
{
	struct in_core_key *key = &hint->key;

	hint->th->displace_new_blocks = 0;
	hint->search_start =
	    hint->beg + keyed_hash((char *)(&key->k_objectid),
				   4) % (hint->end - hint->beg);
}
#endif
960 static inline int old_hashed_relocation(reiserfs_blocknr_hint_t
* hint
)
965 if (hint
->formatted_node
|| hint
->inode
== NULL
) {
969 hash_in
= le32_to_cpu((INODE_PKEY(hint
->inode
))->k_dir_id
);
971 hint
->beg
+ (u32
) keyed_hash(((char *)(&hash_in
)),
972 4) % (hint
->end
- hint
->beg
- 1);
973 if (border
> hint
->search_start
)
974 hint
->search_start
= border
;
979 static inline int old_way(reiserfs_blocknr_hint_t
* hint
)
983 if (hint
->formatted_node
|| hint
->inode
== NULL
) {
989 le32_to_cpu(INODE_PKEY(hint
->inode
)->k_dir_id
) % (hint
->end
-
991 if (border
> hint
->search_start
)
992 hint
->search_start
= border
;
997 static inline void hundredth_slices(reiserfs_blocknr_hint_t
* hint
)
999 struct in_core_key
*key
= &hint
->key
;
1000 b_blocknr_t slice_start
;
1003 (keyed_hash((char *)(&key
->k_dir_id
), 4) % 100) * (hint
->end
/ 100);
1004 if (slice_start
> hint
->search_start
1005 || slice_start
+ (hint
->end
/ 100) <= hint
->search_start
) {
1006 hint
->search_start
= slice_start
;
1010 static void determine_search_start(reiserfs_blocknr_hint_t
* hint
,
1013 struct super_block
*s
= hint
->th
->t_super
;
1017 hint
->end
= SB_BLOCK_COUNT(s
) - 1;
1019 /* This is former border algorithm. Now with tunable border offset */
1020 if (concentrating_formatted_nodes(s
))
1021 set_border_in_hint(s
, hint
);
1023 #ifdef DISPLACE_NEW_PACKING_LOCALITIES
1025 * whenever we create a new directory, we displace it. At first
1026 * we will hash for location, later we might look for a moderately
1027 * empty place for it
1029 if (displacing_new_packing_localities(s
)
1030 && hint
->th
->displace_new_blocks
) {
1031 displace_new_packing_locality(hint
);
1034 * we do not continue determine_search_start,
1035 * if new packing locality is being displaced
1042 * all persons should feel encouraged to add more special cases
1043 * here and test them
1046 if (displacing_large_files(s
) && !hint
->formatted_node
1047 && this_blocknr_allocation_would_make_it_a_large_file(hint
)) {
1048 displace_large_file(hint
);
1053 * if none of our special cases is relevant, use the left
1054 * neighbor in the tree order of the new node we are allocating for
1056 if (hint
->formatted_node
&& TEST_OPTION(hashed_formatted_nodes
, s
)) {
1057 hash_formatted_node(hint
);
1061 unfm_hint
= get_left_neighbor(hint
);
1064 * Mimic old block allocator behaviour, that is if VFS allowed for
1065 * preallocation, new blocks are displaced based on directory ID.
1066 * Also, if suggested search_start is less than last preallocated
1067 * block, we start searching from it, assuming that HDD dataflow
1068 * is faster in forward direction
1070 if (TEST_OPTION(old_way
, s
)) {
1071 if (!hint
->formatted_node
) {
1072 if (!reiserfs_hashed_relocation(s
))
1074 else if (!reiserfs_no_unhashed_relocation(s
))
1075 old_hashed_relocation(hint
);
1078 && hint
->search_start
<
1079 REISERFS_I(hint
->inode
)->i_prealloc_block
)
1080 hint
->search_start
=
1081 REISERFS_I(hint
->inode
)->i_prealloc_block
;
1086 /* This is an approach proposed by Hans */
1087 if (TEST_OPTION(hundredth_slices
, s
)
1088 && !(displacing_large_files(s
) && !hint
->formatted_node
)) {
1089 hundredth_slices(hint
);
1093 /* old_hashed_relocation only works on unformatted */
1094 if (!unfm_hint
&& !hint
->formatted_node
&&
1095 TEST_OPTION(old_hashed_relocation
, s
)) {
1096 old_hashed_relocation(hint
);
1099 /* new_hashed_relocation works with both formatted/unformatted nodes */
1100 if ((!unfm_hint
|| hint
->formatted_node
) &&
1101 TEST_OPTION(new_hashed_relocation
, s
)) {
1102 new_hashed_relocation(hint
);
1105 /* dirid grouping works only on unformatted nodes */
1106 if (!unfm_hint
&& !hint
->formatted_node
&& TEST_OPTION(dirid_groups
, s
)) {
1109 #ifdef DISPLACE_NEW_PACKING_LOCALITIES
1110 if (hint
->formatted_node
&& TEST_OPTION(dirid_groups
, s
)) {
1115 /* oid grouping works only on unformatted nodes */
1116 if (!unfm_hint
&& !hint
->formatted_node
&& TEST_OPTION(oid_groups
, s
)) {
1122 static int determine_prealloc_size(reiserfs_blocknr_hint_t
* hint
)
1124 /* make minimum size a mount option and benchmark both ways */
1125 /* we preallocate blocks only for regular files, specific size */
1126 /* benchmark preallocating always and see what happens */
1128 hint
->prealloc_size
= 0;
1130 if (!hint
->formatted_node
&& hint
->preallocate
) {
1131 if (S_ISREG(hint
->inode
->i_mode
)
1132 && hint
->inode
->i_size
>=
1133 REISERFS_SB(hint
->th
->t_super
)->s_alloc_options
.
1134 preallocmin
* hint
->inode
->i_sb
->s_blocksize
)
1135 hint
->prealloc_size
=
1136 REISERFS_SB(hint
->th
->t_super
)->s_alloc_options
.
1142 static inline int allocate_without_wrapping_disk(reiserfs_blocknr_hint_t
* hint
,
1143 b_blocknr_t
* new_blocknrs
,
1145 b_blocknr_t finish
, int min
,
1149 int rest
= amount_needed
;
1152 while (rest
> 0 && start
<= finish
) {
1153 nr_allocated
= scan_bitmap(hint
->th
, &start
, finish
, min
,
1154 rest
+ prealloc_size
,
1155 !hint
->formatted_node
, hint
->block
);
1157 if (nr_allocated
== 0) /* no new blocks allocated, return */
1160 /* fill free_blocknrs array first */
1161 while (rest
> 0 && nr_allocated
> 0) {
1162 *new_blocknrs
++ = start
++;
1167 /* do we have something to fill prealloc. array also ? */
1168 if (nr_allocated
> 0) {
1170 * it means prealloc_size was greater that 0 and
1171 * we do preallocation
1173 list_add(&REISERFS_I(hint
->inode
)->i_prealloc_list
,
1174 &SB_JOURNAL(hint
->th
->t_super
)->
1176 REISERFS_I(hint
->inode
)->i_prealloc_block
= start
;
1177 REISERFS_I(hint
->inode
)->i_prealloc_count
=
1183 return (amount_needed
- rest
);
1186 static inline int blocknrs_and_prealloc_arrays_from_search_start
1187 (reiserfs_blocknr_hint_t
* hint
, b_blocknr_t
* new_blocknrs
,
1188 int amount_needed
) {
1189 struct super_block
*s
= hint
->th
->t_super
;
1190 b_blocknr_t start
= hint
->search_start
;
1191 b_blocknr_t finish
= SB_BLOCK_COUNT(s
) - 1;
1193 int nr_allocated
= 0;
1196 determine_prealloc_size(hint
);
1197 if (!hint
->formatted_node
) {
1199 #ifdef REISERQUOTA_DEBUG
1200 reiserfs_debug(s
, REISERFS_DEBUG_CODE
,
1201 "reiserquota: allocating %d blocks id=%u",
1202 amount_needed
, hint
->inode
->i_uid
);
1204 depth
= reiserfs_write_unlock_nested(s
);
1206 dquot_alloc_block_nodirty(hint
->inode
, amount_needed
);
1207 if (quota_ret
) { /* Quota exceeded? */
1208 reiserfs_write_lock_nested(s
, depth
);
1209 return QUOTA_EXCEEDED
;
1211 if (hint
->preallocate
&& hint
->prealloc_size
) {
1212 #ifdef REISERQUOTA_DEBUG
1213 reiserfs_debug(s
, REISERFS_DEBUG_CODE
,
1214 "reiserquota: allocating (prealloc) %d blocks id=%u",
1215 hint
->prealloc_size
, hint
->inode
->i_uid
);
1217 quota_ret
= dquot_prealloc_block_nodirty(hint
->inode
,
1218 hint
->prealloc_size
);
1220 hint
->preallocate
= hint
->prealloc_size
= 0;
1222 /* for unformatted nodes, force large allocations */
1223 reiserfs_write_lock_nested(s
, depth
);
1228 case 0: /* Search from hint->search_start to end of disk */
1229 start
= hint
->search_start
;
1230 finish
= SB_BLOCK_COUNT(s
) - 1;
1232 case 1: /* Search from hint->beg to hint->search_start */
1234 finish
= hint
->search_start
;
1236 case 2: /* Last chance: Search from 0 to hint->beg */
1241 /* We've tried searching everywhere, not enough space */
1242 /* Free the blocks */
1243 if (!hint
->formatted_node
) {
1244 #ifdef REISERQUOTA_DEBUG
1245 reiserfs_debug(s
, REISERFS_DEBUG_CODE
,
1246 "reiserquota: freeing (nospace) %d blocks id=%u",
1248 hint
->prealloc_size
-
1250 hint
->inode
->i_uid
);
1252 /* Free not allocated blocks */
1253 depth
= reiserfs_write_unlock_nested(s
);
1254 dquot_free_block_nodirty(hint
->inode
,
1255 amount_needed
+ hint
->prealloc_size
-
1257 reiserfs_write_lock_nested(s
, depth
);
1259 while (nr_allocated
--)
1260 reiserfs_free_block(hint
->th
, hint
->inode
,
1261 new_blocknrs
[nr_allocated
],
1262 !hint
->formatted_node
);
1264 return NO_DISK_SPACE
;
1266 } while ((nr_allocated
+= allocate_without_wrapping_disk(hint
,
1276 if (!hint
->formatted_node
&&
1277 amount_needed
+ hint
->prealloc_size
>
1278 nr_allocated
+ REISERFS_I(hint
->inode
)->i_prealloc_count
) {
1279 /* Some of preallocation blocks were not allocated */
1280 #ifdef REISERQUOTA_DEBUG
1281 reiserfs_debug(s
, REISERFS_DEBUG_CODE
,
1282 "reiserquota: freeing (failed prealloc) %d blocks id=%u",
1283 amount_needed
+ hint
->prealloc_size
-
1285 REISERFS_I(hint
->inode
)->i_prealloc_count
,
1286 hint
->inode
->i_uid
);
1289 depth
= reiserfs_write_unlock_nested(s
);
1290 dquot_free_block_nodirty(hint
->inode
, amount_needed
+
1291 hint
->prealloc_size
- nr_allocated
-
1292 REISERFS_I(hint
->inode
)->
1294 reiserfs_write_lock_nested(s
, depth
);
1300 /* grab new blocknrs from preallocated list */
1301 /* return amount still needed after using them */
1302 static int use_preallocated_list_if_available(reiserfs_blocknr_hint_t
* hint
,
1303 b_blocknr_t
* new_blocknrs
,
1306 struct inode
*inode
= hint
->inode
;
1308 if (REISERFS_I(inode
)->i_prealloc_count
> 0) {
1309 while (amount_needed
) {
1311 *new_blocknrs
++ = REISERFS_I(inode
)->i_prealloc_block
++;
1312 REISERFS_I(inode
)->i_prealloc_count
--;
1316 if (REISERFS_I(inode
)->i_prealloc_count
<= 0) {
1317 list_del(&REISERFS_I(inode
)->i_prealloc_list
);
1322 /* return amount still needed after using preallocated blocks */
1323 return amount_needed
;
1326 int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t
*hint
,
1327 b_blocknr_t
*new_blocknrs
,
1329 /* Amount of blocks we have already reserved */
1332 int initial_amount_needed
= amount_needed
;
1334 struct super_block
*s
= hint
->th
->t_super
;
1336 /* Check if there is enough space, taking into account reserved space */
1337 if (SB_FREE_BLOCKS(s
) - REISERFS_SB(s
)->reserved_blocks
<
1338 amount_needed
- reserved_by_us
)
1339 return NO_DISK_SPACE
;
1340 /* should this be if !hint->inode && hint->preallocate? */
1341 /* do you mean hint->formatted_node can be removed ? - Zam */
1343 * hint->formatted_node cannot be removed because we try to access
1344 * inode information here, and there is often no inode associated with
1345 * metadata allocations - green
1348 if (!hint
->formatted_node
&& hint
->preallocate
) {
1349 amount_needed
= use_preallocated_list_if_available
1350 (hint
, new_blocknrs
, amount_needed
);
1353 * We have all the block numbers we need from the
1356 if (amount_needed
== 0)
1358 new_blocknrs
+= (initial_amount_needed
- amount_needed
);
1361 /* find search start and save it in hint structure */
1362 determine_search_start(hint
, amount_needed
);
1363 if (hint
->search_start
>= SB_BLOCK_COUNT(s
))
1364 hint
->search_start
= SB_BLOCK_COUNT(s
) - 1;
1366 /* allocation itself; fill new_blocknrs and preallocation arrays */
1367 ret
= blocknrs_and_prealloc_arrays_from_search_start
1368 (hint
, new_blocknrs
, amount_needed
);
1371 * We used prealloc. list to fill (partially) new_blocknrs array.
1372 * If final allocation fails we need to return blocks back to
1373 * prealloc. list or just free them. -- Zam (I chose second
1376 if (ret
!= CARRY_ON
) {
1377 while (amount_needed
++ < initial_amount_needed
) {
1378 reiserfs_free_block(hint
->th
, hint
->inode
,
1379 *(--new_blocknrs
), 1);
1385 void reiserfs_cache_bitmap_metadata(struct super_block
*sb
,
1386 struct buffer_head
*bh
,
1387 struct reiserfs_bitmap_info
*info
)
1389 unsigned long *cur
= (unsigned long *)(bh
->b_data
+ bh
->b_size
);
1391 /* The first bit must ALWAYS be 1 */
1392 if (!reiserfs_test_le_bit(0, (unsigned long *)bh
->b_data
))
1393 reiserfs_error(sb
, "reiserfs-2025", "bitmap block %lu is "
1394 "corrupted: first bit must be 1", bh
->b_blocknr
);
1396 info
->free_count
= 0;
1398 while (--cur
>= (unsigned long *)bh
->b_data
) {
1399 /* 0 and ~0 are special, we can optimize for them */
1401 info
->free_count
+= BITS_PER_LONG
;
1402 else if (*cur
!= ~0L) /* A mix, investigate */
1403 info
->free_count
+= BITS_PER_LONG
- hweight_long(*cur
);
1407 struct buffer_head
*reiserfs_read_bitmap_block(struct super_block
*sb
,
1408 unsigned int bitmap
)
1410 b_blocknr_t block
= (sb
->s_blocksize
<< 3) * bitmap
;
1411 struct reiserfs_bitmap_info
*info
= SB_AP_BITMAP(sb
) + bitmap
;
1412 struct buffer_head
*bh
;
1415 * Way old format filesystems had the bitmaps packed up front.
1416 * I doubt there are any of these left, but just in case...
1418 if (unlikely(test_bit(REISERFS_OLD_FORMAT
,
1419 &REISERFS_SB(sb
)->s_properties
)))
1420 block
= REISERFS_SB(sb
)->s_sbh
->b_blocknr
+ 1 + bitmap
;
1421 else if (bitmap
== 0)
1422 block
= (REISERFS_DISK_OFFSET_IN_BYTES
>> sb
->s_blocksize_bits
) + 1;
1424 bh
= sb_bread(sb
, block
);
1426 reiserfs_warning(sb
, "sh-2029: %s: bitmap block (#%u) "
1427 "reading failed", __func__
, block
);
1429 if (buffer_locked(bh
)) {
1431 PROC_INFO_INC(sb
, scan_bitmap
.wait
);
1432 depth
= reiserfs_write_unlock_nested(sb
);
1433 __wait_on_buffer(bh
);
1434 reiserfs_write_lock_nested(sb
, depth
);
1436 BUG_ON(!buffer_uptodate(bh
));
1437 BUG_ON(atomic_read(&bh
->b_count
) == 0);
1439 if (info
->free_count
== UINT_MAX
)
1440 reiserfs_cache_bitmap_metadata(sb
, bh
, info
);
1446 int reiserfs_init_bitmap_cache(struct super_block
*sb
)
1448 struct reiserfs_bitmap_info
*bitmap
;
1449 unsigned int bmap_nr
= reiserfs_bmap_count(sb
);
1451 bitmap
= vmalloc(sizeof(*bitmap
) * bmap_nr
);
1455 memset(bitmap
, 0xff, sizeof(*bitmap
) * bmap_nr
);
1457 SB_AP_BITMAP(sb
) = bitmap
;
1462 void reiserfs_free_bitmap_cache(struct super_block
*sb
)
1464 if (SB_AP_BITMAP(sb
)) {
1465 vfree(SB_AP_BITMAP(sb
));
1466 SB_AP_BITMAP(sb
) = NULL
;