/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "mballoc.h"

#include <trace/events/ext4.h>

/*
 * balloc.c contains the blocks allocation and deallocation routines
 */

/*
 * Calculate the block group number and offset, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
                                  ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;
        ext4_grpblk_t offset;

        blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
        offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb));
        if (offsetp)
                *offsetp = offset;
        if (blockgrpp)
                *blockgrpp = blocknr;
}

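/*
 * Worked example (editor's addition, not part of the original file): the
 * split above is plain division and remainder relative to
 * s_first_data_block.  Assuming 32768 blocks per group and a first data
 * block of 0:
 *
 *      unsigned long long blocknr = 100000;   // assumed example value
 *      unsigned int per_group = 32768;        // assumed BLOCKS_PER_GROUP
 *      unsigned int group  = blocknr / per_group;      // == 3
 *      unsigned int offset = blocknr % per_group;      // == 1696
 */
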
static int ext4_block_in_group(struct super_block *sb, ext4_fsblk_t block,
                               ext4_group_t block_group)
{
        ext4_group_t actual_group;

        ext4_get_group_no_and_offset(sb, block, &actual_group, NULL);
        if (actual_group == block_group)
                return 1;
        return 0;
}

/*
 * Return the number of metadata blocks (block bitmap, inode bitmap and
 * inode table) charged to this block group.
 */
static int ext4_group_used_meta_blocks(struct super_block *sb,
                                       ext4_group_t block_group,
                                       struct ext4_group_desc *gdp)
{
        ext4_fsblk_t tmp;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        /* block bitmap, inode bitmap, and inode table blocks */
        int used_blocks = sbi->s_itb_per_group + 2;

        if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
                if (!ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp),
                                         block_group))
                        used_blocks--;

                if (!ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp),
                                         block_group))
                        used_blocks--;

                tmp = ext4_inode_table(sb, gdp);
                for (; tmp < ext4_inode_table(sb, gdp) +
                             sbi->s_itb_per_group; tmp++) {
                        if (!ext4_block_in_group(sb, tmp, block_group))
                                used_blocks--;
                }
        }
        return used_blocks;
}

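/*
 * Illustrative arithmetic (editor's addition): without FLEX_BG every group
 * holds its own metadata, so the charge is simply s_itb_per_group + 2.
 * Assuming 4 KiB blocks, 256-byte inodes and 8192 inodes per group, the
 * inode table needs 8192 * 256 / 4096 = 512 blocks, giving
 * used_blocks = 512 + 2 = 514.  With FLEX_BG, any of those blocks that
 * live in a different group are subtracted again above.
 */
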
/* Initializes an uninitialized block bitmap if given, and returns the
 * number of blocks free in the group. */
unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
                                ext4_group_t block_group,
                                struct ext4_group_desc *gdp)
{
        int bit, bit_max;
        ext4_group_t ngroups = ext4_get_groups_count(sb);
        unsigned free_blocks, group_blocks;
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        if (bh) {
                J_ASSERT_BH(bh, buffer_locked(bh));

                /* If checksum is bad mark all blocks used to prevent allocation,
                 * essentially implementing a per-group read-only flag. */
                if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
                        ext4_error(sb, "Checksum bad for group %u",
                                   block_group);
                        ext4_free_blks_set(sb, gdp, 0);
                        ext4_free_inodes_set(sb, gdp, 0);
                        ext4_itable_unused_set(sb, gdp, 0);
                        memset(bh->b_data, 0xff, sb->s_blocksize);
                        return 0;
                }
                memset(bh->b_data, 0, sb->s_blocksize);
        }

        /* Check for superblock and gdt backups in this group */
        bit_max = ext4_bg_has_super(sb, block_group);

        if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
            block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
                          sbi->s_desc_per_block) {
                if (bit_max) {
                        bit_max += ext4_bg_num_gdb(sb, block_group);
                        bit_max +=
                                le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
                }
        } else { /* For META_BG_BLOCK_GROUPS */
                bit_max += ext4_bg_num_gdb(sb, block_group);
        }

        if (block_group == ngroups - 1) {
                /*
                 * Even though mke2fs always initializes the first and last
                 * group, if some other tool enabled EXT4_BG_BLOCK_UNINIT we
                 * need to make sure we calculate the right free blocks.
                 */
                group_blocks = ext4_blocks_count(sbi->s_es) -
                               ext4_group_first_block_no(sb, ngroups - 1);
        } else {
                group_blocks = EXT4_BLOCKS_PER_GROUP(sb);
        }

        free_blocks = group_blocks - bit_max;

        if (bh) {
                ext4_fsblk_t start, tmp;
                int flex_bg = 0;

                for (bit = 0; bit < bit_max; bit++)
                        ext4_set_bit(bit, bh->b_data);

                start = ext4_group_first_block_no(sb, block_group);

                if (EXT4_HAS_INCOMPAT_FEATURE(sb,
                                              EXT4_FEATURE_INCOMPAT_FLEX_BG))
                        flex_bg = 1;

                /* Set bits for block and inode bitmaps, and inode table */
                tmp = ext4_block_bitmap(sb, gdp);
                if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
                        ext4_set_bit(tmp - start, bh->b_data);

                tmp = ext4_inode_bitmap(sb, gdp);
                if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
                        ext4_set_bit(tmp - start, bh->b_data);

                tmp = ext4_inode_table(sb, gdp);
                for (; tmp < ext4_inode_table(sb, gdp) +
                             sbi->s_itb_per_group; tmp++) {
                        if (!flex_bg ||
                            ext4_block_in_group(sb, tmp, block_group))
                                ext4_set_bit(tmp - start, bh->b_data);
                }
                /*
                 * Also, if the number of blocks within the group is less
                 * than blocksize * 8 (the size of the bitmap), set the
                 * rest of the block bitmap to 1.
                 */
                ext4_mark_bitmap_end(group_blocks, sb->s_blocksize * 8,
                                     bh->b_data);
        }
        return free_blocks - ext4_group_used_meta_blocks(sb, block_group, gdp);
}

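/*
 * Worked example (editor's addition, all numbers assumed): a freshly
 * created group with 32768 blocks, one superblock backup, one descriptor
 * block, 256 reserved GDT blocks and a 512-block inode table yields
 *
 *      bit_max      = 1 + 1 + 256 = 258        // super + gdb + reserved gdt
 *      group_blocks = 32768
 *      free_blocks  = 32768 - 258 = 32510
 *      return value = 32510 - (512 + 2) = 31996   // minus itable + bitmaps
 */
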
/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.  The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */

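/*
 * Illustrative sketch (editor's addition): locating group N's descriptor is
 * index arithmetic over the descriptor blocks cached at mount time.
 * Assuming 4 KiB blocks and 64-byte descriptors (64 descriptors per block):
 *
 *      unsigned int group = 1000;                        // assumed example
 *      unsigned int desc_per_block = 4096 / 64;          // == 64
 *      unsigned int desc_block = group / desc_per_block; // == 15
 *      unsigned int offset     = group % desc_per_block; // == 40
 *      // the descriptor sits at byte 40 * 64 within descriptor block 15
 */
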
/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:                 super block
 * @block_group:        given block group
 * @bh:                 pointer to the buffer head to store the block
 *                      group descriptor
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
                                             ext4_group_t block_group,
                                             struct buffer_head **bh)
{
        unsigned int group_desc;
        unsigned int offset;
        ext4_group_t ngroups = ext4_get_groups_count(sb);
        struct ext4_group_desc *desc;
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        if (block_group >= ngroups) {
                ext4_error(sb, "block_group >= groups_count - block_group = %u,"
                           " groups_count = %u", block_group, ngroups);
                return NULL;
        }

        group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
        offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
        if (!sbi->s_group_desc[group_desc]) {
                ext4_error(sb, "Group descriptor not loaded - "
                           "block_group = %u, group_desc = %u, desc = %u",
                           block_group, group_desc, offset);
                return NULL;
        }

        desc = (struct ext4_group_desc *)(
                (__u8 *)sbi->s_group_desc[group_desc]->b_data +
                offset * EXT4_DESC_SIZE(sb));
        if (bh)
                *bh = sbi->s_group_desc[group_desc];
        return desc;
}

static int ext4_valid_block_bitmap(struct super_block *sb,
                                   struct ext4_group_desc *desc,
                                   unsigned int block_group,
                                   struct buffer_head *bh)
{
        ext4_grpblk_t offset;
        ext4_grpblk_t next_zero_bit;
        ext4_fsblk_t bitmap_blk;
        ext4_fsblk_t group_first_block;

        if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
                /* with FLEX_BG, the inode/block bitmaps and itable
                 * blocks may not be in the group at all,
                 * so the bitmap validation will be skipped for those groups
                 * or it has to also read the block group where the bitmaps
                 * are located to verify they are set.
                 */
                return 1;
        }
        group_first_block = ext4_group_first_block_no(sb, block_group);

        /* check whether the block bitmap block number is set */
        bitmap_blk = ext4_block_bitmap(sb, desc);
        offset = bitmap_blk - group_first_block;
        if (!ext4_test_bit(offset, bh->b_data))
                /* bad block bitmap */
                goto err_out;

        /* check whether the inode bitmap block number is set */
        bitmap_blk = ext4_inode_bitmap(sb, desc);
        offset = bitmap_blk - group_first_block;
        if (!ext4_test_bit(offset, bh->b_data))
                /* bad block bitmap */
                goto err_out;

        /* check whether the inode table block numbers are set */
        bitmap_blk = ext4_inode_table(sb, desc);
        offset = bitmap_blk - group_first_block;
        next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
                                offset + EXT4_SB(sb)->s_itb_per_group,
                                offset);
        if (next_zero_bit >= offset + EXT4_SB(sb)->s_itb_per_group)
                /* good bitmap for inode tables */
                return 1;

err_out:
        ext4_error(sb, "Invalid block bitmap - block_group = %d, block = %llu",
                   block_group, bitmap_blk);
        return 0;
}

/**
 * ext4_read_block_bitmap()
 * @sb:                 super block
 * @block_group:        given block group
 *
 * Read the bitmap for a given block_group, and validate that the bits
 * for the block/inode bitmaps and the inode table are set in the bitmap.
 *
 * Return buffer_head on success or NULL in case of failure.
 */
struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
        struct ext4_group_desc *desc;
        struct buffer_head *bh = NULL;
        ext4_fsblk_t bitmap_blk;

        desc = ext4_get_group_desc(sb, block_group, NULL);
        if (!desc)
                return NULL;
        bitmap_blk = ext4_block_bitmap(sb, desc);
        bh = sb_getblk(sb, bitmap_blk);
        if (unlikely(!bh)) {
                ext4_error(sb, "Cannot read block bitmap - "
                           "block_group = %u, block_bitmap = %llu",
                           block_group, bitmap_blk);
                return NULL;
        }

        if (bitmap_uptodate(bh))
                return bh;

        lock_buffer(bh);
        if (bitmap_uptodate(bh)) {
                unlock_buffer(bh);
                return bh;
        }
        ext4_lock_group(sb, block_group);
        if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                ext4_init_block_bitmap(sb, bh, block_group, desc);
                set_bitmap_uptodate(bh);
                set_buffer_uptodate(bh);
                ext4_unlock_group(sb, block_group);
                unlock_buffer(bh);
                return bh;
        }
        ext4_unlock_group(sb, block_group);
        if (buffer_uptodate(bh)) {
                /*
                 * if not uninit, and bh is uptodate,
                 * the bitmap is also uptodate
                 */
                set_bitmap_uptodate(bh);
                unlock_buffer(bh);
                return bh;
        }
        /*
         * submit the buffer_head for read. We can
         * safely mark the bitmap as uptodate now.
         * We do it here so the bitmap uptodate bit
         * gets set with the buffer lock held.
         */
        trace_ext4_read_block_bitmap_load(sb, block_group);
        set_bitmap_uptodate(bh);
        if (bh_submit_read(bh) < 0) {
                put_bh(bh);
                ext4_error(sb, "Cannot read block bitmap - "
                           "block_group = %u, block_bitmap = %llu",
                           block_group, bitmap_blk);
                return NULL;
        }
        ext4_valid_block_bitmap(sb, desc, block_group, bh);
        /*
         * file system mounted not to panic on error,
         * continue with the corrupt bitmap
         */
        return bh;
}

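/*
 * Usage sketch (editor's addition): callers own the returned buffer_head
 * reference and must drop it with brelse() when done, e.g.
 *
 *      struct buffer_head *bitmap_bh;
 *
 *      bitmap_bh = ext4_read_block_bitmap(sb, group);
 *      if (bitmap_bh == NULL)
 *              return;         // bad descriptor or I/O error
 *      // ... inspect bitmap_bh->b_data ...
 *      brelse(bitmap_bh);
 */
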
/**
 * ext4_has_free_blocks()
 * @sbi:        in-core super block structure.
 * @nblocks:    number of needed blocks
 * @flags:      EXT4_MB_* allocation flags
 *
 * Check if filesystem has nblocks free & available for allocation.
 * On success return 1, return 0 on failure.
 */
static int ext4_has_free_blocks(struct ext4_sb_info *sbi,
                                s64 nblocks, unsigned int flags)
{
        s64 free_blocks, dirty_blocks, root_blocks;
        struct percpu_counter *fbc = &sbi->s_freeblocks_counter;
        struct percpu_counter *dbc = &sbi->s_dirtyblocks_counter;

        free_blocks  = percpu_counter_read_positive(fbc);
        dirty_blocks = percpu_counter_read_positive(dbc);
        root_blocks = ext4_r_blocks_count(sbi->s_es);

        if (free_blocks - (nblocks + root_blocks + dirty_blocks) <
                                                EXT4_FREEBLOCKS_WATERMARK) {
                free_blocks  = percpu_counter_sum_positive(fbc);
                dirty_blocks = percpu_counter_sum_positive(dbc);
        }
        /* Check whether we have space after
         * accounting for current dirty blocks & root reserved blocks.
         */
        if (free_blocks >= ((root_blocks + nblocks) + dirty_blocks))
                return 1;

        /* Hm, nope. Are (enough) root reserved blocks available? */
        if (sbi->s_resuid == current_fsuid() ||
            ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
            capable(CAP_SYS_RESOURCE) ||
            (flags & EXT4_MB_USE_ROOT_BLOCKS)) {

                if (free_blocks >= (nblocks + dirty_blocks))
                        return 1;
        }

        return 0;
}

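/*
 * Illustrative arithmetic (editor's addition): the check above first uses
 * the cheap, approximate per-CPU counter reads and only pays for an exact
 * percpu_counter_sum_positive() when the approximate answer falls within
 * EXT4_FREEBLOCKS_WATERMARK of running out.  With assumed numbers:
 *
 *      free = 1000 (approximate), dirty = 100, root = 50, nblocks = 8
 *      slack = 1000 - (8 + 50 + 100) = 842
 *      // only if slack < EXT4_FREEBLOCKS_WATERMARK are the counters
 *      // re-read exactly before deciding
 */
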
int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
                           s64 nblocks, unsigned int flags)
{
        if (ext4_has_free_blocks(sbi, nblocks, flags)) {
                percpu_counter_add(&sbi->s_dirtyblocks_counter, nblocks);
                return 0;
        } else
                return -ENOSPC;
}

/**
 * ext4_should_retry_alloc()
 * @sb:                 super block
 * @retries:            number of attempts that have been made
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 *
 * If the total number of retries exceeds three, return FALSE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
        if (!ext4_has_free_blocks(EXT4_SB(sb), 1, 0) ||
            (*retries)++ > 3 ||
            !EXT4_SB(sb)->s_journal)
                return 0;

        jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

        return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
}

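/*
 * Usage sketch (editor's addition): callers typically wrap an allocating
 * operation in a retry loop, e.g.
 *
 *      int retries = 0;
 *      int err;
 *
 *      do {
 *              err = some_allocating_operation(inode);  // hypothetical helper
 *      } while (err == -ENOSPC &&
 *               ext4_should_retry_alloc(inode->i_sb, &retries));
 */
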
/**
 * ext4_new_meta_blocks() -- allocate block for meta data (indexing) blocks
 *
 * @handle:             handle to this transaction
 * @inode:              file inode
 * @goal:               given target block (filesystem wide)
 * @flags:              EXT4_MB_* allocation flags
 * @count:              pointer to total number of blocks needed
 * @errp:               error code
 *
 * Return the first allocated block number on success; *count stores the
 * total number of blocks allocated, and any error is stored in *errp.
 */
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
                                  ext4_fsblk_t goal, unsigned int flags,
                                  unsigned long *count, int *errp)
{
        struct ext4_allocation_request ar;
        ext4_fsblk_t ret;

        memset(&ar, 0, sizeof(ar));
        /* Fill with neighbour allocated blocks */
        ar.inode = inode;
        ar.goal = goal;
        ar.len = count ? *count : 1;
        ar.flags = flags;

        ret = ext4_mb_new_blocks(handle, &ar, errp);
        if (count)
                *count = ar.len;
        /*
         * Account for the allocated meta blocks. We will never
         * fail EDQUOT for metadata, but we do account for it.
         */
        if (!(*errp) &&
            ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
                spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
                EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
                spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
                dquot_alloc_block_nofail(inode, ar.len);
        }
        return ret;
}

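/*
 * Usage sketch (editor's addition): a typical single-block metadata
 * allocation passes a NULL count and checks *errp, e.g.
 *
 *      int err;
 *      ext4_fsblk_t newblock;
 *
 *      newblock = ext4_new_meta_blocks(handle, inode, goal, 0, NULL, &err);
 *      if (err)
 *              return err;     // e.g. -ENOSPC
 */
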
/**
 * ext4_count_free_blocks() -- count filesystem free blocks
 * @sb:         superblock
 *
 * Adds up the number of free blocks from each block group.
 */
ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb)
{
        ext4_fsblk_t desc_count;
        struct ext4_group_desc *gdp;
        ext4_group_t i;
        ext4_group_t ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
        struct ext4_super_block *es;
        ext4_fsblk_t bitmap_count;
        unsigned int x;
        struct buffer_head *bitmap_bh = NULL;

        es = EXT4_SB(sb)->s_es;
        desc_count = 0;
        bitmap_count = 0;
        gdp = NULL;

        for (i = 0; i < ngroups; i++) {
                gdp = ext4_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
                desc_count += ext4_free_blks_count(sb, gdp);
                brelse(bitmap_bh);
                bitmap_bh = ext4_read_block_bitmap(sb, i);
                if (bitmap_bh == NULL)
                        continue;

                x = ext4_count_free(bitmap_bh, sb->s_blocksize);
                printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
                       i, ext4_free_blks_count(sb, gdp), x);
                bitmap_count += x;
        }
        brelse(bitmap_bh);
        printk(KERN_DEBUG "ext4_count_free_blocks: stored = %llu"
               ", computed = %llu, %llu\n", ext4_free_blocks_count(es),
               desc_count, bitmap_count);
        return bitmap_count;
#else
        desc_count = 0;
        for (i = 0; i < ngroups; i++) {
                gdp = ext4_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
                desc_count += ext4_free_blks_count(sb, gdp);
        }

        return desc_count;
#endif
}

static inline int test_root(ext4_group_t a, int b)
{
        int num = b;

        while (a > num)
                num *= b;
        return num == a;
}

static int ext4_group_sparse(ext4_group_t group)
{
        if (group <= 1)
                return 1;
        if (!(group & 1))
                return 0;
        return (test_root(group, 7) || test_root(group, 5) ||
                test_root(group, 3));
}

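/*
 * Worked example (editor's addition): with the sparse_super feature,
 * superblock and descriptor backups live only in group 0, group 1 and
 * groups that are powers of 3, 5 or 7:
 *
 *      0, 1, 3, 5, 7, 9, 25, 27, 49, 81, 125, 243, 343, ...
 *
 * Every other group reports ext4_bg_has_super() == 0 and keeps those
 * blocks free for data.
 */
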
/**
 *      ext4_bg_has_super - number of blocks used by the superblock in group
 *      @sb: superblock for filesystem
 *      @group: group number to check
 *
 *      Return the number of blocks used by the superblock (primary or backup)
 *      in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
        if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
                                EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
                        !ext4_group_sparse(group))
                return 0;
        return 1;
}

static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
                                          ext4_group_t group)
{
        unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
        ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
        ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

        if (group == first || group == first + 1 || group == last)
                return 1;
        return 0;
}

static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
                                            ext4_group_t group)
{
        if (!ext4_bg_has_super(sb, group))
                return 0;

        if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG))
                return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
        else
                return EXT4_SB(sb)->s_gdb_count;
}

/**
 *      ext4_bg_num_gdb - number of blocks used by the group table in group
 *      @sb: superblock for filesystem
 *      @group: group number to check
 *
 *      Return the number of blocks used by the group descriptor table
 *      (primary or backup) in this group.  In the future there may be a
 *      different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
        unsigned long first_meta_bg =
                        le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
        unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

        if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
                        metagroup < first_meta_bg)
                return ext4_bg_num_gdb_nometa(sb, group);

        return ext4_bg_num_gdb_meta(sb, group);
}

/**
 * ext4_inode_to_goal_block - return a hint for block allocation
 * @inode: inode for block allocation
 *
 * Return the ideal location to start allocating blocks for a
 * newly created inode.
 */
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        ext4_group_t block_group;
        ext4_grpblk_t colour;
        int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
        ext4_fsblk_t bg_start;
        ext4_fsblk_t last_block;

        block_group = ei->i_block_group;
        if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
                /*
                 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
                 * block groups per flexgroup, reserve the first block
                 * group for directories and special files.  Regular
                 * files will start at the second block group.  This
                 * tends to speed up directory access and improves
                 * fsck times.
                 */
                block_group &= ~(flex_size - 1);
                if (S_ISREG(inode->i_mode))
                        block_group++;
        }
        bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
        last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

        /*
         * If we are doing delayed allocation, we don't need to take
         * colour into account.
         */
        if (test_opt(inode->i_sb, DELALLOC))
                return bg_start;

        if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
                colour = (current->pid % 16) *
                         (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        else
                colour = (current->pid % 16) * ((last_block - bg_start) / 16);
        return bg_start + colour;
}

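/*
 * Worked example (editor's addition): with an assumed 32768 blocks per
 * group and a process whose pid % 16 == 5, the goal is offset by
 *
 *      colour = 5 * (32768 / 16) = 5 * 2048 = 10240
 *
 * blocks into the chosen group, spreading concurrent allocators across
 * the group so they do not all contend for the same free space.
 */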