/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */
#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "mballoc.h"

#include <trace/events/ext4.h>
static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
                                            ext4_group_t block_group);
/*
 * balloc.c contains the blocks allocation and deallocation routines
 */
/*
 * Calculate block group number for a given block number
 */
ext4_group_t ext4_get_group_number(struct super_block *sb,
                                   ext4_fsblk_t block)
{
        ext4_group_t group;

        if (test_opt2(sb, STD_GROUP_SIZE))
                group = (block -
                         le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) >>
                        (EXT4_BLOCK_SIZE_BITS(sb) + EXT4_CLUSTER_BITS(sb) + 3);
        else
                ext4_get_group_no_and_offset(sb, block, &group, NULL);
        return group;
}
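/*
 * Example: with 4KiB blocks and no bigalloc (cluster bits == 0), the
 * shift above is 12 + 0 + 3 = 15, i.e. 32768 blocks per group (one bit
 * per block in a one-block bitmap), so the group number is simply
 * (block - first_data_block) >> 15.
 */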
/*
 * Calculate the block group number and offset into the block/cluster
 * allocation bitmap, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
                ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;
        ext4_grpblk_t offset;

        blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
        offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb)) >>
                EXT4_SB(sb)->s_cluster_bits;
        if (offsetp)
                *offsetp = offset;
        if (blockgrpp)
                *blockgrpp = blocknr;
}
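/*
 * Note that do_div() divides in place: on return blocknr holds the
 * quotient (the group number) and the remainder (the offset within the
 * group, in blocks) is the return value, which is then scaled down to
 * cluster units by the s_cluster_bits shift.
 */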
/*
 * Check whether the 'block' lives within the 'block_group'.  Returns 1 if
 * so and 0 otherwise.
 */
static inline int ext4_block_in_group(struct super_block *sb,
                                      ext4_fsblk_t block,
                                      ext4_group_t block_group)
{
        ext4_group_t actual_group;

        actual_group = ext4_get_group_number(sb, block);
        return (actual_group == block_group) ? 1 : 0;
}
/* Return the number of clusters used for file system metadata; this
 * represents the overhead needed by the file system.
 */
static unsigned ext4_num_overhead_clusters(struct super_block *sb,
                                           ext4_group_t block_group,
                                           struct ext4_group_desc *gdp)
{
        unsigned num_clusters;
        int block_cluster = -1, inode_cluster = -1, itbl_cluster = -1, i, c;
        ext4_fsblk_t start = ext4_group_first_block_no(sb, block_group);
        ext4_fsblk_t itbl_blk;
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        /* This is the number of clusters used by the superblock,
         * block group descriptors, and reserved block group
         * descriptor blocks */
        num_clusters = ext4_num_base_meta_clusters(sb, block_group);

        /*
         * For the allocation bitmaps and inode table, we first need
         * to check to see if the block is in the block group.  If it
         * is, then check to see if the cluster is already accounted
         * for in the clusters used for the base metadata cluster, or
         * if we can increment the base metadata cluster to include
         * that block.  Otherwise, we will have to track the cluster
         * used for the allocation bitmap or inode table explicitly.
         * Normally all of these blocks are contiguous, so the special
         * case handling shouldn't be necessary except for *very*
         * unusual file system layouts.
         */
        if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
                block_cluster = EXT4_B2C(sbi,
                                         ext4_block_bitmap(sb, gdp) - start);
                if (block_cluster < num_clusters)
                        block_cluster = -1;
                else if (block_cluster == num_clusters) {
                        num_clusters++;
                        block_cluster = -1;
                }
        }

        if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
                inode_cluster = EXT4_B2C(sbi,
                                         ext4_inode_bitmap(sb, gdp) - start);
                if (inode_cluster < num_clusters)
                        inode_cluster = -1;
                else if (inode_cluster == num_clusters) {
                        num_clusters++;
                        inode_cluster = -1;
                }
        }

        itbl_blk = ext4_inode_table(sb, gdp);
        for (i = 0; i < sbi->s_itb_per_group; i++) {
                if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
                        c = EXT4_B2C(sbi, itbl_blk + i - start);
                        if ((c < num_clusters) || (c == inode_cluster) ||
                            (c == block_cluster) || (c == itbl_cluster))
                                continue;
                        if (c == num_clusters) {
                                num_clusters++;
                                continue;
                        }
                        num_clusters++;
                        itbl_cluster = c;
                }
        }

        if (block_cluster != -1)
                num_clusters++;
        if (inode_cluster != -1)
                num_clusters++;

        return num_clusters;
}
static unsigned int num_clusters_in_group(struct super_block *sb,
                                          ext4_group_t block_group)
{
        unsigned int blocks;

        if (block_group == ext4_get_groups_count(sb) - 1) {
                /*
                 * Even though mke2fs always initializes the first and
                 * last group, just in case some other tool was used,
                 * we need to make sure we calculate the right free
                 * blocks.
                 */
                blocks = ext4_blocks_count(EXT4_SB(sb)->s_es) -
                        ext4_group_first_block_no(sb, block_group);
        } else
                blocks = EXT4_BLOCKS_PER_GROUP(sb);
        return EXT4_NUM_B2C(EXT4_SB(sb), blocks);
}
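/*
 * Example: for a filesystem of 100000 blocks with 32768 blocks per group
 * (and s_first_data_block == 0, as with 4KiB blocks), groups 0-2 are full
 * and the last group holds only 100000 - 3 * 32768 = 1696 blocks.
 */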
/* Initializes an uninitialized block bitmap */
static void ext4_init_block_bitmap(struct super_block *sb,
                                   struct buffer_head *bh,
                                   ext4_group_t block_group,
                                   struct ext4_group_desc *gdp)
{
        unsigned int bit, bit_max;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_fsblk_t start, tmp;
        int flex_bg = 0;
        struct ext4_group_info *grp;

        J_ASSERT_BH(bh, buffer_locked(bh));

        /* If checksum is bad mark all blocks used to prevent allocation
         * essentially implementing a per-group read-only flag. */
        if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
                ext4_error(sb, "Checksum bad for group %u", block_group);
                grp = ext4_get_group_info(sb, block_group);
                if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
                        percpu_counter_sub(&sbi->s_freeclusters_counter,
                                           grp->bb_free);
                set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
                if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
                        int count;

                        count = ext4_free_inodes_count(sb, gdp);
                        percpu_counter_sub(&sbi->s_freeinodes_counter,
                                           count);
                }
                set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
                return;
        }
        memset(bh->b_data, 0, sb->s_blocksize);

        bit_max = ext4_num_base_meta_clusters(sb, block_group);
        for (bit = 0; bit < bit_max; bit++)
                ext4_set_bit(bit, bh->b_data);

        start = ext4_group_first_block_no(sb, block_group);

        if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
                flex_bg = 1;

        /* Set bits for block and inode bitmaps, and inode table */
        tmp = ext4_block_bitmap(sb, gdp);
        if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
                ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

        tmp = ext4_inode_bitmap(sb, gdp);
        if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
                ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

        tmp = ext4_inode_table(sb, gdp);
        for (; tmp < ext4_inode_table(sb, gdp) +
                     sbi->s_itb_per_group; tmp++) {
                if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
                        ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
        }

        /*
         * Also, if the number of blocks within the group is less than
         * the blocksize * 8 (which is the size of the bitmap), set the
         * rest of the block bitmap to 1
         */
        ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
                             sb->s_blocksize * 8, bh->b_data);
        ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
        ext4_group_desc_csum_set(sb, block_group, gdp);
}
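/*
 * ext4_mark_bitmap_end() sets every bit between the end of the group and
 * the end of the on-disk bitmap to 1, so a group shorter than
 * blocksize * 8 clusters (typically the last one) can never hand out
 * blocks past its end.
 */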
/* Return the number of free blocks in a block group.  It is used when
 * the block bitmap is uninitialized, so we can't just count the bits
 * in the bitmap.
 */
unsigned ext4_free_clusters_after_init(struct super_block *sb,
                                       ext4_group_t block_group,
                                       struct ext4_group_desc *gdp)
{
        return num_clusters_in_group(sb, block_group) -
                ext4_num_overhead_clusters(sb, block_group, gdp);
}
/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * blocks groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.  The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */
/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
                                             ext4_group_t block_group,
                                             struct buffer_head **bh)
{
        unsigned int group_desc;
        unsigned int offset;
        ext4_group_t ngroups = ext4_get_groups_count(sb);
        struct ext4_group_desc *desc;
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        if (block_group >= ngroups) {
                ext4_error(sb, "block_group >= groups_count - block_group = %u,"
                           " groups_count = %u", block_group, ngroups);

                return NULL;
        }

        group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
        offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
        if (!sbi->s_group_desc[group_desc]) {
                ext4_error(sb, "Group descriptor not loaded - "
                           "block_group = %u, group_desc = %u, desc = %u",
                           block_group, group_desc, offset);
                return NULL;
        }

        desc = (struct ext4_group_desc *)(
                (__u8 *)sbi->s_group_desc[group_desc]->b_data +
                offset * EXT4_DESC_SIZE(sb));
        if (bh)
                *bh = sbi->s_group_desc[group_desc];
        return desc;
}
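/*
 * Example of the indexing math above: with 4KiB blocks and 64-byte
 * descriptors, EXT4_DESC_PER_BLOCK() is 64, so block_group 200 lives in
 * descriptor block 200 >> 6 = 3, at offset 200 & 63 = 8 within that block.
 */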
/*
 * Return the block number which was discovered to be invalid, or 0 if
 * the block bitmap is valid.
 */
static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
                                            struct ext4_group_desc *desc,
                                            ext4_group_t block_group,
                                            struct buffer_head *bh)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_grpblk_t offset;
        ext4_grpblk_t next_zero_bit;
        ext4_fsblk_t blk;
        ext4_fsblk_t group_first_block;

        if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
                /* with FLEX_BG, the inode/block bitmaps and itable
                 * blocks may not be in the group at all
                 * so the bitmap validation will be skipped for those groups
                 * or it has to also read the block group where the bitmaps
                 * are located to verify they are set.
                 */
                return 0;
        }
        group_first_block = ext4_group_first_block_no(sb, block_group);

        /* check whether block bitmap block number is set */
        blk = ext4_block_bitmap(sb, desc);
        offset = blk - group_first_block;
        if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
                /* bad block bitmap */
                return blk;

        /* check whether the inode bitmap block number is set */
        blk = ext4_inode_bitmap(sb, desc);
        offset = blk - group_first_block;
        if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
                /* bad inode bitmap */
                return blk;

        /* check whether the inode table block number is set */
        blk = ext4_inode_table(sb, desc);
        offset = blk - group_first_block;
        next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
                        EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group),
                        EXT4_B2C(sbi, offset));
        if (next_zero_bit <
            EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group))
                /* bad bitmap for inode tables */
                return blk;
        return 0;
}
static void ext4_validate_block_bitmap(struct super_block *sb,
                                       struct ext4_group_desc *desc,
                                       ext4_group_t block_group,
                                       struct buffer_head *bh)
{
        ext4_fsblk_t blk;
        struct ext4_group_info *grp = ext4_get_group_info(sb, block_group);
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        if (buffer_verified(bh))
                return;

        ext4_lock_group(sb, block_group);
        blk = ext4_valid_block_bitmap(sb, desc, block_group, bh);
        if (unlikely(blk != 0)) {
                ext4_unlock_group(sb, block_group);
                ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
                           block_group, blk);
                if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
                        percpu_counter_sub(&sbi->s_freeclusters_counter,
                                           grp->bb_free);
                set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
                return;
        }
        if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
                                                    desc, bh))) {
                ext4_unlock_group(sb, block_group);
                ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
                if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
                        percpu_counter_sub(&sbi->s_freeclusters_counter,
                                           grp->bb_free);
                set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
                return;
        }
        set_buffer_verified(bh);
        ext4_unlock_group(sb, block_group);
}
/**
 * ext4_read_block_bitmap_nowait()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, and validate that the bits
 * for the block/inode/inode-table blocks are set in the bitmap.
 *
 * Return buffer_head on success or NULL in case of failure.
 */
struct buffer_head *
ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
{
        struct ext4_group_desc *desc;
        struct buffer_head *bh;
        ext4_fsblk_t bitmap_blk;

        desc = ext4_get_group_desc(sb, block_group, NULL);
        if (!desc)
                return NULL;
        bitmap_blk = ext4_block_bitmap(sb, desc);
        bh = sb_getblk(sb, bitmap_blk);
        if (unlikely(!bh)) {
                ext4_error(sb, "Cannot get buffer for block bitmap - "
                           "block_group = %u, block_bitmap = %llu",
                           block_group, bitmap_blk);
                return NULL;
        }

        if (bitmap_uptodate(bh))
                goto verify;

        lock_buffer(bh);
        if (bitmap_uptodate(bh)) {
                unlock_buffer(bh);
                goto verify;
        }
        ext4_lock_group(sb, block_group);
        if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                ext4_init_block_bitmap(sb, bh, block_group, desc);
                set_bitmap_uptodate(bh);
                set_buffer_uptodate(bh);
                ext4_unlock_group(sb, block_group);
                unlock_buffer(bh);
                return bh;
        }
        ext4_unlock_group(sb, block_group);
        if (buffer_uptodate(bh)) {
                /*
                 * if not uninit, and the buffer is uptodate,
                 * then the bitmap is also uptodate
                 */
                set_bitmap_uptodate(bh);
                unlock_buffer(bh);
                goto verify;
        }
        /*
         * submit the buffer_head for reading
         */
        set_buffer_new(bh);
        trace_ext4_read_block_bitmap_load(sb, block_group);
        bh->b_end_io = ext4_end_bitmap_read;
        get_bh(bh);
        submit_bh(READ | REQ_META | REQ_PRIO, bh);
        return bh;
verify:
        ext4_validate_block_bitmap(sb, desc, block_group, bh);
        if (buffer_verified(bh))
                return bh;
        put_bh(bh);
        return NULL;
}
/* Returns 0 on success, 1 on error */
int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
                           struct buffer_head *bh)
{
        struct ext4_group_desc *desc;

        if (!buffer_new(bh))
                return 0;
        desc = ext4_get_group_desc(sb, block_group, NULL);
        if (!desc)
                return 1;
        wait_on_buffer(bh);
        if (!buffer_uptodate(bh)) {
                ext4_error(sb, "Cannot read block bitmap - "
                           "block_group = %u, block_bitmap = %llu",
                           block_group, (unsigned long long) bh->b_blocknr);
                return 1;
        }
        clear_buffer_new(bh);
        /* Panic or remount fs read-only if block bitmap is invalid */
        ext4_validate_block_bitmap(sb, desc, block_group, bh);
        /* ...but check for error just in case errors=continue. */
        return !buffer_verified(bh);
}
struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
        struct buffer_head *bh;

        bh = ext4_read_block_bitmap_nowait(sb, block_group);
        if (!bh)
                return NULL;
        if (ext4_wait_block_bitmap(sb, block_group, bh)) {
                put_bh(bh);
                return NULL;
        }
        return bh;
}
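/*
 * ext4_read_block_bitmap() is the synchronous combination of the two
 * helpers above; callers that want to pipeline I/O can instead issue
 * several ext4_read_block_bitmap_nowait() reads up front and only then
 * wait on each buffer with ext4_wait_block_bitmap().
 */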
/**
 * ext4_has_free_clusters()
 * @sbi:	in-core super block structure.
 * @nclusters:	number of needed clusters
 * @flags:	flags from ext4_mb_new_blocks()
 *
 * Check if filesystem has nclusters free & available for allocation.
 * On success return 1, return 0 on failure.
 */
static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
                                  s64 nclusters, unsigned int flags)
{
        s64 free_clusters, dirty_clusters, rsv, resv_clusters;
        struct percpu_counter *fcc = &sbi->s_freeclusters_counter;
        struct percpu_counter *dcc = &sbi->s_dirtyclusters_counter;

        free_clusters  = percpu_counter_read_positive(fcc);
        dirty_clusters = percpu_counter_read_positive(dcc);
        resv_clusters = atomic64_read(&sbi->s_resv_clusters);

        /*
         * r_blocks_count should always be a multiple of the cluster ratio
         * so we are safe to do a plain bit shift only.
         */
        rsv = (ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits) +
              resv_clusters;

        if (free_clusters - (nclusters + rsv + dirty_clusters) <
                                        EXT4_FREECLUSTERS_WATERMARK) {
                free_clusters  = percpu_counter_sum_positive(fcc);
                dirty_clusters = percpu_counter_sum_positive(dcc);
        }
        /* Check whether we have space after accounting for current
         * dirty clusters & root reserved clusters.
         */
        if (free_clusters >= (rsv + nclusters + dirty_clusters))
                return 1;

        /* Hm, nope.  Are (enough) root reserved clusters available? */
        if (uid_eq(sbi->s_resuid, current_fsuid()) ||
            (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
            capable(CAP_SYS_RESOURCE) ||
            (flags & EXT4_MB_USE_ROOT_BLOCKS)) {

                if (free_clusters >= (nclusters + dirty_clusters +
                                      resv_clusters))
                        return 1;
        }
        /* No free blocks. Let's see if we can dip into reserved pool */
        if (flags & EXT4_MB_USE_RESERVED) {
                if (free_clusters >= (nclusters + dirty_clusters))
                        return 1;
        }

        return 0;
}
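/*
 * percpu_counter_read_positive() is a cheap but approximate read of the
 * counter; only when that estimate lands within EXT4_FREECLUSTERS_WATERMARK
 * of the requested amount do we pay for the exact (and more expensive)
 * percpu_counter_sum_positive() pass above.
 */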
int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
                             s64 nclusters, unsigned int flags)
{
        if (ext4_has_free_clusters(sbi, nclusters, flags)) {
                percpu_counter_add(&sbi->s_dirtyclusters_counter, nclusters);
                return 0;
        } else
                return -ENOSPC;
}
/**
 * ext4_should_retry_alloc()
 * @sb:			super block
 * @retries:		number of attempts that have been made
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 *
 * If the total number of retries exceeds three, return FALSE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
        if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) ||
            (*retries)++ > 3 ||
            !EXT4_SB(sb)->s_journal)
                return 0;

        jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

        return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
}
/*
 * ext4_new_meta_blocks() -- allocate blocks for metadata (indexing) blocks
 *
 * @handle:		handle to this transaction
 * @inode:		file inode
 * @goal:		given target block (filesystem wide)
 * @count:		pointer to total number of clusters needed
 * @errp:		error code
 *
 * Return the 1st allocated block number on success; *count stores the
 * total number of blocks allocated, and any error is stored in *errp.
 */
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
                                  ext4_fsblk_t goal, unsigned int flags,
                                  unsigned long *count, int *errp)
{
        struct ext4_allocation_request ar;
        ext4_fsblk_t ret;

        memset(&ar, 0, sizeof(ar));
        /* Fill with neighbour allocated blocks */
        ar.inode = inode;
        ar.goal = goal;
        ar.len = count ? *count : 1;
        ar.flags = flags;

        ret = ext4_mb_new_blocks(handle, &ar, errp);
        if (count)
                *count = ar.len;
        /*
         * Account for the allocated meta blocks.  We will never
         * fail EDQUOT for metadata, but we do account for it.
         */
        if (!(*errp) &&
            ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
                spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
                EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
                spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
                dquot_alloc_block_nofail(inode,
                                EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
        }
        return ret;
}
/**
 * ext4_count_free_clusters() -- count filesystem free clusters
 * @sb:		superblock
 *
 * Adds up the number of free clusters from each block group.
 */
ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
{
        ext4_fsblk_t desc_count;
        struct ext4_group_desc *gdp;
        ext4_group_t i;
        ext4_group_t ngroups = ext4_get_groups_count(sb);
        struct ext4_group_info *grp;
#ifdef EXT4FS_DEBUG
        struct ext4_super_block *es;
        ext4_fsblk_t bitmap_count;
        unsigned int x;
        struct buffer_head *bitmap_bh = NULL;

        es = EXT4_SB(sb)->s_es;
        desc_count = 0;
        bitmap_count = 0;
        gdp = NULL;

        for (i = 0; i < ngroups; i++) {
                gdp = ext4_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
                grp = NULL;
                if (EXT4_SB(sb)->s_group_info)
                        grp = ext4_get_group_info(sb, i);
                if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
                        desc_count += ext4_free_group_clusters(sb, gdp);
                brelse(bitmap_bh);
                bitmap_bh = ext4_read_block_bitmap(sb, i);
                if (bitmap_bh == NULL)
                        continue;

                x = ext4_count_free(bitmap_bh->b_data,
                                    EXT4_CLUSTERS_PER_GROUP(sb) / 8);
                printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
                       i, ext4_free_group_clusters(sb, gdp), x);
                bitmap_count += x;
        }
        brelse(bitmap_bh);
        printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
               ", computed = %llu, %llu\n",
               EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
               desc_count, bitmap_count);
        return bitmap_count;
#else
        desc_count = 0;
        for (i = 0; i < ngroups; i++) {
                gdp = ext4_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
                grp = NULL;
                if (EXT4_SB(sb)->s_group_info)
                        grp = ext4_get_group_info(sb, i);
                if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
                        desc_count += ext4_free_group_clusters(sb, gdp);
        }

        return desc_count;
#endif
}
static inline int test_root(ext4_group_t a, int b)
{
        while (1) {
                if (a < b)
                        return 0;
                if (a == b)
                        return 1;
                if ((a % b) != 0)
                        return 0;
                a = a / b;
        }
}
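/*
 * test_root(a, b) reports whether a is an integer power of b.  With
 * sparse_super, backup superblocks therefore land in groups 0 and 1 and
 * in groups that are powers of 3, 5 or 7: 3, 5, 7, 9, 25, 27, 49, ...
 */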
/**
 * ext4_bg_has_super - number of blocks used by the superblock in group
 * @sb: superblock for filesystem
 * @group: group number to check
 *
 * Return the number of blocks used by the superblock (primary or backup)
 * in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;

        if (group == 0)
                return 1;
        if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_SPARSE_SUPER2)) {
                if (group == le32_to_cpu(es->s_backup_bgs[0]) ||
                    group == le32_to_cpu(es->s_backup_bgs[1]))
                        return 1;
                return 0;
        }
        if ((group <= 1) || !EXT4_HAS_RO_COMPAT_FEATURE(sb,
                                        EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER))
                return 1;
        if (!(group & 1))
                return 0;
        if (test_root(group, 3) || (test_root(group, 5)) ||
            test_root(group, 7))
                return 1;

        return 0;
}
static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
                                          ext4_group_t group)
{
        unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
        ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
        ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

        if (group == first || group == first + 1 || group == last)
                return 1;
        return 0;
}
static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
                                            ext4_group_t group)
{
        if (!ext4_bg_has_super(sb, group))
                return 0;

        if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG))
                return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
        else
                return EXT4_SB(sb)->s_gdb_count;
}
/**
 * ext4_bg_num_gdb - number of blocks used by the group table in group
 * @sb: superblock for filesystem
 * @group: group number to check
 *
 * Return the number of blocks used by the group descriptor table
 * (primary or backup) in this group.  In the future there may be a
 * different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
        unsigned long first_meta_bg =
                        le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
        unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

        if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
            metagroup < first_meta_bg)
                return ext4_bg_num_gdb_nometa(sb, group);

        return ext4_bg_num_gdb_meta(sb, group);
}
/*
 * This function returns the number of file system metadata clusters at
 * the beginning of a block group, including the reserved gdt blocks.
 */
static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
                                            ext4_group_t block_group)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        unsigned num;

        /* Check for superblock and gdt backups in this group */
        num = ext4_bg_has_super(sb, block_group);

        if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
            block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
                          sbi->s_desc_per_block) {
                if (num) {
                        num += ext4_bg_num_gdb(sb, block_group);
                        num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
                }
        } else { /* For META_BG_BLOCK_GROUPS */
                num += ext4_bg_num_gdb(sb, block_group);
        }
        return EXT4_NUM_B2C(sbi, num);
}
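/*
 * For group 0 of a non-META_BG filesystem this counts the primary
 * superblock, the s_gdb_count descriptor blocks and the
 * s_reserved_gdt_blocks set aside for online resize, all converted to
 * cluster units by EXT4_NUM_B2C().
 */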
/**
 * ext4_inode_to_goal_block - return a hint for block allocation
 * @inode: inode for block allocation
 *
 * Return the ideal location to start allocating blocks for a
 * newly created inode.
 */
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        ext4_group_t block_group;
        ext4_grpblk_t colour;
        int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
        ext4_fsblk_t bg_start;
        ext4_fsblk_t last_block;

        block_group = ei->i_block_group;
        if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
                /*
                 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
                 * block groups per flexgroup, reserve the first block
                 * group for directories and special files.  Regular
                 * files will start at the second block group.  This
                 * tends to speed up directory access and improves
                 * fsck times.
                 */
                block_group &= ~(flex_size - 1);
                if (S_ISREG(inode->i_mode))
                        block_group++;
        }
        bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
        last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

        /*
         * If we are doing delayed allocation, we don't need to take
         * colour into account.
         */
        if (test_opt(inode->i_sb, DELALLOC))
                return bg_start;

        if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
                colour = (current->pid % 16) *
                         (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        else
                colour = (current->pid % 16) * ((last_block - bg_start) / 16);
        return bg_start + colour;
}