/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "mballoc.h"

#include <trace/events/ext4.h>
static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
                                            ext4_group_t block_group);

/*
 * balloc.c contains the blocks allocation and deallocation routines
 */
/*
 * Calculate block group number for a given block number
 */
ext4_group_t ext4_get_group_number(struct super_block *sb,
                                   ext4_fsblk_t block)
{
        ext4_group_t group;

        if (test_opt2(sb, STD_GROUP_SIZE))
                group = (block -
                         le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) >>
                        (EXT4_BLOCK_SIZE_BITS(sb) + EXT4_CLUSTER_BITS(sb) + 3);
        else
                ext4_get_group_no_and_offset(sb, block, &group, NULL);
        return group;
}
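
/*
 * Illustrative worked example (added commentary, not from the original
 * source): with the standard layout assumed by STD_GROUP_SIZE -- one
 * block bitmap block tracking blocksize * 8 clusters per group -- a
 * 4KiB-block filesystem without bigalloc has 4096 * 8 = 32768 blocks
 * per group, so the shift above is EXT4_BLOCK_SIZE_BITS (12) +
 * EXT4_CLUSTER_BITS (0) + 3 = 15. Assuming s_first_data_block == 0,
 * block 100000 maps to group 100000 >> 15 == 3.
 */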
/*
 * Calculate the block group number and offset into the block/cluster
 * allocation bitmap, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
                                  ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;
        ext4_grpblk_t offset;

        blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
        offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb)) >>
                EXT4_SB(sb)->s_cluster_bits;
        if (offsetp)
                *offsetp = offset;
        if (blockgrpp)
                *blockgrpp = blocknr;
}
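
/*
 * Note on the do_div() idiom above (added commentary): do_div(n, base)
 * divides the 64-bit value n in place, leaving the quotient in n and
 * returning the remainder. Continuing the example of 32768 blocks per
 * group with s_first_data_block == 0: for blocknr = 100000 the call
 * leaves blocknr == 3 (the group number stored via *blockgrpp) and
 * returns 1696, which after the cluster shift becomes the offset
 * stored via *offsetp.
 */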
/*
 * Check whether the 'block' lives within the 'block_group'. Returns 1 if so
 * and 0 otherwise.
 */
static inline int ext4_block_in_group(struct super_block *sb,
                                      ext4_fsblk_t block,
                                      ext4_group_t block_group)
{
        ext4_group_t actual_group;

        actual_group = ext4_get_group_number(sb, block);
        return (actual_group == block_group) ? 1 : 0;
}
/* Return the number of clusters used for file system metadata; this
 * represents the overhead needed by the file system.
 */
unsigned ext4_num_overhead_clusters(struct super_block *sb,
                                    ext4_group_t block_group,
                                    struct ext4_group_desc *gdp)
{
        unsigned num_clusters;
        int block_cluster = -1, inode_cluster = -1, itbl_cluster = -1, i, c;
        ext4_fsblk_t start = ext4_group_first_block_no(sb, block_group);
        ext4_fsblk_t itbl_blk;
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        /* This is the number of clusters used by the superblock,
         * block group descriptors, and reserved block group
         * descriptor blocks */
        num_clusters = ext4_num_base_meta_clusters(sb, block_group);

        /*
         * For the allocation bitmaps and inode table, we first need
         * to check to see if the block is in the block group.  If it
         * is, then check to see if the cluster is already accounted
         * for in the clusters used for the base metadata cluster, or
         * if we can increment the base metadata cluster to include
         * that block.  Otherwise, we will have to track the cluster
         * used for the allocation bitmap or inode table explicitly.
         * Normally all of these blocks are contiguous, so the special
         * case handling shouldn't be necessary except for *very*
         * unusual file system layouts.
         */
        if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
                block_cluster = EXT4_B2C(sbi,
                                         ext4_block_bitmap(sb, gdp) - start);
                if (block_cluster < num_clusters)
                        block_cluster = -1;
                else if (block_cluster == num_clusters) {
                        num_clusters++;
                        block_cluster = -1;
                }
        }

        if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
                inode_cluster = EXT4_B2C(sbi,
                                         ext4_inode_bitmap(sb, gdp) - start);
                if (inode_cluster < num_clusters)
                        inode_cluster = -1;
                else if (inode_cluster == num_clusters) {
                        num_clusters++;
                        inode_cluster = -1;
                }
        }

        itbl_blk = ext4_inode_table(sb, gdp);
        for (i = 0; i < sbi->s_itb_per_group; i++) {
                if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
                        c = EXT4_B2C(sbi, itbl_blk + i - start);
                        if ((c < num_clusters) || (c == inode_cluster) ||
                            (c == block_cluster) || (c == itbl_cluster))
                                continue;
                        if (c == num_clusters) {
                                num_clusters++;
                                continue;
                        }
                        num_clusters++;
                        itbl_cluster = c;
                }
        }

        if (block_cluster != -1)
                num_clusters++;
        if (inode_cluster != -1)
                num_clusters++;

        return num_clusters;
}
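
/*
 * Added note: in the common case the group's own block bitmap, inode
 * bitmap, and inode table sit contiguously right after the base
 * metadata, so each block either already falls below num_clusters or
 * lands exactly at num_clusters and simply extends it by one; the
 * explicit block_cluster/inode_cluster/itbl_cluster tracking above
 * only fires for discontiguous layouts.
 */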
static unsigned int num_clusters_in_group(struct super_block *sb,
                                          ext4_group_t block_group)
{
        unsigned int blocks;

        if (block_group == ext4_get_groups_count(sb) - 1) {
                /*
                 * Even though mke2fs always initializes the first and
                 * last group, just in case some other tool was used,
                 * we need to make sure we calculate the right free
                 * blocks
                 */
                blocks = ext4_blocks_count(EXT4_SB(sb)->s_es) -
                        ext4_group_first_block_no(sb, block_group);
        } else
                blocks = EXT4_BLOCKS_PER_GROUP(sb);
        return EXT4_NUM_B2C(EXT4_SB(sb), blocks);
}
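
/*
 * Worked example (added, assuming 32768 blocks per group, no bigalloc,
 * and s_first_data_block == 0): a 100000-block filesystem has groups
 * 0-3; for the last group this returns 100000 - 3 * 32768 = 1696
 * clusters rather than a full group's 32768.
 */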
/* Initializes an uninitialized block bitmap */
void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
                            ext4_group_t block_group,
                            struct ext4_group_desc *gdp)
{
        unsigned int bit, bit_max;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_fsblk_t start, tmp;
        int flex_bg = 0;
        struct ext4_group_info *grp;

        J_ASSERT_BH(bh, buffer_locked(bh));

        /* If checksum is bad mark all blocks used to prevent allocation
         * essentially implementing a per-group read-only flag. */
        if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
                ext4_error(sb, "Checksum bad for group %u", block_group);
                grp = ext4_get_group_info(sb, block_group);
                set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
                set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
                return;
        }
        memset(bh->b_data, 0, sb->s_blocksize);

        bit_max = ext4_num_base_meta_clusters(sb, block_group);
        for (bit = 0; bit < bit_max; bit++)
                ext4_set_bit(bit, bh->b_data);

        start = ext4_group_first_block_no(sb, block_group);

        if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
                flex_bg = 1;

        /* Set bits for block and inode bitmaps, and inode table */
        tmp = ext4_block_bitmap(sb, gdp);
        if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
                ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

        tmp = ext4_inode_bitmap(sb, gdp);
        if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
                ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

        tmp = ext4_inode_table(sb, gdp);
        for (; tmp < ext4_inode_table(sb, gdp) +
                     sbi->s_itb_per_group; tmp++) {
                if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
                        ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
        }

        /*
         * Also if the number of blocks within the group is less than
         * the blocksize * 8 ( which is the size of bitmap ), set rest
         * of the block bitmap to 1
         */
        ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
                             sb->s_blocksize * 8, bh->b_data);
        ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
        ext4_group_desc_csum_set(sb, block_group, gdp);
}
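
/*
 * Added note on ext4_mark_bitmap_end(): the bitmap block always covers
 * blocksize * 8 bits (32768 for 4KiB blocks), but a short last group
 * may contain fewer clusters. Continuing the example above, a
 * 1696-cluster final group has bits 1696..32767 padded with 1s so the
 * allocator never hands out blocks past the end of the filesystem.
 */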
/* Return the number of free blocks in a block group.  It is used when
 * the block bitmap is uninitialized, so we can't just count the bits
 * in the bitmap.
 */
unsigned ext4_free_clusters_after_init(struct super_block *sb,
                                       ext4_group_t block_group,
                                       struct ext4_group_desc *gdp)
{
        return num_clusters_in_group(sb, block_group) -
                ext4_num_overhead_clusters(sb, block_group, gdp);
}
/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * blocks groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.  The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */

/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
                                             ext4_group_t block_group,
                                             struct buffer_head **bh)
{
        unsigned int group_desc;
        unsigned int offset;
        ext4_group_t ngroups = ext4_get_groups_count(sb);
        struct ext4_group_desc *desc;
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        if (block_group >= ngroups) {
                ext4_error(sb, "block_group >= groups_count - block_group = %u,"
                           " groups_count = %u", block_group, ngroups);
                return NULL;
        }

        group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
        offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
        if (!sbi->s_group_desc[group_desc]) {
                ext4_error(sb, "Group descriptor not loaded - "
                           "block_group = %u, group_desc = %u, desc = %u",
                           block_group, group_desc, offset);
                return NULL;
        }

        desc = (struct ext4_group_desc *)(
                (__u8 *)sbi->s_group_desc[group_desc]->b_data +
                offset * EXT4_DESC_SIZE(sb));
        if (bh)
                *bh = sbi->s_group_desc[group_desc];
        return desc;
}
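
/*
 * Worked example of the descriptor indexing above (added): assuming
 * 4KiB blocks and 64-byte descriptors (64bit feature enabled),
 * EXT4_DESC_PER_BLOCK(sb) == 64, so block group 1000 is found in
 * descriptor block 1000 >> 6 == 15 at offset 1000 & 63 == 40 within
 * that block; with 32-byte descriptors it would be block 7, offset 104.
 */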
/*
 * Return the block number which was discovered to be invalid, or 0 if
 * the block bitmap is valid.
 */
static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
                                            struct ext4_group_desc *desc,
                                            ext4_group_t block_group,
                                            struct buffer_head *bh)
{
        ext4_grpblk_t offset;
        ext4_grpblk_t next_zero_bit;
        ext4_fsblk_t blk;
        ext4_fsblk_t group_first_block;

        if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
                /* with FLEX_BG, the inode/block bitmaps and itable
                 * blocks may not be in the group at all
                 * so the bitmap validation will be skipped for those groups
                 * or it has to also read the block group where the bitmaps
                 * are located to verify they are set.
                 */
                return 0;
        }
        group_first_block = ext4_group_first_block_no(sb, block_group);

        /* check whether block bitmap block number is set */
        blk = ext4_block_bitmap(sb, desc);
        offset = blk - group_first_block;
        if (!ext4_test_bit(offset, bh->b_data))
                /* bad block bitmap */
                return blk;

        /* check whether the inode bitmap block number is set */
        blk = ext4_inode_bitmap(sb, desc);
        offset = blk - group_first_block;
        if (!ext4_test_bit(offset, bh->b_data))
                /* bad block bitmap */
                return blk;

        /* check whether the inode table block number is set */
        blk = ext4_inode_table(sb, desc);
        offset = blk - group_first_block;
        next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
                        offset + EXT4_SB(sb)->s_itb_per_group,
                        offset);
        if (next_zero_bit < offset + EXT4_SB(sb)->s_itb_per_group)
                /* bad bitmap for inode tables */
                return blk;
        return 0;
}
void ext4_validate_block_bitmap(struct super_block *sb,
                                struct ext4_group_desc *desc,
                                ext4_group_t block_group,
                                struct buffer_head *bh)
{
        ext4_fsblk_t blk;
        struct ext4_group_info *grp = ext4_get_group_info(sb, block_group);

        if (buffer_verified(bh))
                return;

        ext4_lock_group(sb, block_group);
        blk = ext4_valid_block_bitmap(sb, desc, block_group, bh);
        if (unlikely(blk != 0)) {
                ext4_unlock_group(sb, block_group);
                ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
                           block_group, blk);
                set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
                return;
        }
        if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
                        desc, bh))) {
                ext4_unlock_group(sb, block_group);
                ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
                set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
                return;
        }
        set_buffer_verified(bh);
        ext4_unlock_group(sb, block_group);
}
/**
 * ext4_read_block_bitmap_nowait()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, and validate the
 * bits for block/inode/inode tables are set in the bitmaps
 *
 * Return buffer_head on success or NULL in case of failure.
 */
struct buffer_head *
ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
{
        struct ext4_group_desc *desc;
        struct buffer_head *bh;
        ext4_fsblk_t bitmap_blk;

        desc = ext4_get_group_desc(sb, block_group, NULL);
        if (!desc)
                return NULL;
        bitmap_blk = ext4_block_bitmap(sb, desc);
        bh = sb_getblk(sb, bitmap_blk);
        if (unlikely(!bh)) {
                ext4_error(sb, "Cannot get buffer for block bitmap - "
                           "block_group = %u, block_bitmap = %llu",
                           block_group, bitmap_blk);
                return NULL;
        }

        if (bitmap_uptodate(bh))
                goto verify;

        lock_buffer(bh);
        if (bitmap_uptodate(bh)) {
                unlock_buffer(bh);
                goto verify;
        }
        ext4_lock_group(sb, block_group);
        if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                ext4_init_block_bitmap(sb, bh, block_group, desc);
                set_bitmap_uptodate(bh);
                set_buffer_uptodate(bh);
                ext4_unlock_group(sb, block_group);
                unlock_buffer(bh);
                return bh;
        }
        ext4_unlock_group(sb, block_group);
        if (buffer_uptodate(bh)) {
                /*
                 * if not uninit, then if the buffer is uptodate the
                 * bitmap is also uptodate
                 */
                set_bitmap_uptodate(bh);
                unlock_buffer(bh);
                goto verify;
        }
        /*
         * submit the buffer_head for reading
         */
        set_buffer_new(bh);
        trace_ext4_read_block_bitmap_load(sb, block_group);
        bh->b_end_io = ext4_end_bitmap_read;
        get_bh(bh);
        submit_bh(READ | REQ_META | REQ_PRIO, bh);
        return bh;
verify:
        ext4_validate_block_bitmap(sb, desc, block_group, bh);
        if (buffer_verified(bh))
                return bh;
        put_bh(bh);
        return NULL;
}
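
/*
 * Added note: bitmap_uptodate() is deliberately tested twice above --
 * once without the buffer lock as a fast path, and again under
 * lock_buffer() in case another task initialized or read the bitmap
 * while we waited for the lock. Only the losing task falls through to
 * the UNINIT handling or the actual submit_bh() read.
 */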
/* Returns 0 on success, 1 on error */
int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
                           struct buffer_head *bh)
{
        struct ext4_group_desc *desc;

        if (!buffer_new(bh))
                return 0;
        desc = ext4_get_group_desc(sb, block_group, NULL);
        if (!desc)
                return 1;
        wait_on_buffer(bh);
        if (!buffer_uptodate(bh)) {
                ext4_error(sb, "Cannot read block bitmap - "
                           "block_group = %u, block_bitmap = %llu",
                           block_group, (unsigned long long) bh->b_blocknr);
                return 1;
        }
        clear_buffer_new(bh);
        /* Panic or remount fs read-only if block bitmap is invalid */
        ext4_validate_block_bitmap(sb, desc, block_group, bh);
        /* ...but check for error just in case errors=continue. */
        return !buffer_verified(bh);
}
struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
        struct buffer_head *bh;

        bh = ext4_read_block_bitmap_nowait(sb, block_group);
        if (!bh)
                return NULL;
        if (ext4_wait_block_bitmap(sb, block_group, bh)) {
                put_bh(bh);
                return NULL;
        }
        return bh;
}
/**
 * ext4_has_free_clusters()
 * @sbi:	in-core super block structure.
 * @nclusters:	number of needed clusters
 * @flags:	flags from ext4_mb_new_blocks()
 *
 * Check if filesystem has nclusters free & available for allocation.
 * On success return 1, return 0 on failure.
 */
static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
                                  s64 nclusters, unsigned int flags)
{
        s64 free_clusters, dirty_clusters, rsv, resv_clusters;
        struct percpu_counter *fcc = &sbi->s_freeclusters_counter;
        struct percpu_counter *dcc = &sbi->s_dirtyclusters_counter;

        free_clusters  = percpu_counter_read_positive(fcc);
        dirty_clusters = percpu_counter_read_positive(dcc);
        resv_clusters = atomic64_read(&sbi->s_resv_clusters);

        /*
         * r_blocks_count should always be a multiple of the cluster ratio so
         * we are safe to do a plain bit shift only.
         */
        rsv = (ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits) +
              resv_clusters;

        if (free_clusters - (nclusters + rsv + dirty_clusters) <
                                        EXT4_FREECLUSTERS_WATERMARK) {
                free_clusters  = percpu_counter_sum_positive(fcc);
                dirty_clusters = percpu_counter_sum_positive(dcc);
        }
        /* Check whether we have space after accounting for current
         * dirty clusters & root reserved clusters.
         */
        if (free_clusters >= (rsv + nclusters + dirty_clusters))
                return 1;

        /* Hm, nope.  Are (enough) root reserved clusters available? */
        if (uid_eq(sbi->s_resuid, current_fsuid()) ||
            (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
            capable(CAP_SYS_RESOURCE) ||
            (flags & EXT4_MB_USE_ROOT_BLOCKS)) {

                if (free_clusters >= (nclusters + dirty_clusters +
                                      resv_clusters))
                        return 1;
        }
        /* No free blocks. Let's see if we can dip into reserved pool */
        if (flags & EXT4_MB_USE_RESERVED) {
                if (free_clusters >= (nclusters + dirty_clusters))
                        return 1;
        }

        return 0;
}
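
/*
 * Added note on the watermark fast path above: the cheap
 * percpu_counter_read_positive() value can be stale by roughly the
 * per-CPU batch size times the number of CPUs. Only when the apparent
 * headroom free - (nclusters + rsv + dirty) drops below
 * EXT4_FREECLUSTERS_WATERMARK do we pay for the exact all-CPU
 * percpu_counter_sum_positive() before deciding to fail with ENOSPC.
 */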
int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
                             s64 nclusters, unsigned int flags)
{
        if (ext4_has_free_clusters(sbi, nclusters, flags)) {
                percpu_counter_add(&sbi->s_dirtyclusters_counter, nclusters);
                return 0;
        } else
                return -ENOSPC;
}
/**
 * ext4_should_retry_alloc()
 * @sb:			super block
 * @retries:		number of attempts that have been made
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 *
 * if the total number of retries exceeds three times, return FALSE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
        if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) ||
            (*retries)++ > 3 ||
            !EXT4_SB(sb)->s_journal)
                return 0;

        jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

        return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
}
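
/*
 * Typical caller pattern (added sketch; names are illustrative, not
 * from this file):
 *
 *	int retries = 0;
 * retry:
 *	err = ext4_do_some_allocation(...);
 *	if (err == -ENOSPC &&
 *	    ext4_should_retry_alloc(inode->i_sb, &retries))
 *		goto retry;
 *
 * Forcing a commit of the running transaction lets blocks freed but
 * not yet reclaimable become allocatable again before giving up.
 */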
/*
 * ext4_new_meta_blocks() -- allocate block for meta data (indexing) blocks
 *
 * @handle:		handle to this transaction
 * @inode:		file inode
 * @goal:		given target block(filesystem wide)
 * @count:		pointer to total number of clusters needed
 * @errp:		error code
 *
 * Return the first allocated block number on success; *count is updated
 * with the total number of allocated clusters, and any error is stored
 * in *errp.
 */
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
                                  ext4_fsblk_t goal, unsigned int flags,
                                  unsigned long *count, int *errp)
{
        struct ext4_allocation_request ar;
        ext4_fsblk_t ret;

        memset(&ar, 0, sizeof(ar));
        /* Fill with neighbour allocated blocks */
        ar.inode = inode;
        ar.goal = goal;
        ar.len = count ? *count : 1;
        ar.flags = flags;

        ret = ext4_mb_new_blocks(handle, &ar, errp);
        if (count)
                *count = ar.len;
        /*
         * Account for the allocated meta blocks.  We will never
         * fail EDQUOT for metadata, but we do account for it.
         */
        if (!(*errp) &&
            ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
                spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
                EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
                spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
                dquot_alloc_block_nofail(inode,
                        EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
        }
        return ret;
}
/**
 * ext4_count_free_clusters() -- count filesystem free clusters
 * @sb:		superblock
 *
 * Adds up the number of free clusters from each block group.
 */
ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
{
        ext4_fsblk_t desc_count;
        struct ext4_group_desc *gdp;
        ext4_group_t i;
        ext4_group_t ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
        struct ext4_super_block *es;
        ext4_fsblk_t bitmap_count;
        unsigned int x;
        struct buffer_head *bitmap_bh = NULL;

        es = EXT4_SB(sb)->s_es;
        desc_count = 0;
        bitmap_count = 0;
        gdp = NULL;

        for (i = 0; i < ngroups; i++) {
                gdp = ext4_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
                desc_count += ext4_free_group_clusters(sb, gdp);
                brelse(bitmap_bh);
                bitmap_bh = ext4_read_block_bitmap(sb, i);
                if (bitmap_bh == NULL)
                        continue;

                x = ext4_count_free(bitmap_bh->b_data,
                                    EXT4_BLOCKS_PER_GROUP(sb) / 8);
                printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
                        i, ext4_free_group_clusters(sb, gdp), x);
                bitmap_count += x;
        }
        brelse(bitmap_bh);
        printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
               ", computed = %llu, %llu\n",
               EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
               desc_count, bitmap_count);
        return bitmap_count;
#else
        desc_count = 0;
        for (i = 0; i < ngroups; i++) {
                gdp = ext4_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
                desc_count += ext4_free_group_clusters(sb, gdp);
        }

        return desc_count;
#endif
}
static inline int test_root(ext4_group_t a, int b)
{
        int num = b;

        while (a > num)
                num *= b;
        return num == a;
}

static int ext4_group_sparse(ext4_group_t group)
{
        if (group <= 1)
                return 1;
        if (!(group & 1))
                return 0;
        return (test_root(group, 7) || test_root(group, 5) ||
                test_root(group, 3));
}
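
/*
 * Added example: with sparse_super enabled, the groups holding
 * superblock/descriptor backups are 0 and 1 plus powers of 3, 5 and 7:
 * 0, 1, 3, 5, 7, 9, 25, 27, 49, 81, 125, 243, 343, ... All other
 * groups answer 0 from ext4_bg_has_super() below.
 */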
/**
 * ext4_bg_has_super - number of blocks used by the superblock in group
 * @sb: superblock for filesystem
 * @group: group number to check
 *
 * Return the number of blocks used by the superblock (primary or backup)
 * in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
        if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
                                EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
                        !ext4_group_sparse(group))
                return 0;
        return 1;
}
static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
                                          ext4_group_t group)
{
        unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
        ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
        ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

        if (group == first || group == first + 1 || group == last)
                return 1;
        return 0;
}
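
/*
 * Added note: under META_BG, each metagroup of EXT4_DESC_PER_BLOCK(sb)
 * groups stores its single descriptor block in its first group, with
 * backup copies in the second and last groups of the metagroup --
 * exactly the three cases returning 1 above.
 */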
static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
                                            ext4_group_t group)
{
        if (!ext4_bg_has_super(sb, group))
                return 0;

        if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG))
                return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
        else
                return EXT4_SB(sb)->s_gdb_count;
}
/**
 * ext4_bg_num_gdb - number of blocks used by the group table in group
 * @sb: superblock for filesystem
 * @group: group number to check
 *
 * Return the number of blocks used by the group descriptor table
 * (primary or backup) in this group.  In the future there may be a
 * different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
        unsigned long first_meta_bg =
                        le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
        unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

        if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
                        metagroup < first_meta_bg)
                return ext4_bg_num_gdb_nometa(sb, group);

        return ext4_bg_num_gdb_meta(sb, group);
}
/*
 * This function returns the number of file system metadata clusters at
 * the beginning of a block group, including the reserved gdt blocks.
 */
static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
                                            ext4_group_t block_group)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        unsigned num;

        /* Check for superblock and gdt backups in this group */
        num = ext4_bg_has_super(sb, block_group);

        if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
            block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
                          sbi->s_desc_per_block) {
                if (num) {
                        num += ext4_bg_num_gdb(sb, block_group);
                        num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
                }
        } else { /* For META_BG_BLOCK_GROUPS */
                num += ext4_bg_num_gdb(sb, block_group);
        }
        return EXT4_NUM_B2C(sbi, num);
}
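
/*
 * Worked example (added, assuming a non-META_BG 4KiB-block filesystem
 * where mkfs reserved 1024 gdt blocks for online growth): a group with
 * a superblock backup and, say, 13 descriptor blocks contributes
 * num = 1 + 13 + 1024 = 1038 blocks; groups without a backup
 * contribute 0. Without bigalloc, EXT4_NUM_B2C() leaves the count
 * unchanged.
 */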
/**
 * ext4_inode_to_goal_block - return a hint for block allocation
 * @inode: inode for block allocation
 *
 * Return the ideal location to start allocating blocks for a
 * newly created inode.
 */
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        ext4_group_t block_group;
        ext4_grpblk_t colour;
        int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
        ext4_fsblk_t bg_start;
        ext4_fsblk_t last_block;

        block_group = ei->i_block_group;
        if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
                /*
                 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
                 * block groups per flexgroup, reserve the first block
                 * group for directories and special files.  Regular
                 * files will start at the second block group.  This
                 * tends to speed up directory access and improves
                 * fsck times.
                 */
                block_group &= ~(flex_size-1);
                if (S_ISREG(inode->i_mode))
                        block_group++;
        }
        bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
        last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

        /*
         * If we are doing delayed allocation, we don't need to take
         * colour into account.
         */
        if (test_opt(inode->i_sb, DELALLOC))
                return bg_start;

        if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
                colour = (current->pid % 16) *
                        (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        else
                colour = (current->pid % 16) * ((last_block - bg_start) / 16);
        return bg_start + colour;
}