/*
 *  linux/fs/ext4/balloc.c
 *
 *  Copyright (C) 1992, 1993, 1994, 1995
 *  Remy Card (card@masi.ibp.fr)
 *  Laboratoire MASI - Institut Blaise Pascal
 *  Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */
#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "mballoc.h"

#include <trace/events/ext4.h>
static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
                                            ext4_group_t block_group);
/*
 * balloc.c contains the blocks allocation and deallocation routines
 */
/*
 * Calculate block group number for a given block number
 */
ext4_group_t ext4_get_group_number(struct super_block *sb,
                                   ext4_fsblk_t block)
{
        ext4_group_t group;

        if (test_opt2(sb, STD_GROUP_SIZE))
                group = (block -
                         le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) >>
                        (EXT4_BLOCK_SIZE_BITS(sb) + EXT4_CLUSTER_BITS(sb) + 3);
        else
                ext4_get_group_no_and_offset(sb, block, &group, NULL);
        return group;
}
/*
 * Calculate the block group number and offset into the block/cluster
 * allocation bitmap, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
                ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;
        ext4_grpblk_t offset;

        blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
        offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb)) >>
                EXT4_SB(sb)->s_cluster_bits;
        if (offsetp)
                *offsetp = offset;
        if (blockgrpp)
                *blockgrpp = blocknr;
}
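
/*
 * Worked example for the two helpers above (illustrative numbers only,
 * assuming 4KiB blocks, the default 32768 blocks per group, a cluster
 * size equal to the block size, and s_first_data_block == 0): block
 * 100000 falls in group 100000 / 32768 = 3, at offset
 * 100000 - 3 * 32768 = 1696 within that group's bitmap.  With bigalloc
 * enabled the offset is additionally shifted right by s_cluster_bits.
 */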
/*
 * Check whether the 'block' lives within the 'block_group'. Returns 1 if so
 * and 0 otherwise.
 */
static inline int ext4_block_in_group(struct super_block *sb,
                                      ext4_fsblk_t block,
                                      ext4_group_t block_group)
{
        ext4_group_t actual_group;

        actual_group = ext4_get_group_number(sb, block);
        return (actual_group == block_group) ? 1 : 0;
}
/* Return the number of clusters used for file system metadata; this
 * represents the overhead needed by the file system.
 */
static unsigned ext4_num_overhead_clusters(struct super_block *sb,
                                           ext4_group_t block_group,
                                           struct ext4_group_desc *gdp)
{
        unsigned num_clusters;
        int block_cluster = -1, inode_cluster = -1, itbl_cluster = -1, i, c;
        ext4_fsblk_t start = ext4_group_first_block_no(sb, block_group);
        ext4_fsblk_t itbl_blk;
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        /* This is the number of clusters used by the superblock,
         * block group descriptors, and reserved block group
         * descriptor blocks */
        num_clusters = ext4_num_base_meta_clusters(sb, block_group);

        /*
         * For the allocation bitmaps and inode table, we first need
         * to check to see if the block is in the block group.  If it
         * is, then check to see if the cluster is already accounted
         * for in the clusters used for the base metadata cluster, or
         * if we can increment the base metadata cluster to include
         * that block.  Otherwise, we will have to track the cluster
         * used for the allocation bitmap or inode table explicitly.
         * Normally all of these blocks are contiguous, so the special
         * case handling shouldn't be necessary except for *very*
         * unusual file system layouts.
         */
        if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
                block_cluster = EXT4_B2C(sbi,
                                         ext4_block_bitmap(sb, gdp) - start);
                if (block_cluster < num_clusters)
                        block_cluster = -1;
                else if (block_cluster == num_clusters) {
                        num_clusters++;
                        block_cluster = -1;
                }
        }

        if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
                inode_cluster = EXT4_B2C(sbi,
                                         ext4_inode_bitmap(sb, gdp) - start);
                if (inode_cluster < num_clusters)
                        inode_cluster = -1;
                else if (inode_cluster == num_clusters) {
                        num_clusters++;
                        inode_cluster = -1;
                }
        }

        itbl_blk = ext4_inode_table(sb, gdp);
        for (i = 0; i < sbi->s_itb_per_group; i++) {
                if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
                        c = EXT4_B2C(sbi, itbl_blk + i - start);
                        if ((c < num_clusters) || (c == inode_cluster) ||
                            (c == block_cluster) || (c == itbl_cluster))
                                continue;
                        if (c == num_clusters) {
                                num_clusters++;
                                continue;
                        }
                        num_clusters++;
                        itbl_cluster = c;
                }
        }

        if (block_cluster != -1)
                num_clusters++;
        if (inode_cluster != -1)
                num_clusters++;

        return num_clusters;
}
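
/*
 * Example of what the overhead above adds up to (illustrative only):
 * a group carrying a superblock backup counts one cluster for the
 * superblock plus the group descriptor blocks and reserved GDT blocks,
 * and every group additionally counts its block bitmap, inode bitmap
 * and s_itb_per_group inode table blocks when those blocks actually
 * live inside the group (with flex_bg they may live elsewhere).
 */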
static unsigned int num_clusters_in_group(struct super_block *sb,
                                          ext4_group_t block_group)
{
        unsigned int blocks;

        if (block_group == ext4_get_groups_count(sb) - 1) {
                /*
                 * Even though mke2fs always initializes the first and
                 * last group, just in case some other tool was used,
                 * we need to make sure we calculate the right free
                 * blocks
                 */
                blocks = ext4_blocks_count(EXT4_SB(sb)->s_es) -
                        ext4_group_first_block_no(sb, block_group);
        } else
                blocks = EXT4_BLOCKS_PER_GROUP(sb);
        return EXT4_NUM_B2C(EXT4_SB(sb), blocks);
}
/* Initializes an uninitialized block bitmap */
static int ext4_init_block_bitmap(struct super_block *sb,
                                  struct buffer_head *bh,
                                  ext4_group_t block_group,
                                  struct ext4_group_desc *gdp)
{
        unsigned int bit, bit_max;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_fsblk_t start, tmp;
        int flex_bg = 0;
        struct ext4_group_info *grp;

        J_ASSERT_BH(bh, buffer_locked(bh));

        /* If checksum is bad mark all blocks used to prevent allocation
         * essentially implementing a per-group read-only flag. */
        if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
                grp = ext4_get_group_info(sb, block_group);
                if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
                        percpu_counter_sub(&sbi->s_freeclusters_counter,
                                           grp->bb_free);
                set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
                if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
                        int count;
                        count = ext4_free_inodes_count(sb, gdp);
                        percpu_counter_sub(&sbi->s_freeinodes_counter,
                                           count);
                }
                set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
                return -EFSBADCRC;
        }
        memset(bh->b_data, 0, sb->s_blocksize);

        bit_max = ext4_num_base_meta_clusters(sb, block_group);
        for (bit = 0; bit < bit_max; bit++)
                ext4_set_bit(bit, bh->b_data);

        start = ext4_group_first_block_no(sb, block_group);

        if (ext4_has_feature_flex_bg(sb))
                flex_bg = 1;

        /* Set bits for block and inode bitmaps, and inode table */
        tmp = ext4_block_bitmap(sb, gdp);
        if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
                ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

        tmp = ext4_inode_bitmap(sb, gdp);
        if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
                ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

        tmp = ext4_inode_table(sb, gdp);
        for (; tmp < ext4_inode_table(sb, gdp) +
                     sbi->s_itb_per_group; tmp++) {
                if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
                        ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
        }

        /*
         * Also if the number of blocks within the group is less than
         * the blocksize * 8 ( which is the size of bitmap ), set rest
         * of the block bitmap to 1
         */
        ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
                             sb->s_blocksize * 8, bh->b_data);
        ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
        ext4_group_desc_csum_set(sb, block_group, gdp);
        return 0;
}
/* Return the number of free blocks in a block group.  It is used when
 * the block bitmap is uninitialized, so we can't just count the bits
 * in the bitmap.
 */
unsigned ext4_free_clusters_after_init(struct super_block *sb,
                                       ext4_group_t block_group,
                                       struct ext4_group_desc *gdp)
{
        return num_clusters_in_group(sb, block_group) -
                ext4_num_overhead_clusters(sb, block_group, gdp);
}
/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.  The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */
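
/*
 * Illustrative layout of one block group on a non-flex_bg filesystem
 * (the exact block counts depend on the mkfs parameters):
 *
 *   | super | group descs | reserved GDT | block bitmap | inode bitmap |
 *   | inode table | data blocks ...                                    |
 *
 * Groups that carry no superblock backup start directly with their
 * bitmaps and inode table.
 */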
/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
                                             ext4_group_t block_group,
                                             struct buffer_head **bh)
{
        unsigned int group_desc;
        unsigned int offset;
        ext4_group_t ngroups = ext4_get_groups_count(sb);
        struct ext4_group_desc *desc;
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        if (block_group >= ngroups) {
                ext4_error(sb, "block_group >= groups_count - block_group = %u,"
                           " groups_count = %u", block_group, ngroups);
                return NULL;
        }

        group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
        offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
        if (!sbi->s_group_desc[group_desc]) {
                ext4_error(sb, "Group descriptor not loaded - "
                           "block_group = %u, group_desc = %u, desc = %u",
                           block_group, group_desc, offset);
                return NULL;
        }

        desc = (struct ext4_group_desc *)(
                (__u8 *)sbi->s_group_desc[group_desc]->b_data +
                offset * EXT4_DESC_SIZE(sb));
        if (bh)
                *bh = sbi->s_group_desc[group_desc];
        return desc;
}
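
/*
 * Typical caller pattern for ext4_get_group_desc() (minimal sketch;
 * the local variable names are hypothetical):
 *
 *	struct buffer_head *gd_bh;
 *	struct ext4_group_desc *gdp;
 *
 *	gdp = ext4_get_group_desc(sb, group, &gd_bh);
 *	if (!gdp)
 *		return -EFSCORRUPTED;
 *
 * Callers that modify *gdp obtain journal write access to gd_bh and
 * mark it dirty afterwards.
 */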
/*
 * Return the block number which was discovered to be invalid, or 0 if
 * the block bitmap is valid.
 */
static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
                                            struct ext4_group_desc *desc,
                                            ext4_group_t block_group,
                                            struct buffer_head *bh)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_grpblk_t offset;
        ext4_grpblk_t next_zero_bit;
        ext4_fsblk_t blk;
        ext4_fsblk_t group_first_block;

        if (ext4_has_feature_flex_bg(sb)) {
                /* with FLEX_BG, the inode/block bitmaps and itable
                 * blocks may not be in the group at all
                 * so the bitmap validation will be skipped for those groups
                 * or it has to also read the block group where the bitmaps
                 * are located to verify they are set.
                 */
                return 0;
        }
        group_first_block = ext4_group_first_block_no(sb, block_group);

        /* check whether block bitmap block number is set */
        blk = ext4_block_bitmap(sb, desc);
        offset = blk - group_first_block;
        if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
                /* bad block bitmap */
                return blk;

        /* check whether the inode bitmap block number is set */
        blk = ext4_inode_bitmap(sb, desc);
        offset = blk - group_first_block;
        if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
                /* bad block bitmap */
                return blk;

        /* check whether the inode table block number is set */
        blk = ext4_inode_table(sb, desc);
        offset = blk - group_first_block;
        next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
                        EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group),
                        EXT4_B2C(sbi, offset));
        if (next_zero_bit <
            EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group))
                /* bad bitmap for inode tables */
                return blk;
        return 0;
}
static int ext4_validate_block_bitmap(struct super_block *sb,
                                      struct ext4_group_desc *desc,
                                      ext4_group_t block_group,
                                      struct buffer_head *bh)
{
        ext4_fsblk_t blk;
        struct ext4_group_info *grp = ext4_get_group_info(sb, block_group);
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        if (buffer_verified(bh))
                return 0;
        if (EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
                return -EFSCORRUPTED;

        ext4_lock_group(sb, block_group);
        if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
                                                    desc, bh))) {
                ext4_unlock_group(sb, block_group);
                ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
                if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
                        percpu_counter_sub(&sbi->s_freeclusters_counter,
                                           grp->bb_free);
                set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
                return -EFSBADCRC;
        }
        blk = ext4_valid_block_bitmap(sb, desc, block_group, bh);
        if (unlikely(blk != 0)) {
                ext4_unlock_group(sb, block_group);
                ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
                           block_group, blk);
                if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
                        percpu_counter_sub(&sbi->s_freeclusters_counter,
                                           grp->bb_free);
                set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
                return -EFSCORRUPTED;
        }
        set_buffer_verified(bh);
        ext4_unlock_group(sb, block_group);
        return 0;
}
/**
 * ext4_read_block_bitmap_nowait()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, and validate that the
 * bits for the block/inode/inode table blocks are set in the bitmap.
 *
 * Return buffer_head on success or an ERR_PTR in case of failure.
 */
struct buffer_head *
ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
{
        struct ext4_group_desc *desc;
        struct buffer_head *bh;
        ext4_fsblk_t bitmap_blk;
        int err;

        desc = ext4_get_group_desc(sb, block_group, NULL);
        if (!desc)
                return ERR_PTR(-EFSCORRUPTED);
        bitmap_blk = ext4_block_bitmap(sb, desc);
        bh = sb_getblk(sb, bitmap_blk);
        if (unlikely(!bh)) {
                ext4_error(sb, "Cannot get buffer for block bitmap - "
                           "block_group = %u, block_bitmap = %llu",
                           block_group, bitmap_blk);
                return ERR_PTR(-ENOMEM);
        }

        if (bitmap_uptodate(bh))
                goto verify;

        lock_buffer(bh);
        if (bitmap_uptodate(bh)) {
                unlock_buffer(bh);
                goto verify;
        }
        ext4_lock_group(sb, block_group);
        if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                err = ext4_init_block_bitmap(sb, bh, block_group, desc);
                set_bitmap_uptodate(bh);
                set_buffer_uptodate(bh);
                ext4_unlock_group(sb, block_group);
                unlock_buffer(bh);
                if (err) {
                        ext4_error(sb, "Failed to init block bitmap for group "
                                   "%u: %d", block_group, err);
                        goto out;
                }
                goto verify;
        }
        ext4_unlock_group(sb, block_group);
        if (buffer_uptodate(bh)) {
                /*
                 * if not uninit, and bh is uptodate,
                 * the bitmap is also uptodate
                 */
                set_bitmap_uptodate(bh);
                unlock_buffer(bh);
                goto verify;
        }
        /*
         * submit the buffer_head for reading
         */
        set_buffer_new(bh);
        trace_ext4_read_block_bitmap_load(sb, block_group);
        bh->b_end_io = ext4_end_bitmap_read;
        get_bh(bh);
        submit_bh(READ | REQ_META | REQ_PRIO, bh);
        return bh;
verify:
        err = ext4_validate_block_bitmap(sb, desc, block_group, bh);
        if (err)
                goto out;
        return bh;
out:
        put_bh(bh);
        return ERR_PTR(err);
}
/* Returns 0 on success, -errno on error */
int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
                           struct buffer_head *bh)
{
        struct ext4_group_desc *desc;

        if (!buffer_new(bh))
                return 0;
        desc = ext4_get_group_desc(sb, block_group, NULL);
        if (!desc)
                return -EFSCORRUPTED;
        wait_on_buffer(bh);
        if (!buffer_uptodate(bh)) {
                ext4_error(sb, "Cannot read block bitmap - "
                           "block_group = %u, block_bitmap = %llu",
                           block_group, (unsigned long long) bh->b_blocknr);
                return -EIO;
        }
        clear_buffer_new(bh);
        /* Panic or remount fs read-only if block bitmap is invalid */
        return ext4_validate_block_bitmap(sb, desc, block_group, bh);
}
struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
        struct buffer_head *bh;
        int err;

        bh = ext4_read_block_bitmap_nowait(sb, block_group);
        if (IS_ERR(bh))
                return bh;
        err = ext4_wait_block_bitmap(sb, block_group, bh);
        if (err) {
                put_bh(bh);
                return ERR_PTR(err);
        }
        return bh;
}
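
/*
 * The nowait/wait pair above lets callers start reads for several block
 * bitmaps before waiting on any of them.  A minimal single-group sketch,
 * equivalent to ext4_read_block_bitmap() (local names are hypothetical):
 *
 *	bh = ext4_read_block_bitmap_nowait(sb, group);
 *	if (IS_ERR(bh))
 *		return PTR_ERR(bh);
 *	err = ext4_wait_block_bitmap(sb, group, bh);
 *	if (err) {
 *		put_bh(bh);
 *		return err;
 *	}
 *	...use bh->b_data, then put_bh(bh)...
 */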
/**
 * ext4_has_free_clusters()
 * @sbi:	in-core super block structure.
 * @nclusters:	number of needed clusters
 * @flags:	flags from ext4_mb_new_blocks()
 *
 * Check if filesystem has nclusters free & available for allocation.
 * On success return 1, return 0 on failure.
 */
static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
                                  s64 nclusters, unsigned int flags)
{
        s64 free_clusters, dirty_clusters, rsv, resv_clusters;
        struct percpu_counter *fcc = &sbi->s_freeclusters_counter;
        struct percpu_counter *dcc = &sbi->s_dirtyclusters_counter;

        free_clusters  = percpu_counter_read_positive(fcc);
        dirty_clusters = percpu_counter_read_positive(dcc);
        resv_clusters = atomic64_read(&sbi->s_resv_clusters);

        /*
         * r_blocks_count should always be a multiple of the cluster ratio so
         * we are safe to do a plain bit shift only.
         */
        rsv = (ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits) +
              resv_clusters;

        if (free_clusters - (nclusters + rsv + dirty_clusters) <
                                        EXT4_FREECLUSTERS_WATERMARK) {
                free_clusters  = percpu_counter_sum_positive(fcc);
                dirty_clusters = percpu_counter_sum_positive(dcc);
        }
        /* Check whether we have space after accounting for current
         * dirty clusters & root reserved clusters.
         */
        if (free_clusters >= (rsv + nclusters + dirty_clusters))
                return 1;

        /* Hm, nope.  Are (enough) root reserved clusters available? */
        if (uid_eq(sbi->s_resuid, current_fsuid()) ||
            (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
            capable(CAP_SYS_RESOURCE) ||
            (flags & EXT4_MB_USE_ROOT_BLOCKS)) {

                if (free_clusters >= (nclusters + dirty_clusters +
                                      resv_clusters))
                        return 1;
        }
        /* No free blocks. Let's see if we can dip into reserved pool */
        if (flags & EXT4_MB_USE_RESERVED) {
                if (free_clusters >= (nclusters + dirty_clusters))
                        return 1;
        }

        return 0;
}
int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
                             s64 nclusters, unsigned int flags)
{
        if (ext4_has_free_clusters(sbi, nclusters, flags)) {
                percpu_counter_add(&sbi->s_dirtyclusters_counter, nclusters);
                return 0;
        } else
                return -ENOSPC;
}
/*
 * ext4_should_retry_alloc()
 * @sb:			super block
 * @retries:		number of attempts that have been made
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 *
 * If the total number of retries exceeds three, return FALSE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
        if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) ||
            (*retries)++ > 3 ||
            !EXT4_SB(sb)->s_journal)
                return 0;

        jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

        return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
}
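
/*
 * Typical ENOSPC retry loop in callers of the allocator (illustrative
 * sketch; the surrounding function and "needed" are hypothetical):
 *
 *	int retries = 0;
 * retry:
 *	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed);
 *	...
 *	if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
 *		goto retry;
 */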
/*
 * ext4_new_meta_blocks() -- allocate block for meta data (indexing) blocks
 *
 * @handle:             handle to this transaction
 * @inode:              file inode
 * @goal:               given target block(filesystem wide)
 * @count:		pointer to total number of clusters needed
 * @errp:               error code
 *
 * Return the first allocated block number on success; *count stores the
 * total number of blocks allocated, and any error is stored in *errp.
 */
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
                                  ext4_fsblk_t goal, unsigned int flags,
                                  unsigned long *count, int *errp)
{
        struct ext4_allocation_request ar;
        ext4_fsblk_t ret;

        memset(&ar, 0, sizeof(ar));
        /* Fill with neighbour allocated blocks */
        ar.inode = inode;
        ar.goal = goal;
        ar.len = count ? *count : 1;
        ar.flags = flags;

        ret = ext4_mb_new_blocks(handle, &ar, errp);
        if (count)
                *count = ar.len;
        /*
         * Account for the allocated meta blocks.  We will never
         * fail EDQUOT for metadata, but we do account for it.
         */
        if (!(*errp) && (flags & EXT4_MB_DELALLOC_RESERVED)) {
                dquot_alloc_block_nofail(inode,
                                EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
        }
        return ret;
}
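
/*
 * Example call site (minimal sketch; "count", "err" and "goal" are
 * hypothetical locals) allocating one metadata block near "goal":
 *
 *	unsigned long count = 1;
 *	int err;
 *	ext4_fsblk_t new_blk;
 *
 *	new_blk = ext4_new_meta_blocks(handle, inode, goal, 0, &count, &err);
 *	if (!new_blk)
 *		return err;
 */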
/**
 * ext4_count_free_clusters() -- count filesystem free clusters
 * @sb:		superblock
 *
 * Adds up the number of free clusters from each block group.
 */
ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
{
        ext4_fsblk_t desc_count;
        struct ext4_group_desc *gdp;
        ext4_group_t i;
        ext4_group_t ngroups = ext4_get_groups_count(sb);
        struct ext4_group_info *grp;
#ifdef EXT4FS_DEBUG
        struct ext4_super_block *es;
        ext4_fsblk_t bitmap_count;
        unsigned int x;
        struct buffer_head *bitmap_bh = NULL;

        es = EXT4_SB(sb)->s_es;
        desc_count = 0;
        bitmap_count = 0;
        gdp = NULL;

        for (i = 0; i < ngroups; i++) {
                gdp = ext4_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
                grp = NULL;
                if (EXT4_SB(sb)->s_group_info)
                        grp = ext4_get_group_info(sb, i);
                if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
                        desc_count += ext4_free_group_clusters(sb, gdp);
                brelse(bitmap_bh);
                bitmap_bh = ext4_read_block_bitmap(sb, i);
                if (IS_ERR(bitmap_bh)) {
                        bitmap_bh = NULL;
                        continue;
                }

                x = ext4_count_free(bitmap_bh->b_data,
                                    EXT4_CLUSTERS_PER_GROUP(sb) / 8);
                printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
                        i, ext4_free_group_clusters(sb, gdp), x);
                bitmap_count += x;
        }
        brelse(bitmap_bh);
        printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
               ", computed = %llu, %llu\n",
               EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
               desc_count, bitmap_count);
        return bitmap_count;
#else
        desc_count = 0;
        for (i = 0; i < ngroups; i++) {
                gdp = ext4_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
                grp = NULL;
                if (EXT4_SB(sb)->s_group_info)
                        grp = ext4_get_group_info(sb, i);
                if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
                        desc_count += ext4_free_group_clusters(sb, gdp);
        }

        return desc_count;
#endif
}
static inline int test_root(ext4_group_t a, int b)
{
        while (1) {
                if (a < b)
                        return 0;
                if (a == b)
                        return 1;
                if ((a % b) != 0)
                        return 0;
                a = a / b;
        }
}
/**
 * ext4_bg_has_super - number of blocks used by the superblock in group
 * @sb: superblock for filesystem
 * @group: group number to check
 *
 * Return the number of blocks used by the superblock (primary or backup)
 * in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;

        if (group == 0)
                return 1;
        if (ext4_has_feature_sparse_super2(sb)) {
                if (group == le32_to_cpu(es->s_backup_bgs[0]) ||
                    group == le32_to_cpu(es->s_backup_bgs[1]))
                        return 1;
                return 0;
        }
        if ((group <= 1) || !ext4_has_feature_sparse_super(sb))
                return 1;
        if (!(group & 1))
                return 0;
        if (test_root(group, 3) || (test_root(group, 5)) ||
            test_root(group, 7))
                return 1;

        return 0;
}
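
/*
 * Worked example: with the sparse_super feature, backups live in group 0
 * plus groups whose number is a power of 3, 5 or 7, so groups
 * 0, 1, 3, 5, 7, 9, 25, 27, 49, 81, ... return 1 here and all other
 * groups return 0.
 */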
static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
                                          ext4_group_t group)
{
        unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
        ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
        ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

        if (group == first || group == first + 1 || group == last)
                return 1;
        return 0;
}
static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
                                            ext4_group_t group)
{
        if (!ext4_bg_has_super(sb, group))
                return 0;

        if (ext4_has_feature_meta_bg(sb))
                return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
        else
                return EXT4_SB(sb)->s_gdb_count;
}
/**
 * ext4_bg_num_gdb - number of blocks used by the group table in group
 * @sb: superblock for filesystem
 * @group: group number to check
 *
 * Return the number of blocks used by the group descriptor table
 * (primary or backup) in this group.  In the future there may be a
 * different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
        unsigned long first_meta_bg =
                        le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
        unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

        if (!ext4_has_feature_meta_bg(sb) || metagroup < first_meta_bg)
                return ext4_bg_num_gdb_nometa(sb, group);

        return ext4_bg_num_gdb_meta(sb, group);
}
/*
 * This function returns the number of file system metadata clusters at
 * the beginning of a block group, including the reserved gdt blocks.
 */
static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
                                            ext4_group_t block_group)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        unsigned num;

        /* Check for superblock and gdt backups in this group */
        num = ext4_bg_has_super(sb, block_group);

        if (!ext4_has_feature_meta_bg(sb) ||
            block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
                          sbi->s_desc_per_block) {
                if (num) {
                        num += ext4_bg_num_gdb(sb, block_group);
                        num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
                }
        } else { /* For META_BG_BLOCK_GROUPS */
                num += ext4_bg_num_gdb(sb, block_group);
        }
        return EXT4_NUM_B2C(sbi, num);
}
/**
 * ext4_inode_to_goal_block - return a hint for block allocation
 * @inode: inode for block allocation
 *
 * Return the ideal location to start allocating blocks for a
 * newly created inode.
 */
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        ext4_group_t block_group;
        ext4_grpblk_t colour;
        int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
        ext4_fsblk_t bg_start;
        ext4_fsblk_t last_block;

        block_group = ei->i_block_group;
        if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
                /*
                 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
                 * block groups per flexgroup, reserve the first block
                 * group for directories and special files.  Regular
                 * files will start at the second block group.  This
                 * tends to speed up directory access and improves
                 * fsck times.
                 */
                block_group &= ~(flex_size - 1);
                if (S_ISREG(inode->i_mode))
                        block_group++;
        }
        bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
        last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

        /*
         * If we are doing delayed allocation, we don't need to take
         * colour into account.
         */
        if (test_opt(inode->i_sb, DELALLOC))
                return bg_start;

        if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
                colour = (current->pid % 16) *
                        (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        else
                colour = (current->pid % 16) * ((last_block - bg_start) / 16);
        return bg_start + colour;
}
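
/*
 * Example of the colour term above (illustrative numbers): with 32768
 * blocks per group and pid 1234, colour = (1234 % 16) * (32768 / 16)
 * = 2 * 2048 = 4096, so the goal is 4096 blocks past the start of the
 * chosen group.
 */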