// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/ialloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * BSD ufs-inspired inode and directory allocation by
 * Stephen Tweedie (sct@redhat.com), 1993
 * Big-endian to little-endian byte-swapping/bitmaps by
 *       David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/cred.h>

#include <asm/byteorder.h>

#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#include <trace/events/ext4.h>

/*
 * ialloc.c contains the inode allocation and deallocation routines
 */

/*
 * The free inodes are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.
 */

/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		ext4_set_bit(i, bitmap);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}
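
/*
 * Worked example (added for illustration, not in the original source):
 * with start_bit = 50 and end_bit = 128, the loop above sets bits 50..55
 * one at a time until i reaches the next byte boundary, (50 + 7) & ~7 = 56;
 * the remaining bits 56..127 span (128 - 56) >> 3 = 9 whole bytes, which a
 * single memset() fills with 0xff.
 */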

void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
		set_bitmap_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
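
/*
 * Note (added): this appears to be the end-io callback handed to
 * ext4_read_bh() later in this file; it runs once the read completes,
 * marks both the buffer and the bitmap as up to date on success, and
 * drops the buffer lock and the reference held for the I/O.
 */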

static int ext4_validate_inode_bitmap(struct super_block *sb,
				      struct ext4_group_desc *desc,
				      ext4_group_t block_group,
				      struct buffer_head *bh)
{
	ext4_fsblk_t	blk;
	struct ext4_group_info *grp;

	if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
		return 0;

	grp = ext4_get_group_info(sb, block_group);

	if (buffer_verified(bh))
		return 0;
	if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
		return -EFSCORRUPTED;

	ext4_lock_group(sb, block_group);
	if (buffer_verified(bh))
		goto verified;
	blk = ext4_inode_bitmap(sb, desc);
	if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
					   EXT4_INODES_PER_GROUP(sb) / 8) ||
	    ext4_simulate_fail(sb, EXT4_SIM_IBITMAP_CRC)) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
			   "inode_bitmap = %llu", block_group, blk);
		ext4_mark_group_bitmap_corrupted(sb, block_group,
					EXT4_GROUP_INFO_IBITMAP_CORRUPT);
		return -EFSCORRUPTED;
	}
	set_buffer_verified(bh);
verified:
	ext4_unlock_group(sb, block_group);
	return 0;
}
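
/*
 * Note (added): buffer_verified() is checked twice above, once without
 * and once under the group lock. This is a double-checked pattern: the
 * cheap unlocked test skips the lock entirely in the common case where
 * another task has already verified the checksum, while the locked
 * re-test guarantees the (comparatively expensive) checksum
 * verification runs at most once per buffer.
 */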

/*
 * Read the inode allocation bitmap for a given block_group, reading
 * into the specified slot in the superblock's bitmap cache.
 *
 * Return buffer_head of bitmap on success, or an ERR_PTR on error.
 */
static struct buffer_head *
ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct buffer_head *bh = NULL;
	ext4_fsblk_t bitmap_blk;
	int err;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return ERR_PTR(-EFSCORRUPTED);

	bitmap_blk = ext4_inode_bitmap(sb, desc);
	if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
	    (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
		ext4_error(sb, "Invalid inode bitmap blk %llu in "
			   "block_group %u", bitmap_blk, block_group);
		ext4_mark_group_bitmap_corrupted(sb, block_group,
					EXT4_GROUP_INFO_IBITMAP_CORRUPT);
		return ERR_PTR(-EFSCORRUPTED);
	}
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_warning(sb, "Cannot read inode bitmap - "
			     "block_group = %u, inode_bitmap = %llu",
			     block_group, bitmap_blk);
		return ERR_PTR(-ENOMEM);
	}
	if (bitmap_uptodate(bh))
		goto verify;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		goto verify;
	}

	ext4_lock_group(sb, block_group);
	if (ext4_has_group_desc_csum(sb) &&
	    (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
		if (block_group == 0) {
			ext4_unlock_group(sb, block_group);
			unlock_buffer(bh);
			ext4_error(sb, "Inode bitmap for bg 0 marked "
				   "uninitialized");
			err = -EFSCORRUPTED;
			goto out;
		}
		memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
		ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
				     sb->s_blocksize * 8, bh->b_data);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		set_buffer_verified(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);

	if (buffer_uptodate(bh)) {
		/*
		 * If not uninit, and bh is uptodate, the bitmap is
		 * also uptodate.
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		goto verify;
	}
	/*
	 * submit the buffer_head for reading
	 */
	trace_ext4_load_inode_bitmap(sb, block_group);
	ext4_read_bh(bh, REQ_META | REQ_PRIO, ext4_end_bitmap_read);
	ext4_simulate_fail_bh(sb, bh, EXT4_SIM_IBITMAP_EIO);
	if (!buffer_uptodate(bh)) {
		put_bh(bh);
		ext4_error_err(sb, EIO, "Cannot read inode bitmap - "
			       "block_group = %u, inode_bitmap = %llu",
			       block_group, bitmap_blk);
		ext4_mark_group_bitmap_corrupted(sb, block_group,
				EXT4_GROUP_INFO_IBITMAP_CORRUPT);
		return ERR_PTR(-EIO);
	}

verify:
	err = ext4_validate_inode_bitmap(sb, desc, block_group, bh);
	if (err)
		goto out;
	return bh;
out:
	put_bh(bh);
	return ERR_PTR(err);
}
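
/*
 * Note (added): a buffer_head carries two distinct "valid" flags here.
 * buffer_uptodate() means the block's contents match what was read from
 * disk; bitmap_uptodate() additionally means those contents are valid
 * *as a bitmap*, which matters for groups whose bitmap was constructed
 * in memory (the EXT4_BG_INODE_UNINIT path above) and was never read
 * from disk at all.
 */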

/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the hard disk.
 */
void ext4_free_inode(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	ext4_group_t block_group;
	unsigned long bit;
	struct ext4_group_desc *gdp;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	int fatal = 0, err, count, cleared;
	struct ext4_group_info *grp;

	if (!sb) {
		printk(KERN_ERR "EXT4-fs: %s:%d: inode on "
		       "nonexistent device\n", __func__, __LINE__);
		return;
	}
	if (atomic_read(&inode->i_count) > 1) {
		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: count=%d",
			 __func__, __LINE__, inode->i_ino,
			 atomic_read(&inode->i_count));
		return;
	}
	if (inode->i_nlink) {
		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: nlink=%d\n",
			 __func__, __LINE__, inode->i_ino, inode->i_nlink);
		return;
	}
	sbi = EXT4_SB(sb);

	ino = inode->i_ino;
	ext4_debug("freeing inode %lu\n", ino);
	trace_ext4_free_inode(inode);

	dquot_initialize(inode);
	dquot_free_inode(inode);

	is_directory = S_ISDIR(inode->i_mode);

	/* Do this BEFORE marking the inode not in use or returning an error */
	ext4_clear_inode(inode);

	es = sbi->s_es;
	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext4_error(sb, "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
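
	/*
	 * Worked example (added): inode numbers are 1-based, so with
	 * EXT4_INODES_PER_GROUP(sb) == 8192, freeing ino 12345 gives
	 * block_group = 12344 / 8192 = 1 and bit = 12344 % 8192 = 4152,
	 * i.e. bit 4152 of group 1's inode bitmap.
	 */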
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	/* Don't bother if the inode bitmap is corrupt. */
	if (IS_ERR(bitmap_bh)) {
		fatal = PTR_ERR(bitmap_bh);
		bitmap_bh = NULL;
		goto error_return;
	}
	if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
		grp = ext4_get_group_info(sb, block_group);
		if (unlikely(EXT4_MB_GRP_IBITMAP_CORRUPT(grp))) {
			fatal = -EFSCORRUPTED;
			goto error_return;
		}
	}

	BUFFER_TRACE(bitmap_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, bitmap_bh);
	if (fatal)
		goto error_return;

	fatal = -ESRCH;
	gdp = ext4_get_group_desc(sb, block_group, &bh2);
	if (gdp) {
		BUFFER_TRACE(bh2, "get_write_access");
		fatal = ext4_journal_get_write_access(handle, bh2);
	}
	ext4_lock_group(sb, block_group);
	cleared = ext4_test_and_clear_bit(bit, bitmap_bh->b_data);
	if (fatal || !cleared) {
		ext4_unlock_group(sb, block_group);
		goto out;
	}

	count = ext4_free_inodes_count(sb, gdp) + 1;
	ext4_free_inodes_set(sb, gdp, count);
	if (is_directory) {
		count = ext4_used_dirs_count(sb, gdp) - 1;
		ext4_used_dirs_set(sb, gdp, count);
		percpu_counter_dec(&sbi->s_dirs_counter);
	}
	ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
				   EXT4_INODES_PER_GROUP(sb) / 8);
	ext4_group_desc_csum_set(sb, block_group, gdp);
	ext4_unlock_group(sb, block_group);

	percpu_counter_inc(&sbi->s_freeinodes_counter);
	if (sbi->s_log_groups_per_flex) {
		struct flex_groups *fg;

		fg = sbi_array_rcu_deref(sbi, s_flex_groups,
					 ext4_flex_group(sbi, block_group));
		atomic_inc(&fg->free_inodes);
		if (is_directory)
			atomic_dec(&fg->used_dirs);
	}
	BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
	fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
out:
	if (cleared) {
		BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
		if (!fatal)
			fatal = err;
	} else {
		ext4_error(sb, "bit already cleared for inode %lu", ino);
		ext4_mark_group_bitmap_corrupted(sb, block_group,
					EXT4_GROUP_INFO_IBITMAP_CORRUPT);
	}

error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, fatal);
}

/*
 * Helper function for Orlov's allocator; returns critical information
 * for a particular block group or flex_bg.  If flex_size is 1, then g
 * is a block group number; otherwise it is flex_bg number.
 */
static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
			    int flex_size, struct orlov_stats *stats)
{
	struct ext4_group_desc *desc;

	if (flex_size > 1) {
		struct flex_groups *fg = sbi_array_rcu_deref(EXT4_SB(sb),
							     s_flex_groups, g);
		stats->free_inodes = atomic_read(&fg->free_inodes);
		stats->free_clusters = atomic64_read(&fg->free_clusters);
		stats->used_dirs = atomic_read(&fg->used_dirs);
		return;
	}

	desc = ext4_get_group_desc(sb, g, NULL);
	if (desc) {
		stats->free_inodes = ext4_free_inodes_count(sb, desc);
		stats->free_clusters = ext4_free_group_clusters(sb, desc);
		stats->used_dirs = ext4_used_dirs_count(sb, desc);
	} else {
		stats->free_inodes = 0;
		stats->free_clusters = 0;
		stats->used_dirs = 0;
	}
}

/*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are block groups with both free inodes and free blocks counts
 * not worse than average we return the one with the smallest directory
 * count. Otherwise we simply return a random group.
 *
 * For the rest, the rules are as follows:
 *
 * It's OK to put a directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free blocks left (min_clusters).
 * The parent's group is preferred; if it doesn't satisfy these
 * conditions we search cyclically through the rest. If none
 * of the groups looks good we just look for a group with more
 * free inodes than average (starting at the parent's group).
 */
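
/*
 * Worked example (added, with made-up numbers): on a filesystem with
 * ngroups = 16, inodes_per_group = 8192, ndirs = 160 and avefreei = 6000,
 * the thresholds computed below come out as max_dirs = 160/16 + 8192/16 =
 * 522 and min_inodes = 6000 - 8192/4 = 3952 (for flex_size = 1), so a
 * candidate group is rejected if it already holds 522 or more directories
 * or has fewer than 3952 free inodes.
 */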
static int find_group_orlov(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, umode_t mode,
			    const struct qstr *qstr)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t real_ngroups = ext4_get_groups_count(sb);
	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
	unsigned int freei, avefreei, grp_free;
	ext4_fsblk_t freeb, avefreec;
	unsigned int ndirs;
	int max_dirs, min_inodes;
	ext4_grpblk_t min_clusters;
	ext4_group_t i, grp, g, ngroups;
	struct ext4_group_desc *desc;
	struct orlov_stats stats;
	int flex_size = ext4_flex_bg_size(sbi);
	struct dx_hash_info hinfo;

	ngroups = real_ngroups;
	if (flex_size > 1) {
		ngroups = (real_ngroups + flex_size - 1) >>
			sbi->s_log_groups_per_flex;
		parent_group >>= sbi->s_log_groups_per_flex;
	}

	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
	avefreei = freei / ngroups;
	freeb = EXT4_C2B(sbi,
		percpu_counter_read_positive(&sbi->s_freeclusters_counter));
	avefreec = freeb;
	do_div(avefreec, ngroups);
	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);

	if (S_ISDIR(mode) &&
	    ((parent == d_inode(sb->s_root)) ||
	     (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) {
		int best_ndir = inodes_per_group;
		int ret = -1;

		if (qstr) {
			hinfo.hash_version = DX_HASH_HALF_MD4;
			hinfo.seed = sbi->s_hash_seed;
			ext4fs_dirhash(parent, qstr->name, qstr->len, &hinfo);
			grp = hinfo.hash;
		} else
			grp = prandom_u32();
		parent_group = (unsigned)grp % ngroups;
		for (i = 0; i < ngroups; i++) {
			g = (parent_group + i) % ngroups;
			get_orlov_stats(sb, g, flex_size, &stats);
			if (!stats.free_inodes)
				continue;
			if (stats.used_dirs >= best_ndir)
				continue;
			if (stats.free_inodes < avefreei)
				continue;
			if (stats.free_clusters < avefreec)
				continue;
			grp = g;
			ret = 0;
			best_ndir = stats.used_dirs;
		}
		if (ret)
			goto fallback;
	found_flex_bg:
		if (flex_size == 1) {
			*group = grp;
			return 0;
		}

		/*
		 * We pack inodes at the beginning of the flexgroup's
		 * inode tables.  Block allocation decisions will do
		 * something similar, although regular files will
		 * start at 2nd block group of the flexgroup.  See
		 * ext4_ext_find_goal() and ext4_find_near().
		 */
		grp *= flex_size;
		for (i = 0; i < flex_size; i++) {
			if (grp+i >= real_ngroups)
				break;
			desc = ext4_get_group_desc(sb, grp+i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = grp+i;
				return 0;
			}
		}
		goto fallback;
	}

	max_dirs = ndirs / ngroups + inodes_per_group / 16;
	min_inodes = avefreei - inodes_per_group*flex_size / 4;
	if (min_inodes < 1)
		min_inodes = 1;
	min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4;

	/*
	 * Start looking in the flex group where we last allocated an
	 * inode for this parent directory
	 */
	if (EXT4_I(parent)->i_last_alloc_group != ~0) {
		parent_group = EXT4_I(parent)->i_last_alloc_group;
		if (flex_size > 1)
			parent_group >>= sbi->s_log_groups_per_flex;
	}

	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		get_orlov_stats(sb, grp, flex_size, &stats);
		if (stats.used_dirs >= max_dirs)
			continue;
		if (stats.free_inodes < min_inodes)
			continue;
		if (stats.free_clusters < min_clusters)
			continue;
		goto found_flex_bg;
	}

fallback:
	ngroups = real_ngroups;
	avefreei = freei / ngroups;
fallback_retry:
	parent_group = EXT4_I(parent)->i_block_group;
	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc(sb, grp, NULL);
		if (desc) {
			grp_free = ext4_free_inodes_count(sb, desc);
			if (grp_free && grp_free >= avefreei) {
				*group = grp;
				return 0;
			}
		}
	}

	if (avefreei) {
		/*
		 * The free-inodes counter is approximate, and for really small
		 * filesystems the above test can fail to find any block groups,
		 * so retry without the average-free-inodes requirement.
		 */
		avefreei = 0;
		goto fallback_retry;
	}

	return -1;
}
static int find_group_other(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, umode_t mode)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t i, last, ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	int flex_size = ext4_flex_bg_size(EXT4_SB(sb));

	/*
	 * Try to place the inode in the same flex group as its
	 * parent.  If we can't find space, use the Orlov algorithm to
	 * find another flex group, and store that information in the
	 * parent directory's inode information so that we use that
	 * flex group for future allocations.
	 */
	if (flex_size > 1) {
		int retry = 0;

	try_again:
		parent_group &= ~(flex_size-1);
		last = parent_group + flex_size;
		if (last > ngroups)
			last = ngroups;
		for (i = parent_group; i < last; i++) {
			desc = ext4_get_group_desc(sb, i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = i;
				return 0;
			}
		}
		if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) {
			retry = 1;
			parent_group = EXT4_I(parent)->i_last_alloc_group;
			goto try_again;
		}
		/*
		 * If this didn't work, use the Orlov search algorithm
		 * to find a new flex group; we pass in the mode to
		 * avoid the topdir algorithms.
		 */
		*group = parent_group + flex_size;
		if (*group > ngroups)
			*group = 0;
		return find_group_orlov(sb, parent, group, mode, NULL);
	}

	/*
	 * Try to place the inode in its parent directory
	 */
	*group = parent_group;
	desc = ext4_get_group_desc(sb, *group, NULL);
	if (desc && ext4_free_inodes_count(sb, desc) &&
	    ext4_free_group_clusters(sb, desc))
		return 0;

	/*
	 * We're going to place this inode in a different blockgroup from its
	 * parent.  We want to cause files in a common directory to all land in
	 * the same blockgroup.  But we want files which are in a different
	 * directory which shares a blockgroup with our parent to land in a
	 * different blockgroup.
	 *
	 * So add our directory's i_ino into the starting point for the hash.
	 */
	*group = (*group + parent->i_ino) % ngroups;

	/*
	 * Use a quadratic hash to find a group with a free inode and some free
	 * blocks.
	 */
	for (i = 1; i < ngroups; i <<= 1) {
		*group += i;
		if (*group >= ngroups)
			*group -= ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc) &&
		    ext4_free_group_clusters(sb, desc))
			return 0;
	}
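
	/*
	 * Worked example (added): with ngroups = 128 and a starting *group
	 * of 70, the quadratic probe above visits 71 (+1), 73 (+2), 77 (+4),
	 * 85 (+8), 101 (+16), 5 (+32, wrapped), 69 (+64), doubling the
	 * stride each pass so that nearby collisions spread out quickly.
	 */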

	/*
	 * That failed: try linear search for a free inode, even if that group
	 * has no free blocks.
	 */
	*group = parent_group;
	for (i = 0; i < ngroups; i++) {
		if (++*group >= ngroups)
			*group = 0;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc))
			return 0;
	}

	return -1;
}

/*
 * In no journal mode, if an inode has recently been deleted, we want
 * to avoid reusing it until we're reasonably sure the inode table
 * block has been written back to disk.  (Yes, these values are
 * somewhat arbitrary...)
 */
#define RECENTCY_MIN	60
#define RECENTCY_DIRTY	300

static int recently_deleted(struct super_block *sb, ext4_group_t group, int ino)
{
	struct ext4_group_desc	*gdp;
	struct ext4_inode *raw_inode;
	struct buffer_head	*bh;
	int inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
	int offset, ret = 0;
	int recentcy = RECENTCY_MIN;
	u32 dtime, now;

	gdp = ext4_get_group_desc(sb, group, NULL);
	if (unlikely(!gdp))
		return 0;

	bh = sb_find_get_block(sb, ext4_inode_table(sb, gdp) +
		       (ino / inodes_per_block));
	if (!bh || !buffer_uptodate(bh))
		/*
		 * If the block is not in the buffer cache, then it
		 * must have been written out.
		 */
		goto out;

	offset = (ino % inodes_per_block) * EXT4_INODE_SIZE(sb);
	raw_inode = (struct ext4_inode *) (bh->b_data + offset);

	/* i_dtime is only 32 bits on disk, but we only care about relative
	 * times in the range of a few minutes (i.e. long enough to sync a
	 * recently-deleted inode to disk), so using the low 32 bits of the
	 * clock (a 68 year range) is enough, see time_before32() */
	dtime = le32_to_cpu(raw_inode->i_dtime);
	now = ktime_get_real_seconds();
	if (buffer_dirty(bh))
		recentcy += RECENTCY_DIRTY;

	if (dtime && time_before32(dtime, now) &&
	    time_before32(now, dtime + recentcy))
		ret = 1;
out:
	brelse(bh);
	return ret;
}
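
/*
 * Worked example (added): an inode whose on-disk i_dtime is 1000 is
 * queried at now = 1030. With a clean inode table block, recentcy is
 * RECENTCY_MIN = 60, so 1000 < 1030 < 1060 holds and the inode is
 * reported as recently deleted (return 1); if the block were still
 * dirty, the window would stretch to 1000..1360.
 */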

static int find_inode_bit(struct super_block *sb, ext4_group_t group,
			  struct buffer_head *bitmap, unsigned long *ino)
{
	bool check_recently_deleted = EXT4_SB(sb)->s_journal == NULL;
	unsigned long recently_deleted_ino = EXT4_INODES_PER_GROUP(sb);

next:
	*ino = ext4_find_next_zero_bit((unsigned long *)
				       bitmap->b_data,
				       EXT4_INODES_PER_GROUP(sb), *ino);
	if (*ino >= EXT4_INODES_PER_GROUP(sb))
		goto not_found;

	if (check_recently_deleted && recently_deleted(sb, group, *ino)) {
		recently_deleted_ino = *ino;
		*ino = *ino + 1;
		if (*ino < EXT4_INODES_PER_GROUP(sb))
			goto next;
		goto not_found;
	}
	return 1;
not_found:
	if (recently_deleted_ino >= EXT4_INODES_PER_GROUP(sb))
		return 0;
	/*
	 * Not reusing recently deleted inodes is mostly a preference. We don't
	 * want to report ENOSPC or skew allocation patterns because of that.
	 * So return even a recently deleted inode if we could not find a
	 * better one in the given range.
	 */
	*ino = recently_deleted_ino;
	return 1;
}
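
/*
 * Note (added): ext4_mark_inode_used() below passes a NULL journal
 * handle to its ext4_handle_dirty_metadata() calls and syncs the
 * buffers directly with sync_dirty_buffer(); it looks intended for
 * replay-style callers (see the EXT4_FC_REPLAY checks elsewhere in
 * this file) that must force a specific inode number to be marked in
 * use, rather than for the normal journalled allocation path.
 */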
int ext4_mark_inode_used(struct super_block *sb, int ino)
{
	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
	struct buffer_head *inode_bitmap_bh = NULL, *group_desc_bh = NULL;
	struct ext4_group_desc *gdp;
	ext4_group_t group;
	int bit;
	int err = -EFSCORRUPTED;

	if (ino < EXT4_FIRST_INO(sb) || ino > max_ino)
		goto out;

	group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
	if (IS_ERR(inode_bitmap_bh))
		return PTR_ERR(inode_bitmap_bh);

	if (ext4_test_bit(bit, inode_bitmap_bh->b_data)) {
		/* Inode is already marked in use. */
		err = 0;
		goto out;
	}

	gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
	if (!gdp || !group_desc_bh) {
		err = -EINVAL;
		goto out;
	}

	ext4_set_bit(bit, inode_bitmap_bh->b_data);

	BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(NULL, NULL, inode_bitmap_bh);
	if (err) {
		ext4_std_error(sb, err);
		goto out;
	}
	err = sync_dirty_buffer(inode_bitmap_bh);
	if (err) {
		ext4_std_error(sb, err);
		goto out;
	}

	/* We may have to initialize the block bitmap if it isn't already */
	if (ext4_has_group_desc_csum(sb) &&
	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		struct buffer_head *block_bitmap_bh;

		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
		if (IS_ERR(block_bitmap_bh)) {
			err = PTR_ERR(block_bitmap_bh);
			goto out;
		}

		BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
		err = ext4_handle_dirty_metadata(NULL, NULL, block_bitmap_bh);
		sync_dirty_buffer(block_bitmap_bh);

		/* recheck and clear flag under lock if we still need to */
		ext4_lock_group(sb, group);
		if (ext4_has_group_desc_csum(sb) &&
		    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
			ext4_free_group_clusters_set(sb, gdp,
				ext4_free_clusters_after_init(sb, group, gdp));
			ext4_block_bitmap_csum_set(sb, group, gdp,
						   block_bitmap_bh);
			ext4_group_desc_csum_set(sb, group, gdp);
		}
		ext4_unlock_group(sb, group);
		brelse(block_bitmap_bh);

		if (err) {
			ext4_std_error(sb, err);
			goto out;
		}
	}

	/* Update the relevant bg descriptor fields */
	if (ext4_has_group_desc_csum(sb)) {
		int free;

		ext4_lock_group(sb, group); /* while we modify the bg desc */
		free = EXT4_INODES_PER_GROUP(sb) -
			ext4_itable_unused_count(sb, gdp);
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
			free = 0;
		}

		/*
		 * Check the relative inode number against the last used
		 * relative inode number in this group. If it is greater
		 * we need to update the bg_itable_unused count.
		 */
		if (bit >= free)
			ext4_itable_unused_set(sb, gdp,
					(EXT4_INODES_PER_GROUP(sb) - bit - 1));
	} else {
		ext4_lock_group(sb, group);
	}

	ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
	if (ext4_has_group_desc_csum(sb)) {
		ext4_inode_bitmap_csum_set(sb, group, gdp, inode_bitmap_bh,
					   EXT4_INODES_PER_GROUP(sb) / 8);
		ext4_group_desc_csum_set(sb, group, gdp);
	}

	ext4_unlock_group(sb, group);
	err = ext4_handle_dirty_metadata(NULL, NULL, group_desc_bh);
	sync_dirty_buffer(group_desc_bh);
out:
	brelse(inode_bitmap_bh);
	return err;
}

static int ext4_xattr_credits_for_new_inode(struct inode *dir, mode_t mode,
					    bool encrypt)
{
	struct super_block *sb = dir->i_sb;
	int nblocks = 0;
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	struct posix_acl *p = get_acl(dir, ACL_TYPE_DEFAULT);

	if (IS_ERR(p))
		return PTR_ERR(p);
	if (p) {
		int acl_size = p->a_count * sizeof(ext4_acl_entry);

		nblocks += (S_ISDIR(mode) ? 2 : 1) *
			__ext4_xattr_set_credits(sb, NULL /* inode */,
						 NULL /* block_bh */, acl_size,
						 true /* is_create */);
		posix_acl_release(p);
	}
#endif

#ifdef CONFIG_SECURITY
	{
		int num_security_xattrs = 1;

#ifdef CONFIG_INTEGRITY
		num_security_xattrs++;
#endif
		/*
		 * We assume that security xattrs are never more than 1k.
		 * In practice they are under 128 bytes.
		 */
		nblocks += num_security_xattrs *
			__ext4_xattr_set_credits(sb, NULL /* inode */,
						 NULL /* block_bh */, 1024,
						 true /* is_create */);
	}
#endif
	if (encrypt)
		nblocks += __ext4_xattr_set_credits(sb,
					NULL /* inode */,
					NULL /* block_bh */,
					FSCRYPT_SET_CONTEXT_MAX_SIZE,
					true /* is_create */);
	return nblocks;
}
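
/*
 * Worked example (added): for a new directory under a parent carrying a
 * default POSIX ACL, the ACL is written twice (once as the default ACL
 * inherited by children, once as the access ACL), hence the
 * S_ISDIR(mode) ? 2 : 1 factor; with CONFIG_SECURITY and CONFIG_INTEGRITY
 * both enabled, two further xattrs of up to 1k each are budgeted, and an
 * encrypted inode adds credits for one fscrypt context xattr on top.
 */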

/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
			       umode_t mode, const struct qstr *qstr,
			       __u32 goal, uid_t *owner, __u32 i_flags,
			       int handle_type, unsigned int line_no,
			       int nblocks)
{
	struct super_block *sb;
	struct buffer_head *inode_bitmap_bh = NULL;
	struct buffer_head *group_desc_bh;
	ext4_group_t ngroups, group = 0;
	unsigned long ino = 0;
	struct inode *inode;
	struct ext4_group_desc *gdp = NULL;
	struct ext4_inode_info *ei;
	struct ext4_sb_info *sbi;
	int ret2, err;
	struct inode *ret;
	ext4_group_t i;
	ext4_group_t flex_group;
	struct ext4_group_info *grp = NULL;
	bool encrypt = false;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	sbi = EXT4_SB(sb);

	if (unlikely(ext4_forced_shutdown(sbi)))
		return ERR_PTR(-EIO);

	ngroups = ext4_get_groups_count(sb);
	trace_ext4_request_inode(dir, mode);
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ei = EXT4_I(inode);

	/*
	 * Initialize owners and quota early so that we don't have to account
	 * for quota initialization worst case in standard inode creating
	 * transaction
	 */
	if (owner) {
		inode->i_mode = mode;
		i_uid_write(inode, owner[0]);
		i_gid_write(inode, owner[1]);
	} else if (test_opt(sb, GRPID)) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = dir->i_gid;
	} else
		inode_init_owner(inode, dir, mode);

	if (ext4_has_feature_project(sb) &&
	    ext4_test_inode_flag(dir, EXT4_INODE_PROJINHERIT))
		ei->i_projid = EXT4_I(dir)->i_projid;
	else
		ei->i_projid = make_kprojid(&init_user_ns, EXT4_DEF_PROJID);

	if (!(i_flags & EXT4_EA_INODE_FL)) {
		err = fscrypt_prepare_new_inode(dir, inode, &encrypt);
		if (err)
			goto out;
	}

	err = dquot_initialize(inode);
	if (err)
		goto out;

	if (!handle && sbi->s_journal && !(i_flags & EXT4_EA_INODE_FL)) {
		ret2 = ext4_xattr_credits_for_new_inode(dir, mode, encrypt);
		if (ret2 < 0) {
			err = ret2;
			goto out;
		}
		nblocks += ret2;
	}

	if (!goal)
		goal = sbi->s_inode_goal;

	if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) {
		group = (goal - 1) / EXT4_INODES_PER_GROUP(sb);
		ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb);
		ret2 = 0;
		goto got_group;
	}

	if (S_ISDIR(mode))
		ret2 = find_group_orlov(sb, dir, &group, mode, qstr);
	else
		ret2 = find_group_other(sb, dir, &group, mode);

got_group:
	EXT4_I(dir)->i_last_alloc_group = group;
	err = -ENOSPC;
	if (ret2 == -1)
		goto out;

	/*
	 * Normally we will only go through one pass of this loop,
	 * unless we get unlucky and it turns out the group we selected
	 * had its last inode grabbed by someone else.
	 */
	for (i = 0; i < ngroups; i++, ino = 0) {
		err = -EIO;

		gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
		if (!gdp)
			goto out;

		/*
		 * Check free inodes count before loading bitmap.
		 */
		if (ext4_free_inodes_count(sb, gdp) == 0)
			goto next_group;

		if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
			grp = ext4_get_group_info(sb, group);
			/*
			 * Skip groups with already-known suspicious inode
			 * tables
			 */
			if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
				goto next_group;
		}

		brelse(inode_bitmap_bh);
		inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
		/* Skip groups with suspicious inode tables */
		if (((!(sbi->s_mount_state & EXT4_FC_REPLAY))
		     && EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) ||
		    IS_ERR(inode_bitmap_bh)) {
			inode_bitmap_bh = NULL;
			goto next_group;
		}

repeat_in_this_group:
		ret2 = find_inode_bit(sb, group, inode_bitmap_bh, &ino);
		if (!ret2)
			goto next_group;

		if (group == 0 && (ino + 1) < EXT4_FIRST_INO(sb)) {
			ext4_error(sb, "reserved inode found cleared - "
				   "inode=%lu", ino + 1);
			ext4_mark_group_bitmap_corrupted(sb, group,
					EXT4_GROUP_INFO_IBITMAP_CORRUPT);
			goto next_group;
		}

		if ((!(sbi->s_mount_state & EXT4_FC_REPLAY)) && !handle) {
			BUG_ON(nblocks <= 0);
			handle = __ext4_journal_start_sb(dir->i_sb, line_no,
				 handle_type, nblocks, 0,
				 ext4_trans_default_revoke_credits(sb));
			if (IS_ERR(handle)) {
				err = PTR_ERR(handle);
				ext4_std_error(sb, err);
				goto out;
			}
		}
		BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, inode_bitmap_bh);
		if (err) {
			ext4_std_error(sb, err);
			goto out;
		}
		ext4_lock_group(sb, group);
		ret2 = ext4_test_and_set_bit(ino, inode_bitmap_bh->b_data);
		if (ret2) {
			/* Someone already took the bit. Repeat the search
			 * with lock held.
			 */
			ret2 = find_inode_bit(sb, group, inode_bitmap_bh, &ino);
			if (ret2) {
				ext4_set_bit(ino, inode_bitmap_bh->b_data);
				ret2 = 0;
			} else {
				ret2 = 1; /* we didn't grab the inode */
			}
		}
		ext4_unlock_group(sb, group);
		ino++;		/* the inode bitmap is zero-based */
		if (!ret2)
			goto got; /* we grabbed the inode! */

		if (ino < EXT4_INODES_PER_GROUP(sb))
			goto repeat_in_this_group;
next_group:
		if (++group == ngroups)
			group = 0;
	}
	err = -ENOSPC;
	goto out;

got:
	BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh);
	if (err) {
		ext4_std_error(sb, err);
		goto out;
	}

	BUFFER_TRACE(group_desc_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, group_desc_bh);
	if (err) {
		ext4_std_error(sb, err);
		goto out;
	}

	/* We may have to initialize the block bitmap if it isn't already */
	if (ext4_has_group_desc_csum(sb) &&
	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		struct buffer_head *block_bitmap_bh;

		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
		if (IS_ERR(block_bitmap_bh)) {
			err = PTR_ERR(block_bitmap_bh);
			goto out;
		}
		BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
		err = ext4_journal_get_write_access(handle, block_bitmap_bh);
		if (err) {
			brelse(block_bitmap_bh);
			ext4_std_error(sb, err);
			goto out;
		}

		BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
		err = ext4_handle_dirty_metadata(handle, NULL, block_bitmap_bh);

		/* recheck and clear flag under lock if we still need to */
		ext4_lock_group(sb, group);
		if (ext4_has_group_desc_csum(sb) &&
		    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
			ext4_free_group_clusters_set(sb, gdp,
				ext4_free_clusters_after_init(sb, group, gdp));
			ext4_block_bitmap_csum_set(sb, group, gdp,
						   block_bitmap_bh);
			ext4_group_desc_csum_set(sb, group, gdp);
		}
		ext4_unlock_group(sb, group);
		brelse(block_bitmap_bh);

		if (err) {
			ext4_std_error(sb, err);
			goto out;
		}
	}

	/* Update the relevant bg descriptor fields */
	if (ext4_has_group_desc_csum(sb)) {
		int free;
		struct ext4_group_info *grp = NULL;

		if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
			grp = ext4_get_group_info(sb, group);
			down_read(&grp->alloc_sem); /*
						     * protect vs itable
						     * lazyinit
						     */
		}
		ext4_lock_group(sb, group); /* while we modify the bg desc */
		free = EXT4_INODES_PER_GROUP(sb) -
			ext4_itable_unused_count(sb, gdp);
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
			free = 0;
		}
		/*
		 * Check the relative inode number against the last used
		 * relative inode number in this group. If it is greater
		 * we need to update the bg_itable_unused count.
		 */
		if (ino > free)
			ext4_itable_unused_set(sb, gdp,
					(EXT4_INODES_PER_GROUP(sb) - ino));
		if (!(sbi->s_mount_state & EXT4_FC_REPLAY))
			up_read(&grp->alloc_sem);
	} else {
		ext4_lock_group(sb, group);
	}

	ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
	if (S_ISDIR(mode)) {
		ext4_used_dirs_set(sb, gdp, ext4_used_dirs_count(sb, gdp) + 1);
		if (sbi->s_log_groups_per_flex) {
			ext4_group_t f = ext4_flex_group(sbi, group);

			atomic_inc(&sbi_array_rcu_deref(sbi, s_flex_groups,
							f)->used_dirs);
		}
	}
	if (ext4_has_group_desc_csum(sb)) {
		ext4_inode_bitmap_csum_set(sb, group, gdp, inode_bitmap_bh,
					   EXT4_INODES_PER_GROUP(sb) / 8);
		ext4_group_desc_csum_set(sb, group, gdp);
	}
	ext4_unlock_group(sb, group);

	BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
	if (err) {
		ext4_std_error(sb, err);
		goto out;
	}

	percpu_counter_dec(&sbi->s_freeinodes_counter);
	if (S_ISDIR(mode))
		percpu_counter_inc(&sbi->s_dirs_counter);

	if (sbi->s_log_groups_per_flex) {
		flex_group = ext4_flex_group(sbi, group);
		atomic_dec(&sbi_array_rcu_deref(sbi, s_flex_groups,
						flex_group)->free_inodes);
	}
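
	/*
	 * Worked example (added): the global inode number combines the
	 * group and the zero-based bit we just set, shifted back to
	 * 1-based numbering: with EXT4_INODES_PER_GROUP(sb) == 8192,
	 * group 1 and ino 4153 (bit 4152 incremented above) yield
	 * i_ino = 4153 + 1 * 8192 = 12345.
	 */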
	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
	ei->i_crtime = inode->i_mtime;

	memset(ei->i_data, 0, sizeof(ei->i_data));
	ei->i_dir_start_lookup = 0;
	ei->i_disksize = 0;

	/* Don't inherit extent flag from directory, amongst others. */
	ei->i_flags =
		ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);
	ei->i_flags |= i_flags;
	ei->i_file_acl = 0;
	ei->i_dtime = 0;
	ei->i_block_group = group;
	ei->i_last_alloc_group = ~0;

	ext4_set_inode_flags(inode, true);
	if (IS_DIRSYNC(inode))
		ext4_handle_sync(handle);
	if (insert_inode_locked(inode) < 0) {
		/*
		 * Likely a bitmap corruption causing inode to be allocated
		 * twice.
		 */
		err = -EIO;
		ext4_error(sb, "failed to insert inode %lu: doubly allocated?",
			   inode->i_ino);
		ext4_mark_group_bitmap_corrupted(sb, group,
					EXT4_GROUP_INFO_IBITMAP_CORRUPT);
		goto out;
	}
	inode->i_generation = prandom_u32();

	/* Precompute checksum seed for inode metadata */
	if (ext4_has_metadata_csum(sb)) {
		__u32 csum;
		__le32 inum = cpu_to_le32(inode->i_ino);
		__le32 gen = cpu_to_le32(inode->i_generation);
		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
				   sizeof(inum));
		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
					      sizeof(gen));
	}

	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
	ext4_set_inode_state(inode, EXT4_STATE_NEW);

	ei->i_extra_isize = sbi->s_want_extra_isize;
	ei->i_inline_off = 0;
	if (ext4_has_feature_inline_data(sb))
		ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
	ret = inode;
	err = dquot_alloc_inode(inode);
	if (err)
		goto fail_drop;

	/*
	 * Since the encryption xattr will always be unique, create it first so
	 * that it's less likely to end up in an external xattr block and
	 * prevent its deduplication.
	 */
	if (encrypt) {
		err = fscrypt_set_context(inode, handle);
		if (err)
			goto fail_free_drop;
	}

	if (!(ei->i_flags & EXT4_EA_INODE_FL)) {
		err = ext4_init_acl(handle, inode, dir);
		if (err)
			goto fail_free_drop;

		err = ext4_init_security(handle, inode, dir, qstr);
		if (err)
			goto fail_free_drop;
	}

	if (ext4_has_feature_extents(sb)) {
		/* set extent flag only for directory, file and normal symlink*/
		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
			ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
			ext4_ext_tree_init(handle, inode);
		}
	}

	if (ext4_handle_valid(handle)) {
		ei->i_sync_tid = handle->h_transaction->t_tid;
		ei->i_datasync_tid = handle->h_transaction->t_tid;
	}

	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_std_error(sb, err);
		goto fail_free_drop;
	}

	ext4_debug("allocating inode %lu\n", inode->i_ino);
	trace_ext4_allocate_inode(inode, dir, mode);
	brelse(inode_bitmap_bh);
	return ret;

fail_free_drop:
	dquot_free_inode(inode);
fail_drop:
	clear_nlink(inode);
	unlock_new_inode(inode);
out:
	dquot_drop(inode);
	inode->i_flags |= S_NOQUOTA;
	iput(inode);

	brelse(inode_bitmap_bh);
	return ERR_PTR(err);
}

/* Verify that we are loading a valid orphan from disk */
struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
{
	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
	ext4_group_t block_group;
	int bit;
	struct buffer_head *bitmap_bh = NULL;
	struct inode *inode = NULL;
	int err = -EFSCORRUPTED;

	if (ino < EXT4_FIRST_INO(sb) || ino > max_ino)
		goto bad_orphan;

	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (IS_ERR(bitmap_bh))
		return ERR_CAST(bitmap_bh);

	/* Having the inode bit set should be a 100% indicator that this
	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
	 * inodes that were being truncated, so we can't check i_nlink==0.
	 */
	if (!ext4_test_bit(bit, bitmap_bh->b_data))
		goto bad_orphan;

	inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ext4_error_err(sb, -err,
			       "couldn't read orphan inode %lu (err %d)",
			       ino, err);
		brelse(bitmap_bh);
		return inode;
	}

	/*
	 * If the orphan has i_nlink > 0 then it should be able to
	 * be truncated, otherwise it won't be removed from the orphan
	 * list during processing and an infinite loop will result.
	 * Similarly, it must not be a bad inode.
	 */
	if ((inode->i_nlink && !ext4_can_truncate(inode)) ||
	    is_bad_inode(inode))
		goto bad_orphan;

	if (NEXT_ORPHAN(inode) > max_ino)
		goto bad_orphan;
	brelse(bitmap_bh);
	return inode;

bad_orphan:
	ext4_error(sb, "bad orphan inode %lu", ino);
	if (bitmap_bh)
		printk(KERN_ERR "ext4_test_bit(bit=%d, block=%llu) = %d\n",
		       bit, (unsigned long long)bitmap_bh->b_blocknr,
		       ext4_test_bit(bit, bitmap_bh->b_data));
	if (inode) {
		printk(KERN_ERR "is_bad_inode(inode)=%d\n",
		       is_bad_inode(inode));
		printk(KERN_ERR "NEXT_ORPHAN(inode)=%u\n",
		       NEXT_ORPHAN(inode));
		printk(KERN_ERR "max_ino=%lu\n", max_ino);
		printk(KERN_ERR "i_nlink=%u\n", inode->i_nlink);
		/* Avoid freeing blocks if we got a bad deleted inode */
		if (inode->i_nlink == 0)
			inode->i_blocks = 0;
		iput(inode);
	}
	brelse(bitmap_bh);
	return ERR_PTR(err);
}

unsigned long ext4_count_free_inodes(struct super_block *sb)
{
	unsigned long desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	unsigned long bitmap_count, x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_inode_bitmap(sb, i);
		if (IS_ERR(bitmap_bh)) {
			bitmap_bh = NULL;
			continue;
		}

		x = ext4_count_free(bitmap_bh->b_data,
				    EXT4_INODES_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
			(unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_inodes: "
	       "stored = %u, computed = %lu, %lu\n",
	       le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
	return desc_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
	}
	return desc_count;
#endif
}

/* Called at mount-time, super-block is locked */
unsigned long ext4_count_dirs(struct super_block * sb)
{
	unsigned long count = 0;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);

	for (i = 0; i < ngroups; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);

		if (!gdp)
			continue;
		count += ext4_used_dirs_count(sb, gdp);
	}
	return count;
}

/*
 * Zeroes a not yet zeroed inode table - just writes zeroes through the whole
 * inode table. Must be called without any spinlock held. The only place
 * where it is called from on an active part of the filesystem is the
 * ext4lazyinit thread, so we do not need any special locks; however, we have
 * to prevent inode allocation from the current group, so we take the
 * alloc_sem lock to block ext4_new_inode() until we are finished.
 */
int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
			  int barrier)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = NULL;
	struct buffer_head *group_desc_bh;
	handle_t *handle;
	ext4_fsblk_t blk;
	int num, ret = 0, used_blks = 0;

	/* This should not happen, but just to be sure check this */
	if (sb_rdonly(sb)) {
		ret = 1;
		goto out;
	}

	gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
	if (!gdp)
		goto out;

	/*
	 * We do not need to lock this, because we are the only one
	 * handling this flag.
	 */
	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
		goto out;

	handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	down_write(&grp->alloc_sem);
	/*
	 * If the inode bitmap was already initialized there may be some
	 * used inodes, so we need to skip blocks with used inodes in the
	 * inode table.
	 */
	if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
		used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) -
			    ext4_itable_unused_count(sb, gdp)),
			    sbi->s_inodes_per_block);
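
	/*
	 * Worked example (added): with 8192 inodes per group, 16 inodes
	 * per block and bg_itable_unused = 7936, the group has
	 * 8192 - 7936 = 256 inodes in use, so used_blks =
	 * DIV_ROUND_UP(256, 16) = 16 inode table blocks must be
	 * preserved and zeroing starts after them.
	 */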
	if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) ||
	    ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) -
			       ext4_itable_unused_count(sb, gdp)) <
			      EXT4_FIRST_INO(sb)))) {
		ext4_error(sb, "Something is wrong with group %u: "
			   "used itable blocks: %d; "
			   "itable unused count: %u",
			   group, used_blks,
			   ext4_itable_unused_count(sb, gdp));
		ret = 1;
		goto err_out;
	}

	blk = ext4_inode_table(sb, gdp) + used_blks;
	num = sbi->s_itb_per_group - used_blks;

	BUFFER_TRACE(group_desc_bh, "get_write_access");
	ret = ext4_journal_get_write_access(handle,
					    group_desc_bh);
	if (ret)
		goto err_out;

	/*
	 * Skip zeroout if the inode table is full. But we set the ZEROED
	 * flag anyway, because obviously, when it is full it does not need
	 * zeroing.
	 */
	if (unlikely(num == 0))
		goto skip_zeroout;

	ext4_debug("going to zero out inode table in group %d\n",
		   group);
	ret = sb_issue_zeroout(sb, blk, num, GFP_NOFS);
	if (ret < 0)
		goto err_out;
	if (barrier)
		blkdev_issue_flush(sb->s_bdev, GFP_NOFS);

skip_zeroout:
	ext4_lock_group(sb, group);
	gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
	ext4_group_desc_csum_set(sb, group, gdp);
	ext4_unlock_group(sb, group);

	BUFFER_TRACE(group_desc_bh,
		     "call ext4_handle_dirty_metadata");
	ret = ext4_handle_dirty_metadata(handle, NULL,
					 group_desc_bh);

err_out:
	up_write(&grp->alloc_sem);
	ext4_journal_stop(handle);
out:
	return ret;
}