// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/super.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */
#include <linux/module.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/parser.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/vfs.h>
#include <linux/random.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/crc16.h>
#include <linux/dax.h>
#include <linux/uaccess.h>
#include <linux/iversion.h>
#include <linux/unicode.h>
#include <linux/part_stat.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fsnotify.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

#include "ext4_extents.h"	/* Needed for trace points definition */
#include "ext4_jbd2.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ext4.h>
static struct ext4_lazy_init *ext4_li_info;
static DEFINE_MUTEX(ext4_li_mtx);
static struct ratelimit_state ext4_mount_msg_ratelimit;

static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
			     unsigned long journal_devnum);
static int ext4_show_options(struct seq_file *seq, struct dentry *root);
static void ext4_update_super(struct super_block *sb);
static int ext4_commit_super(struct super_block *sb);
static int ext4_mark_recovery_complete(struct super_block *sb,
				       struct ext4_super_block *es);
static int ext4_clear_journal_err(struct super_block *sb,
				  struct ext4_super_block *es);
static int ext4_sync_fs(struct super_block *sb, int wait);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ext4_unfreeze(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
static inline int ext2_feature_set_ok(struct super_block *sb);
static inline int ext3_feature_set_ok(struct super_block *sb);
static void ext4_destroy_lazyinit_thread(void);
static void ext4_unregister_li_request(struct super_block *sb);
static void ext4_clear_request_list(void);
static struct inode *ext4_get_journal_inode(struct super_block *sb,
					    unsigned int journal_inum);
static int ext4_validate_options(struct fs_context *fc);
static int ext4_check_opt_consistency(struct fs_context *fc,
				      struct super_block *sb);
static void ext4_apply_options(struct fs_context *fc, struct super_block *sb);
static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param);
static int ext4_get_tree(struct fs_context *fc);
static int ext4_reconfigure(struct fs_context *fc);
static void ext4_fc_free(struct fs_context *fc);
static int ext4_init_fs_context(struct fs_context *fc);
static void ext4_kill_sb(struct super_block *sb);
static const struct fs_parameter_spec ext4_param_specs[];
/*
 * Lock ordering
 *
 * page fault path:
 * mmap_lock -> sb_start_pagefault -> invalidate_lock (r) -> transaction start
 *   -> page lock -> i_data_sem (rw)
 *
 * buffered write path:
 * sb_start_write -> i_mutex -> mmap_lock
 * sb_start_write -> i_mutex -> transaction start -> page lock ->
 *   i_data_sem (rw)
 *
 * truncate:
 * sb_start_write -> i_mutex -> invalidate_lock (w) -> i_mmap_rwsem (w) ->
 *   page lock
 * sb_start_write -> i_mutex -> invalidate_lock (w) -> transaction start ->
 *   i_data_sem (w)
 *
 * direct IO:
 * sb_start_write -> i_mutex -> mmap_lock
 * sb_start_write -> i_mutex -> transaction start -> i_data_sem (rw)
 *
 * writepages:
 * transaction start -> page lock(s) -> i_data_sem (rw)
 */
static const struct fs_context_operations ext4_context_ops = {
	.parse_param	= ext4_parse_param,
	.get_tree	= ext4_get_tree,
	.reconfigure	= ext4_reconfigure,
	.free		= ext4_fc_free,
};
#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static struct file_system_type ext2_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "ext2",
	.init_fs_context	= ext4_init_fs_context,
	.parameters		= ext4_param_specs,
	.kill_sb		= ext4_kill_sb,
	.fs_flags		= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext2");
MODULE_ALIAS("ext2");
#define IS_EXT2_SB(sb) ((sb)->s_type == &ext2_fs_type)
#else
#define IS_EXT2_SB(sb) (0)
#endif
static struct file_system_type ext3_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "ext3",
	.init_fs_context	= ext4_init_fs_context,
	.parameters		= ext4_param_specs,
	.kill_sb		= ext4_kill_sb,
	.fs_flags		= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext3");
MODULE_ALIAS("ext3");
#define IS_EXT3_SB(sb) ((sb)->s_type == &ext3_fs_type)
static inline void __ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags,
				  bh_end_io_t *end_io, bool simu_fail)
{
	if (simu_fail) {
		clear_buffer_uptodate(bh);
		unlock_buffer(bh);
		return;
	}

	/*
	 * buffer's verified bit is no longer valid after reading from
	 * disk again due to write out error, clear it to make sure we
	 * recheck the buffer contents.
	 */
	clear_buffer_verified(bh);

	bh->b_end_io = end_io ? end_io : end_buffer_read_sync;
	get_bh(bh);
	submit_bh(REQ_OP_READ | op_flags, bh);
}
void ext4_read_bh_nowait(struct buffer_head *bh, blk_opf_t op_flags,
			 bh_end_io_t *end_io, bool simu_fail)
{
	BUG_ON(!buffer_locked(bh));

	if (ext4_buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return;
	}
	__ext4_read_bh(bh, op_flags, end_io, simu_fail);
}
int ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags,
		 bh_end_io_t *end_io, bool simu_fail)
{
	BUG_ON(!buffer_locked(bh));

	if (ext4_buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}

	__ext4_read_bh(bh, op_flags, end_io, simu_fail);

	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return 0;
	return -EIO;
}
int ext4_read_bh_lock(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
{
	lock_buffer(bh);
	if (!wait) {
		ext4_read_bh_nowait(bh, op_flags, NULL, false);
		return 0;
	}
	return ext4_read_bh(bh, op_flags, NULL, false);
}
/*
 * This works like __bread_gfp() except it uses ERR_PTR for error
 * returns.  Currently with sb_bread it's impossible to distinguish
 * between ENOMEM and EIO situations (since both result in a NULL
 * return).
 */
static struct buffer_head *__ext4_sb_bread_gfp(struct super_block *sb,
					       sector_t block,
					       blk_opf_t op_flags, gfp_t gfp)
{
	struct buffer_head *bh;
	int ret;

	bh = sb_getblk_gfp(sb, block, gfp);
	if (bh == NULL)
		return ERR_PTR(-ENOMEM);
	if (ext4_buffer_uptodate(bh))
		return bh;

	ret = ext4_read_bh_lock(bh, REQ_META | op_flags, true);
	if (ret) {
		put_bh(bh);
		return ERR_PTR(ret);
	}
	return bh;
}
struct buffer_head *ext4_sb_bread(struct super_block *sb, sector_t block,
				  blk_opf_t op_flags)
{
	gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_mapping,
					   ~__GFP_FS) | __GFP_MOVABLE;

	return __ext4_sb_bread_gfp(sb, block, op_flags, gfp);
}
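
/*
 * Illustrative caller pattern (a sketch, not taken from this file): because
 * the *_sb_bread helpers above return ERR_PTR() values rather than a bare
 * NULL, callers can tell ENOMEM apart from EIO:
 *
 *	struct buffer_head *bh = ext4_sb_bread(sb, block, 0);
 *
 *	if (IS_ERR(bh))
 *		return PTR_ERR(bh);	// -ENOMEM or -EIO, not just NULL
 *	...
 *	brelse(bh);
 */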
struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb,
					    sector_t block)
{
	gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_mapping,
					   ~__GFP_FS);

	return __ext4_sb_bread_gfp(sb, block, 0, gfp);
}
void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh = bdev_getblk(sb->s_bdev, block,
			sb->s_blocksize, GFP_NOWAIT | __GFP_NOWARN);

	if (likely(bh)) {
		if (trylock_buffer(bh))
			ext4_read_bh_nowait(bh, REQ_RAHEAD, NULL, false);
		brelse(bh);
	}
}
static int ext4_verify_csum_type(struct super_block *sb,
				 struct ext4_super_block *es)
{
	if (!ext4_has_feature_metadata_csum(sb))
		return 1;

	return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
}
__le32 ext4_superblock_csum(struct super_block *sb,
			    struct ext4_super_block *es)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int offset = offsetof(struct ext4_super_block, s_checksum);
	__u32 csum;

	csum = ext4_chksum(sbi, ~0, (char *)es, offset);

	return cpu_to_le32(csum);
}
static int ext4_superblock_csum_verify(struct super_block *sb,
				       struct ext4_super_block *es)
{
	if (!ext4_has_metadata_csum(sb))
		return 1;

	return es->s_checksum == ext4_superblock_csum(sb, es);
}
void ext4_superblock_csum_set(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (!ext4_has_metadata_csum(sb))
		return;

	es->s_checksum = ext4_superblock_csum(sb, es);
}
ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_block_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
}
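
/*
 * Worked example for the hi/lo split used by this getter and the ones
 * below (values chosen purely for illustration): with 64-byte group
 * descriptors (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT), a bitmap
 * block number 0x100000004 is stored as bg_block_bitmap_lo = 0x00000004
 * and bg_block_bitmap_hi = 0x00000001; with 32-byte descriptors only the
 * _lo half exists, so such values must fit in 32 bits.
 */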
ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_table(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_table_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
}

__u32 ext4_free_group_clusters(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_blocks_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
}

__u32 ext4_free_inodes_count(struct super_block *sb,
			     struct ext4_group_desc *bg)
{
	return le16_to_cpu(READ_ONCE(bg->bg_free_inodes_count_lo)) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(READ_ONCE(bg->bg_free_inodes_count_hi)) << 16 : 0);
}

__u32 ext4_used_dirs_count(struct super_block *sb,
			   struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_used_dirs_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
}

__u32 ext4_itable_unused_count(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_itable_unused_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
}
void ext4_block_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_bitmap_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_table_set(struct super_block *sb,
			  struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
}

void ext4_free_group_clusters_set(struct super_block *sb,
				  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
}

void ext4_free_inodes_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	WRITE_ONCE(bg->bg_free_inodes_count_lo, cpu_to_le16((__u16)count));
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		WRITE_ONCE(bg->bg_free_inodes_count_hi, cpu_to_le16(count >> 16));
}

void ext4_used_dirs_set(struct super_block *sb,
			struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
}

void ext4_itable_unused_set(struct super_block *sb,
			    struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
}
static void __ext4_update_tstamp(__le32 *lo, __u8 *hi, time64_t now)
{
	now = clamp_val(now, 0, (1ull << 40) - 1);

	*lo = cpu_to_le32(lower_32_bits(now));
	*hi = upper_32_bits(now);
}

static time64_t __ext4_get_tstamp(__le32 *lo, __u8 *hi)
{
	return ((time64_t)(*hi) << 32) + le32_to_cpu(*lo);
}

#define ext4_update_tstamp(es, tstamp) \
	__ext4_update_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi, \
			     ktime_get_real_seconds())
#define ext4_get_tstamp(es, tstamp) \
	__ext4_get_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi)
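
/*
 * Example of the 40-bit split encoding above (value chosen for
 * illustration): a timestamp now = 0x123456789A is stored as
 * *lo = cpu_to_le32(0x3456789A) and *hi = 0x12; __ext4_get_tstamp()
 * reassembles it as ((time64_t)0x12 << 32) + 0x3456789A. Values beyond
 * (1ull << 40) - 1 are clamped before being stored.
 */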
#define EXT4_SB_REFRESH_INTERVAL_SEC (3600) /* seconds (1 hour) */
#define EXT4_SB_REFRESH_INTERVAL_KB (16384) /* kilobytes (16MB) */
/*
 * The ext4_maybe_update_superblock() function checks and updates the
 * superblock if needed.
 *
 * This function is designed to update the on-disk superblock only under
 * certain conditions to prevent excessive disk writes and unnecessary
 * waking of the disk from sleep. The superblock will be updated if:
 * 1. More than an hour has passed since the last superblock update, and
 * 2. More than 16MB have been written since the last superblock update.
 *
 * @sb: The superblock
 */
static void ext4_maybe_update_superblock(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	journal_t *journal = sbi->s_journal;
	time64_t now;
	__u64 last_update;
	__u64 lifetime_write_kbytes;
	__u64 diff_size;

	if (sb_rdonly(sb) || !(sb->s_flags & SB_ACTIVE) ||
	    !journal || (journal->j_flags & JBD2_UNMOUNT))
		return;

	now = ktime_get_real_seconds();
	last_update = ext4_get_tstamp(es, s_wtime);

	if (likely(now - last_update < EXT4_SB_REFRESH_INTERVAL_SEC))
		return;

	lifetime_write_kbytes = sbi->s_kbytes_written +
		((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
		  sbi->s_sectors_written_start) >> 1);

	/* Get the number of kilobytes not written to disk to account
	 * for statistics and compare with a multiple of 16 MB. This
	 * is used to determine when the next superblock commit should
	 * occur (i.e. not more often than once per 16MB if there was
	 * less written in an hour).
	 */
	diff_size = lifetime_write_kbytes - le64_to_cpu(es->s_kbytes_written);

	if (diff_size > EXT4_SB_REFRESH_INTERVAL_KB)
		schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
}
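
/*
 * Illustration of the arithmetic above (numbers are hypothetical):
 * part_stat_read() counts 512-byte sectors, so the ">> 1" converts
 * sectors to kilobytes. If 40960 sectors were written since mount, that
 * contributes 20480 KB, and the superblock is only rewritten once
 * diff_size exceeds EXT4_SB_REFRESH_INTERVAL_KB (16384 KB = 16 MB).
 */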
static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
{
	struct super_block *sb = journal->j_private;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int error = is_journal_aborted(journal);
	struct ext4_journal_cb_entry *jce;

	BUG_ON(txn->t_state == T_FINISHED);

	ext4_process_freed_data(sb, txn->t_tid);
	ext4_maybe_update_superblock(sb);

	spin_lock(&sbi->s_md_lock);
	while (!list_empty(&txn->t_private_list)) {
		jce = list_entry(txn->t_private_list.next,
				 struct ext4_journal_cb_entry, jce_list);
		list_del_init(&jce->jce_list);
		spin_unlock(&sbi->s_md_lock);
		jce->jce_func(sb, jce, error);
		spin_lock(&sbi->s_md_lock);
	}
	spin_unlock(&sbi->s_md_lock);
}
/*
 * This writepage callback for write_cache_pages()
 * takes care of a few cases after page cleaning.
 *
 * write_cache_pages() already checks for dirty pages
 * and calls clear_page_dirty_for_io(), which we want,
 * to write protect the pages.
 *
 * However, we may have to redirty a page (see below.)
 */
static int ext4_journalled_writepage_callback(struct folio *folio,
					      struct writeback_control *wbc,
					      void *data)
{
	transaction_t *transaction = (transaction_t *) data;
	struct buffer_head *bh, *head;
	struct journal_head *jh;

	bh = head = folio_buffers(folio);
	do {
		/*
		 * We have to redirty a page in these cases:
		 * 1) If buffer is dirty, it means the page was dirty because it
		 * contains a buffer that needs checkpointing. So the dirty bit
		 * needs to be preserved so that checkpointing writes the buffer
		 * properly.
		 * 2) If buffer is not part of the committing transaction
		 * (we may have just accidentally come across this buffer because
		 * inode range tracking is not exact) or if the currently running
		 * transaction already contains this buffer as well, dirty bit
		 * needs to be preserved so that the buffer gets writeprotected
		 * properly on running transaction's commit.
		 */
		jh = bh2jh(bh);
		if (buffer_dirty(bh) ||
		    (jh && (jh->b_transaction != transaction ||
			    jh->b_next_transaction))) {
			folio_redirty_for_writepage(wbc, folio);
			goto out;
		}
	} while ((bh = bh->b_this_page) != head);

out:
	return AOP_WRITEPAGE_ACTIVATE;
}
static int ext4_journalled_submit_inode_data_buffers(struct jbd2_inode *jinode)
{
	struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
	struct writeback_control wbc = {
		.sync_mode =  WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = jinode->i_dirty_start,
		.range_end = jinode->i_dirty_end,
	};

	return write_cache_pages(mapping, &wbc,
				 ext4_journalled_writepage_callback,
				 jinode->i_transaction);
}
static int ext4_journal_submit_inode_data_buffers(struct jbd2_inode *jinode)
{
	int ret;

	if (ext4_should_journal_data(jinode->i_vfs_inode))
		ret = ext4_journalled_submit_inode_data_buffers(jinode);
	else
		ret = ext4_normal_submit_inode_data_buffers(jinode);
	return ret;
}
static int ext4_journal_finish_inode_data_buffers(struct jbd2_inode *jinode)
{
	int ret = 0;

	if (!ext4_should_journal_data(jinode->i_vfs_inode))
		ret = jbd2_journal_finish_inode_data_buffers(jinode);

	return ret;
}
static bool system_going_down(void)
{
	return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
		|| system_state == SYSTEM_RESTART;
}
struct ext4_err_translation {
	int code;
	int errno;
};

#define EXT4_ERR_TRANSLATE(err) { .code = EXT4_ERR_##err, .errno = err }
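
/*
 * For instance, EXT4_ERR_TRANSLATE(EIO) expands to
 * { .code = EXT4_ERR_EIO, .errno = EIO }, pairing the on-disk error code
 * with the corresponding kernel errno in the table below.
 */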
static struct ext4_err_translation err_translation[] = {
	EXT4_ERR_TRANSLATE(EIO),
	EXT4_ERR_TRANSLATE(ENOMEM),
	EXT4_ERR_TRANSLATE(EFSBADCRC),
	EXT4_ERR_TRANSLATE(EFSCORRUPTED),
	EXT4_ERR_TRANSLATE(ENOSPC),
	EXT4_ERR_TRANSLATE(ENOKEY),
	EXT4_ERR_TRANSLATE(EROFS),
	EXT4_ERR_TRANSLATE(EFBIG),
	EXT4_ERR_TRANSLATE(EEXIST),
	EXT4_ERR_TRANSLATE(ERANGE),
	EXT4_ERR_TRANSLATE(EOVERFLOW),
	EXT4_ERR_TRANSLATE(EBUSY),
	EXT4_ERR_TRANSLATE(ENOTDIR),
	EXT4_ERR_TRANSLATE(ENOTEMPTY),
	EXT4_ERR_TRANSLATE(ESHUTDOWN),
	EXT4_ERR_TRANSLATE(EFAULT),
};
static int ext4_errno_to_code(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(err_translation); i++)
		if (err_translation[i].errno == errno)
			return err_translation[i].code;
	return EXT4_ERR_UNKNOWN;
}
static void save_error_info(struct super_block *sb, int error,
			    __u32 ino, __u64 block,
			    const char *func, unsigned int line)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* We default to EFSCORRUPTED error... */
	if (error == 0)
		error = EFSCORRUPTED;

	spin_lock(&sbi->s_error_lock);
	sbi->s_add_error_count++;
	sbi->s_last_error_code = error;
	sbi->s_last_error_line = line;
	sbi->s_last_error_ino = ino;
	sbi->s_last_error_block = block;
	sbi->s_last_error_func = func;
	sbi->s_last_error_time = ktime_get_real_seconds();
	if (!sbi->s_first_error_time) {
		sbi->s_first_error_code = error;
		sbi->s_first_error_line = line;
		sbi->s_first_error_ino = ino;
		sbi->s_first_error_block = block;
		sbi->s_first_error_func = func;
		sbi->s_first_error_time = sbi->s_last_error_time;
	}
	spin_unlock(&sbi->s_error_lock);
}
/* Deal with the reporting of failure conditions on a filesystem such as
 * inconsistencies detected or read IO failures.
 *
 * On ext2, we can store the error state of the filesystem in the
 * superblock.  That is not possible on ext4, because we may have other
 * write ordering constraints on the superblock which prevent us from
 * writing it out straight away; and given that the journal is about to
 * be aborted, we can't rely on the current, or future, transactions to
 * write out the superblock safely.
 *
 * We'll just use the jbd2_journal_abort() error code to record an error in
 * the journal instead.  On recovery, the journal will complain about
 * that error until we've noted it down and cleared it.
 *
 * If force_ro is set, we unconditionally force the filesystem into an
 * ABORT|READONLY state, unless the error response on the fs has been set to
 * panic in which case we take the easy way out and panic immediately. This is
 * used to deal with unrecoverable failures such as journal IO errors or ENOMEM
 * at a critical moment in log management.
 */
static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
			      __u32 ino, __u64 block,
			      const char *func, unsigned int line)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;
	bool continue_fs = !force_ro && test_opt(sb, ERRORS_CONT);

	EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
	if (test_opt(sb, WARN_ON_ERROR))
		WARN_ON_ONCE(1);

	if (!continue_fs && !sb_rdonly(sb)) {
		set_bit(EXT4_FLAGS_SHUTDOWN, &EXT4_SB(sb)->s_ext4_flags);
		if (journal)
			jbd2_journal_abort(journal, -EIO);
	}

	if (!bdev_read_only(sb->s_bdev)) {
		save_error_info(sb, error, ino, block, func, line);
		/*
		 * In case the fs should keep running, we need to writeout
		 * superblock through the journal. Due to lock ordering
		 * constraints, it may not be safe to do it right here so we
		 * defer superblock flushing to a workqueue.
		 */
		if (continue_fs && journal)
			schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
		else
			ext4_commit_super(sb);
	}

	/*
	 * We force ERRORS_RO behavior when system is rebooting. Otherwise we
	 * could panic during 'reboot -f' as the underlying device got already
	 * disabled.
	 */
	if (test_opt(sb, ERRORS_PANIC) && !system_going_down()) {
		panic("EXT4-fs (device %s): panic forced after error\n",
			sb->s_id);
	}

	if (sb_rdonly(sb) || continue_fs)
		return;

	ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
	/*
	 * EXT4_FLAGS_SHUTDOWN was set which stops all filesystem
	 * modifications. We don't set SB_RDONLY because that requires
	 * sb->s_umount semaphore and setting it without proper remount
	 * procedure is confusing code such as freeze_super() leading to
	 * deadlocks and other problems.
	 */
}
static void update_super_work(struct work_struct *work)
{
	struct ext4_sb_info *sbi = container_of(work, struct ext4_sb_info,
						s_sb_upd_work);
	journal_t *journal = sbi->s_journal;
	handle_t *handle;

	/*
	 * If the journal is still running, we have to write out superblock
	 * through the journal to avoid collisions of other journalled sb
	 * updates.
	 *
	 * We use directly jbd2 functions here to avoid recursing back into
	 * ext4 error handling code during handling of previous errors.
	 */
	if (!sb_rdonly(sbi->s_sb) && journal) {
		struct buffer_head *sbh = sbi->s_sbh;
		bool call_notify_err = false;

		handle = jbd2_journal_start(journal, 1);
		if (IS_ERR(handle))
			goto write_directly;
		if (jbd2_journal_get_write_access(handle, sbh)) {
			jbd2_journal_stop(handle);
			goto write_directly;
		}

		if (sbi->s_add_error_count > 0)
			call_notify_err = true;

		ext4_update_super(sbi->s_sb);
		if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
			ext4_msg(sbi->s_sb, KERN_ERR, "previous I/O error to "
				 "superblock detected");
			clear_buffer_write_io_error(sbh);
			set_buffer_uptodate(sbh);
		}

		if (jbd2_journal_dirty_metadata(handle, sbh)) {
			jbd2_journal_stop(handle);
			goto write_directly;
		}
		jbd2_journal_stop(handle);

		if (call_notify_err)
			ext4_notify_error_sysfs(sbi);

		return;
	}
write_directly:
	/*
	 * Write through journal failed. Write sb directly to get error info
	 * out and hope for the best.
	 */
	ext4_commit_super(sbi->s_sb);
	ext4_notify_error_sysfs(sbi);
}
#define ext4_error_ratelimit(sb)					\
		(___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state),	\
			     "EXT4-fs error"))
void __ext4_error(struct super_block *sb, const char *function,
		  unsigned int line, bool force_ro, int error, __u64 block,
		  const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(sb)))
		return;

	trace_ext4_error(sb, function, line);
	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT
		       "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
		       sb->s_id, function, line, current->comm, &vaf);
		va_end(args);
	}
	fsnotify_sb_error(sb, NULL, error ? error : EFSCORRUPTED);

	ext4_handle_error(sb, force_ro, error, 0, block, function, line);
}
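
/*
 * Callers normally reach __ext4_error() and its siblings below through
 * wrapper macros defined in ext4.h (e.g. ext4_error(sb, fmt, ...)) that
 * supply __func__ and __LINE__ automatically, so the report pinpoints
 * the failing call site.
 */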
void __ext4_error_inode(struct inode *inode, const char *function,
			unsigned int line, ext4_fsblk_t block, int error,
			const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;

	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
		return;

	trace_ext4_error(inode->i_sb, function, line);
	if (ext4_error_ratelimit(inode->i_sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: block %llu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, &vaf);
		else
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, &vaf);
		va_end(args);
	}
	fsnotify_sb_error(inode->i_sb, inode, error ? error : EFSCORRUPTED);

	ext4_handle_error(inode->i_sb, false, error, inode->i_ino, block,
			  function, line);
}
void __ext4_error_file(struct file *file, const char *function,
		       unsigned int line, ext4_fsblk_t block,
		       const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;
	struct inode *inode = file_inode(file);
	char pathname[80], *path;

	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
		return;

	trace_ext4_error(inode->i_sb, function, line);
	if (ext4_error_ratelimit(inode->i_sb)) {
		path = file_path(file, pathname, sizeof(pathname));
		if (IS_ERR(path))
			path = "(unknown)";
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "block %llu: comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, path, &vaf);
		else
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, path, &vaf);
		va_end(args);
	}
	fsnotify_sb_error(inode->i_sb, inode, EFSCORRUPTED);

	ext4_handle_error(inode->i_sb, false, EFSCORRUPTED, inode->i_ino, block,
			  function, line);
}
const char *ext4_decode_error(struct super_block *sb, int errno,
			      char nbuf[16])
{
	char *errstr = NULL;

	switch (errno) {
	case -EFSCORRUPTED:
		errstr = "Corrupt filesystem";
		break;
	case -EFSBADCRC:
		errstr = "Filesystem failed CRC";
		break;
	case -EIO:
		errstr = "IO failure";
		break;
	case -ENOMEM:
		errstr = "Out of memory";
		break;
	case -EROFS:
		if (!sb || (EXT4_SB(sb)->s_journal &&
			    EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
			errstr = "Journal has aborted";
		else
			errstr = "Readonly filesystem";
		break;
	default:
		/* If the caller passed in an extra buffer for unknown
		 * errors, textualise them now.  Else we just return
		 * NULL. */
		if (nbuf) {
			/* Check for truncated error codes... */
			if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
				errstr = nbuf;
		}
		break;
	}

	return errstr;
}
/* __ext4_std_error decodes expected errors from journaling functions
 * automatically and invokes the appropriate error response. */

void __ext4_std_error(struct super_block *sb, const char *function,
		      unsigned int line, int errno)
{
	char nbuf[16];
	const char *errstr;

	if (unlikely(ext4_forced_shutdown(sb)))
		return;

	/* Special case: if the error is EROFS, and we're not already
	 * inside a transaction, then there's really no point in logging
	 * an error. */
	if (errno == -EROFS && journal_current_handle() == NULL && sb_rdonly(sb))
		return;

	if (ext4_error_ratelimit(sb)) {
		errstr = ext4_decode_error(sb, errno, nbuf);
		printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
		       sb->s_id, function, line, errstr);
	}
	fsnotify_sb_error(sb, NULL, errno ? errno : EFSCORRUPTED);

	ext4_handle_error(sb, false, -errno, 0, 0, function, line);
}
void __ext4_msg(struct super_block *sb,
		const char *prefix, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (sb) {
		atomic_inc(&EXT4_SB(sb)->s_msg_count);
		if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state),
				  "EXT4-fs"))
			return;
	}

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	if (sb)
		printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
	else
		printk("%sEXT4-fs: %pV\n", prefix, &vaf);
	va_end(args);
}
static int ext4_warning_ratelimit(struct super_block *sb)
{
	atomic_inc(&EXT4_SB(sb)->s_warning_count);
	return ___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),
			    "EXT4-fs warning");
}
void __ext4_warning(struct super_block *sb, const char *function,
		    unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
	       sb->s_id, function, line, &vaf);
	va_end(args);
}
void __ext4_warning_inode(const struct inode *inode, const char *function,
			  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(inode->i_sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: "
	       "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id,
	       function, line, inode->i_ino, current->comm, &vaf);
	va_end(args);
}
void __ext4_grp_locked_error(const char *function, unsigned int line,
			     struct super_block *sb, ext4_group_t grp,
			     unsigned long ino, ext4_fsblk_t block,
			     const char *fmt, ...)
__releases(bitlock)
__acquires(bitlock)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(sb)))
		return;

	trace_ext4_error(sb, function, line);
	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
		       sb->s_id, function, line, grp);
		if (ino)
			printk(KERN_CONT "inode %lu: ", ino);
		if (block)
			printk(KERN_CONT "block %llu:",
			       (unsigned long long) block);
		printk(KERN_CONT "%pV\n", &vaf);
		va_end(args);
	}

	if (test_opt(sb, ERRORS_CONT)) {
		if (test_opt(sb, WARN_ON_ERROR))
			WARN_ON_ONCE(1);
		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
		if (!bdev_read_only(sb->s_bdev)) {
			save_error_info(sb, EFSCORRUPTED, ino, block, function,
					line);
			schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
		}
		return;
	}
	ext4_unlock_group(sb, grp);
	ext4_handle_error(sb, false, EFSCORRUPTED, ino, block, function, line);
	/*
	 * We only get here in the ERRORS_RO case; relocking the group
	 * may be dangerous, but nothing bad will happen since the
	 * filesystem will have already been marked read/only and the
	 * journal has been aborted.  We return 1 as a hint to callers
	 * who might want to use the return value from
	 * ext4_grp_locked_error() to distinguish between the
	 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
	 * aggressively from the ext4 function in question, with a
	 * more appropriate error code.
	 */
	ext4_lock_group(sb, grp);
}
void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
				      ext4_group_t group,
				      unsigned int flags)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
	int ret;

	if (!grp || !gdp)
		return;
	if (flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) {
		ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
					    &grp->bb_state);
		if (!ret)
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
	}

	if (flags & EXT4_GROUP_INFO_IBITMAP_CORRUPT) {
		ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT,
					    &grp->bb_state);
		if (!ret && gdp) {
			int count;

			count = ext4_free_inodes_count(sb, gdp);
			percpu_counter_sub(&sbi->s_freeinodes_counter,
					   count);
		}
	}
}
void ext4_update_dynamic_rev(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
		return;

	ext4_warning(sb,
		     "updating to rev %d because of new feature flag, "
		     "running e2fsck is recommended",
		     EXT4_DYNAMIC_REV);

	es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
	es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
	es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
	/* leave es->s_feature_*compat flags alone */
	/* es->s_uuid will be set by e2fsck if empty */

	/*
	 * The rest of the superblock fields should be zero, and if not it
	 * means they are likely already in use, so leave them alone.  We
	 * can leave it up to e2fsck to clean up any inconsistencies there.
	 */
}
static inline struct inode *orphan_list_entry(struct list_head *l)
{
	return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
}
static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
{
	struct list_head *l;

	ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
		 le32_to_cpu(sbi->s_es->s_last_orphan));

	printk(KERN_ERR "sb_info orphan list:\n");
	list_for_each(l, &sbi->s_orphan) {
		struct inode *inode = orphan_list_entry(l);
		printk(KERN_ERR "  "
		       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
		       inode->i_sb->s_id, inode->i_ino, inode,
		       inode->i_mode, inode->i_nlink,
		       NEXT_ORPHAN(inode));
	}
}
#ifdef CONFIG_QUOTA
static int ext4_quota_off(struct super_block *sb, int type);

static inline void ext4_quotas_off(struct super_block *sb, int type)
{
	BUG_ON(type > EXT4_MAXQUOTAS);

	/* Use our quota_off function to clear inode flags etc. */
	for (type--; type >= 0; type--)
		ext4_quota_off(sb, type);
}
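
/*
 * For example, ext4_put_super() below calls
 * ext4_quotas_off(sb, EXT4_MAXQUOTAS), which walks every quota type from
 * EXT4_MAXQUOTAS - 1 down to 0 and turns each one off.
 */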
/*
 * This is a helper function which is used in the mount/remount
 * codepaths (which holds s_umount) to fetch the quota file name.
 */
static inline char *get_qf_name(struct super_block *sb,
				struct ext4_sb_info *sbi,
				int type)
{
	return rcu_dereference_protected(sbi->s_qf_names[type],
					 lockdep_is_held(&sb->s_umount));
}
#else
static inline void ext4_quotas_off(struct super_block *sb, int type)
{
}
#endif
static int ext4_percpu_param_init(struct ext4_sb_info *sbi)
{
	ext4_fsblk_t block;
	int err;

	block = ext4_count_free_clusters(sbi->s_sb);
	ext4_free_blocks_count_set(sbi->s_es, EXT4_C2B(sbi, block));
	err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
				  GFP_KERNEL);
	if (!err) {
		unsigned long freei = ext4_count_free_inodes(sbi->s_sb);
		sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
		err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
					  GFP_KERNEL);
	}
	if (!err)
		err = percpu_counter_init(&sbi->s_dirs_counter,
					  ext4_count_dirs(sbi->s_sb), GFP_KERNEL);
	if (!err)
		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
					  GFP_KERNEL);
	if (!err)
		err = percpu_counter_init(&sbi->s_sra_exceeded_retry_limit, 0,
					  GFP_KERNEL);
	if (!err)
		err = percpu_init_rwsem(&sbi->s_writepages_rwsem);

	if (err)
		ext4_msg(sbi->s_sb, KERN_ERR, "insufficient memory");

	return err;
}
static void ext4_percpu_param_destroy(struct ext4_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->s_freeclusters_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
	percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
	percpu_free_rwsem(&sbi->s_writepages_rwsem);
}
static void ext4_group_desc_free(struct ext4_sb_info *sbi)
{
	struct buffer_head **group_desc;
	int i;

	rcu_read_lock();
	group_desc = rcu_dereference(sbi->s_group_desc);
	for (i = 0; i < sbi->s_gdb_count; i++)
		brelse(group_desc[i]);
	kvfree(group_desc);
	rcu_read_unlock();
}
static void ext4_flex_groups_free(struct ext4_sb_info *sbi)
{
	struct flex_groups **flex_groups;
	int i;

	rcu_read_lock();
	flex_groups = rcu_dereference(sbi->s_flex_groups);
	if (flex_groups) {
		for (i = 0; i < sbi->s_flex_groups_allocated; i++)
			kvfree(flex_groups[i]);
		kvfree(flex_groups);
	}
	rcu_read_unlock();
}
static void ext4_put_super(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int aborted = 0;
	int err;

	/*
	 * Unregister sysfs before destroying jbd2 journal.
	 * Since we could still access attr_journal_task attribute via sysfs
	 * path which could have sbi->s_journal->j_task as NULL.
	 * Unregister sysfs before flushing sbi->s_sb_upd_work.
	 * Since a user may read /proc/fs/ext4/xx/mb_groups during umount;
	 * if reading metadata fails verification, error work is queued and
	 * update_super_work calling start_this_handle may trigger a BUG_ON.
	 */
	ext4_unregister_sysfs(sb);

	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs unmount"))
		ext4_msg(sb, KERN_INFO, "unmounting filesystem %pU.",
			 &sb->s_uuid);

	ext4_unregister_li_request(sb);
	ext4_quotas_off(sb, EXT4_MAXQUOTAS);

	flush_work(&sbi->s_sb_upd_work);
	destroy_workqueue(sbi->rsv_conversion_wq);
	ext4_release_orphan_info(sb);

	if (sbi->s_journal) {
		aborted = is_journal_aborted(sbi->s_journal);
		err = jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
		if ((err < 0) && !aborted) {
			ext4_abort(sb, -err, "Couldn't clean up the journal");
		}
	}

	ext4_es_unregister_shrinker(sbi);
	timer_shutdown_sync(&sbi->s_err_report);
	ext4_release_system_zone(sb);
	ext4_mb_release(sb);
	ext4_ext_release(sb);

	if (!sb_rdonly(sb) && !aborted) {
		ext4_clear_feature_journal_needs_recovery(sb);
		ext4_clear_feature_orphan_present(sb);
		es->s_state = cpu_to_le16(sbi->s_mount_state);
	}
	if (!sb_rdonly(sb))
		ext4_commit_super(sb);

	ext4_group_desc_free(sbi);
	ext4_flex_groups_free(sbi);

	WARN_ON_ONCE(!(sbi->s_mount_state & EXT4_ERROR_FS) &&
		     percpu_counter_sum(&sbi->s_dirtyclusters_counter));
	ext4_percpu_param_destroy(sbi);
#ifdef CONFIG_QUOTA
	for (int i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(get_qf_name(sb, sbi, i));
#endif

	/* Debugging code just in case the in-memory inode orphan list
	 * isn't empty.  The on-disk one can be non-empty if we've
	 * detected an error and taken the fs readonly, but the
	 * in-memory list had better be clean by this point. */
	if (!list_empty(&sbi->s_orphan))
		dump_orphan_list(sb, sbi);
	ASSERT(list_empty(&sbi->s_orphan));

	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);
	if (sbi->s_journal_bdev_file) {
		/*
		 * Invalidate the journal device's buffers.  We don't want them
		 * floating about in memory - the physical journal device may
		 * be hotswapped, and it breaks the `ro-after' testing code.
		 */
		sync_blockdev(file_bdev(sbi->s_journal_bdev_file));
		invalidate_bdev(file_bdev(sbi->s_journal_bdev_file));
	}

	ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
	sbi->s_ea_inode_cache = NULL;

	ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
	sbi->s_ea_block_cache = NULL;

	ext4_stop_mmpd(sbi);

	brelse(sbi->s_sbh);
	sb->s_fs_info = NULL;
	/*
	 * Now that we are completely done shutting down the
	 * superblock, we need to actually destroy the kobject.
	 */
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->s_blockgroup_lock);
	fs_put_dax(sbi->s_daxdev, NULL);
	fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
#if IS_ENABLED(CONFIG_UNICODE)
	utf8_unload(sb->s_encoding);
#endif
	kfree(sbi);
}
static struct kmem_cache *ext4_inode_cachep;
/*
 * Called inside transaction, so use GFP_NOFS
 */
static struct inode *ext4_alloc_inode(struct super_block *sb)
{
	struct ext4_inode_info *ei;

	ei = alloc_inode_sb(sb, ext4_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;

	inode_set_iversion(&ei->vfs_inode, 1);
	ei->i_flags = 0;
	spin_lock_init(&ei->i_raw_lock);
	ei->i_prealloc_node = RB_ROOT;
	atomic_set(&ei->i_prealloc_active, 0);
	rwlock_init(&ei->i_prealloc_lock);
	ext4_es_init_tree(&ei->i_es_tree);
	rwlock_init(&ei->i_es_lock);
	INIT_LIST_HEAD(&ei->i_es_list);
	ei->i_es_all_nr = 0;
	ei->i_es_shk_nr = 0;
	ei->i_es_shrink_lblk = 0;
	ei->i_reserved_data_blocks = 0;
	spin_lock_init(&(ei->i_block_reservation_lock));
	ext4_init_pending_tree(&ei->i_pending_tree);
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
	memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
#endif
	ei->jinode = NULL;
	INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
	spin_lock_init(&ei->i_completed_io_lock);
	ei->i_sync_tid = 0;
	ei->i_datasync_tid = 0;
	atomic_set(&ei->i_unwritten, 0);
	INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
	ext4_fc_init_inode(&ei->vfs_inode);
	mutex_init(&ei->i_fc_lock);
	return &ei->vfs_inode;
}
static int ext4_drop_inode(struct inode *inode)
{
	int drop = generic_drop_inode(inode);

	if (!drop)
		drop = fscrypt_drop_inode(inode);

	trace_ext4_drop_inode(inode, drop);
	return drop;
}
static void ext4_free_in_core_inode(struct inode *inode)
{
	fscrypt_free_inode(inode);
	if (!list_empty(&(EXT4_I(inode)->i_fc_list))) {
		pr_warn("%s: inode %ld still in fc list",
			__func__, inode->i_ino);
	}
	kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
}
static void ext4_destroy_inode(struct inode *inode)
{
	if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
		ext4_msg(inode->i_sb, KERN_ERR,
			 "Inode %lu (%p): orphan list check failed!",
			 inode->i_ino, EXT4_I(inode));
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
			       EXT4_I(inode), sizeof(struct ext4_inode_info),
			       true);
		dump_stack();
	}

	if (!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ERROR_FS) &&
	    WARN_ON_ONCE(EXT4_I(inode)->i_reserved_data_blocks))
		ext4_msg(inode->i_sb, KERN_ERR,
			 "Inode %lu (%p): i_reserved_data_blocks (%u) not cleared!",
			 inode->i_ino, EXT4_I(inode),
			 EXT4_I(inode)->i_reserved_data_blocks);
}
static void ext4_shutdown(struct super_block *sb)
{
	ext4_force_shutdown(sb, EXT4_GOING_FLAGS_NOLOGFLUSH);
}
static void init_once(void *foo)
{
	struct ext4_inode_info *ei = foo;

	INIT_LIST_HEAD(&ei->i_orphan);
	init_rwsem(&ei->xattr_sem);
	init_rwsem(&ei->i_data_sem);
	inode_init_once(&ei->vfs_inode);
	ext4_fc_init_inode(&ei->vfs_inode);
}
static int __init init_inodecache(void)
{
	ext4_inode_cachep = kmem_cache_create_usercopy("ext4_inode_cache",
				sizeof(struct ext4_inode_info), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
				offsetof(struct ext4_inode_info, i_data),
				sizeof_field(struct ext4_inode_info, i_data),
				init_once);
	if (ext4_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}
static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(ext4_inode_cachep);
}
void ext4_clear_inode(struct inode *inode)
{
	ext4_fc_del(inode);
	invalidate_inode_buffers(inode);
	clear_inode(inode);
	ext4_discard_preallocations(inode);
	ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
	dquot_drop(inode);
	if (EXT4_I(inode)->jinode) {
		jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
					       EXT4_I(inode)->jinode);
		jbd2_free_inode(EXT4_I(inode)->jinode);
		EXT4_I(inode)->jinode = NULL;
	}
	fscrypt_put_encryption_info(inode);
	fsverity_cleanup_inode(inode);
}
static struct inode *ext4_nfs_get_inode(struct super_block *sb,
					u64 ino, u32 generation)
{
	struct inode *inode;

	/*
	 * Currently we don't know the generation for parent directory, so
	 * a generation of 0 means "accept any"
	 */
	inode = ext4_iget(sb, ino, EXT4_IGET_HANDLE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (generation && inode->i_generation != generation) {
		iput(inode);
		return ERR_PTR(-ESTALE);
	}

	return inode;
}
static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}
static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}
static int ext4_nfs_commit_metadata(struct inode *inode)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL
	};

	trace_ext4_nfs_commit_metadata(inode);
	return ext4_write_inode(inode, &wbc);
}
#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])

static int ext4_write_dquot(struct dquot *dquot);
static int ext4_acquire_dquot(struct dquot *dquot);
static int ext4_release_dquot(struct dquot *dquot);
static int ext4_mark_dquot_dirty(struct dquot *dquot);
static int ext4_write_info(struct super_block *sb, int type);
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path);
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off);
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off);
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags);

static struct dquot __rcu **ext4_get_dquots(struct inode *inode)
{
	return EXT4_I(inode)->i_dquot;
}
static const struct dquot_operations ext4_quota_operations = {
	.get_reserved_space	= ext4_get_reserved_space,
	.write_dquot		= ext4_write_dquot,
	.acquire_dquot		= ext4_acquire_dquot,
	.release_dquot		= ext4_release_dquot,
	.mark_dirty		= ext4_mark_dquot_dirty,
	.write_info		= ext4_write_info,
	.alloc_dquot		= dquot_alloc,
	.destroy_dquot		= dquot_destroy,
	.get_projid		= ext4_get_projid,
	.get_inode_usage	= ext4_get_inode_usage,
	.get_next_id		= dquot_get_next_id,
};
static const struct quotactl_ops ext4_qctl_operations = {
	.quota_on	= ext4_quota_on,
	.quota_off	= ext4_quota_off,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
#endif
static const struct super_operations ext4_sops = {
	.alloc_inode	= ext4_alloc_inode,
	.free_inode	= ext4_free_in_core_inode,
	.destroy_inode	= ext4_destroy_inode,
	.write_inode	= ext4_write_inode,
	.dirty_inode	= ext4_dirty_inode,
	.drop_inode	= ext4_drop_inode,
	.evict_inode	= ext4_evict_inode,
	.put_super	= ext4_put_super,
	.sync_fs	= ext4_sync_fs,
	.freeze_fs	= ext4_freeze,
	.unfreeze_fs	= ext4_unfreeze,
	.statfs		= ext4_statfs,
	.show_options	= ext4_show_options,
	.shutdown	= ext4_shutdown,
#ifdef CONFIG_QUOTA
	.quota_read	= ext4_quota_read,
	.quota_write	= ext4_quota_write,
	.get_dquots	= ext4_get_dquots,
#endif
};
static const struct export_operations ext4_export_ops = {
	.encode_fh	= generic_encode_ino32_fh,
	.fh_to_dentry	= ext4_fh_to_dentry,
	.fh_to_parent	= ext4_fh_to_parent,
	.get_parent	= ext4_get_parent,
	.commit_metadata = ext4_nfs_commit_metadata,
};
enum {
	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
	Opt_resgid, Opt_resuid, Opt_sb,
	Opt_nouid32, Opt_debug, Opt_removed,
	Opt_user_xattr, Opt_acl,
	Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
	Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
	Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
	Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
	Opt_inlinecrypt,
	Opt_usrjquota, Opt_grpjquota, Opt_quota,
	Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
	Opt_usrquota, Opt_grpquota, Opt_prjquota,
	Opt_dax, Opt_dax_always, Opt_dax_inode, Opt_dax_never,
	Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_warn_on_error,
	Opt_nowarn_on_error, Opt_mblk_io_submit, Opt_debug_want_extra_isize,
	Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
	Opt_inode_readahead_blks, Opt_journal_ioprio,
	Opt_dioread_nolock, Opt_dioread_lock,
	Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
	Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
	Opt_no_prefetch_block_bitmaps, Opt_mb_optimize_scan,
	Opt_errors, Opt_data, Opt_data_err, Opt_jqfmt, Opt_dax_type,
#ifdef CONFIG_EXT4_DEBUG
	Opt_fc_debug_max_replay, Opt_fc_debug_force
#endif
};
static const struct constant_table ext4_param_errors[] = {
	{"continue",	EXT4_MOUNT_ERRORS_CONT},
	{"panic",	EXT4_MOUNT_ERRORS_PANIC},
	{"remount-ro",	EXT4_MOUNT_ERRORS_RO},
	{}
};
static const struct constant_table ext4_param_data[] = {
	{"journal",	EXT4_MOUNT_JOURNAL_DATA},
	{"ordered",	EXT4_MOUNT_ORDERED_DATA},
	{"writeback",	EXT4_MOUNT_WRITEBACK_DATA},
	{}
};
static const struct constant_table ext4_param_data_err[] = {
	{"abort",	Opt_data_err_abort},
	{"ignore",	Opt_data_err_ignore},
	{}
};
static const struct constant_table ext4_param_jqfmt[] = {
	{"vfsold",	QFMT_VFS_OLD},
	{"vfsv0",	QFMT_VFS_V0},
	{"vfsv1",	QFMT_VFS_V1},
	{}
};
static const struct constant_table ext4_param_dax[] = {
	{"always",	Opt_dax_always},
	{"inode",	Opt_dax_inode},
	{"never",	Opt_dax_never},
	{}
};
/*
 * Mount option specification
 * We don't use fsparam_flag_no because of the way we set the
 * options and the way we show them in _ext4_show_options(). To
 * keep the changes to a minimum, let's keep the negative options
 * as is.
 */
static const struct fs_parameter_spec ext4_param_specs[] = {
	fsparam_flag	("bsddf",		Opt_bsd_df),
	fsparam_flag	("minixdf",		Opt_minix_df),
	fsparam_flag	("grpid",		Opt_grpid),
	fsparam_flag	("bsdgroups",		Opt_grpid),
	fsparam_flag	("nogrpid",		Opt_nogrpid),
	fsparam_flag	("sysvgroups",		Opt_nogrpid),
	fsparam_gid	("resgid",		Opt_resgid),
	fsparam_uid	("resuid",		Opt_resuid),
	fsparam_u32	("sb",			Opt_sb),
	fsparam_enum	("errors",		Opt_errors, ext4_param_errors),
	fsparam_flag	("nouid32",		Opt_nouid32),
	fsparam_flag	("debug",		Opt_debug),
	fsparam_flag	("oldalloc",		Opt_removed),
	fsparam_flag	("orlov",		Opt_removed),
	fsparam_flag	("user_xattr",		Opt_user_xattr),
	fsparam_flag	("acl",			Opt_acl),
	fsparam_flag	("norecovery",		Opt_noload),
	fsparam_flag	("noload",		Opt_noload),
	fsparam_flag	("bh",			Opt_removed),
	fsparam_flag	("nobh",		Opt_removed),
	fsparam_u32	("commit",		Opt_commit),
	fsparam_u32	("min_batch_time",	Opt_min_batch_time),
	fsparam_u32	("max_batch_time",	Opt_max_batch_time),
	fsparam_u32	("journal_dev",		Opt_journal_dev),
	fsparam_bdev	("journal_path",	Opt_journal_path),
	fsparam_flag	("journal_checksum",	Opt_journal_checksum),
	fsparam_flag	("nojournal_checksum",	Opt_nojournal_checksum),
	fsparam_flag	("journal_async_commit",Opt_journal_async_commit),
	fsparam_flag	("abort",		Opt_abort),
	fsparam_enum	("data",		Opt_data, ext4_param_data),
	fsparam_enum	("data_err",		Opt_data_err,
						ext4_param_data_err),
	fsparam_string_empty
			("usrjquota",		Opt_usrjquota),
	fsparam_string_empty
			("grpjquota",		Opt_grpjquota),
	fsparam_enum	("jqfmt",		Opt_jqfmt, ext4_param_jqfmt),
	fsparam_flag	("grpquota",		Opt_grpquota),
	fsparam_flag	("quota",		Opt_quota),
	fsparam_flag	("noquota",		Opt_noquota),
	fsparam_flag	("usrquota",		Opt_usrquota),
	fsparam_flag	("prjquota",		Opt_prjquota),
	fsparam_flag	("barrier",		Opt_barrier),
	fsparam_u32	("barrier",		Opt_barrier),
	fsparam_flag	("nobarrier",		Opt_nobarrier),
	fsparam_flag	("i_version",		Opt_removed),
	fsparam_flag	("dax",			Opt_dax),
	fsparam_enum	("dax",			Opt_dax_type, ext4_param_dax),
	fsparam_u32	("stripe",		Opt_stripe),
	fsparam_flag	("delalloc",		Opt_delalloc),
	fsparam_flag	("nodelalloc",		Opt_nodelalloc),
	fsparam_flag	("warn_on_error",	Opt_warn_on_error),
	fsparam_flag	("nowarn_on_error",	Opt_nowarn_on_error),
	fsparam_u32	("debug_want_extra_isize",
						Opt_debug_want_extra_isize),
	fsparam_flag	("mblk_io_submit",	Opt_removed),
	fsparam_flag	("nomblk_io_submit",	Opt_removed),
	fsparam_flag	("block_validity",	Opt_block_validity),
	fsparam_flag	("noblock_validity",	Opt_noblock_validity),
	fsparam_u32	("inode_readahead_blks",
						Opt_inode_readahead_blks),
	fsparam_u32	("journal_ioprio",	Opt_journal_ioprio),
	fsparam_u32	("auto_da_alloc",	Opt_auto_da_alloc),
	fsparam_flag	("auto_da_alloc",	Opt_auto_da_alloc),
	fsparam_flag	("noauto_da_alloc",	Opt_noauto_da_alloc),
	fsparam_flag	("dioread_nolock",	Opt_dioread_nolock),
	fsparam_flag	("nodioread_nolock",	Opt_dioread_lock),
	fsparam_flag	("dioread_lock",	Opt_dioread_lock),
	fsparam_flag	("discard",		Opt_discard),
	fsparam_flag	("nodiscard",		Opt_nodiscard),
	fsparam_u32	("init_itable",		Opt_init_itable),
	fsparam_flag	("init_itable",		Opt_init_itable),
	fsparam_flag	("noinit_itable",	Opt_noinit_itable),
#ifdef CONFIG_EXT4_DEBUG
	fsparam_flag	("fc_debug_force",	Opt_fc_debug_force),
	fsparam_u32	("fc_debug_max_replay",	Opt_fc_debug_max_replay),
#endif
	fsparam_u32	("max_dir_size_kb",	Opt_max_dir_size_kb),
	fsparam_flag	("test_dummy_encryption",
						Opt_test_dummy_encryption),
	fsparam_string	("test_dummy_encryption",
						Opt_test_dummy_encryption),
	fsparam_flag	("inlinecrypt",		Opt_inlinecrypt),
	fsparam_flag	("nombcache",		Opt_nombcache),
	fsparam_flag	("no_mbcache",		Opt_nombcache),	/* for backward compatibility */
	fsparam_flag	("prefetch_block_bitmaps",
						Opt_removed),
	fsparam_flag	("no_prefetch_block_bitmaps",
						Opt_no_prefetch_block_bitmaps),
	fsparam_s32	("mb_optimize_scan",	Opt_mb_optimize_scan),
	fsparam_string	("check",		Opt_removed),	/* mount option from ext2/3 */
	fsparam_flag	("nocheck",		Opt_removed),	/* mount option from ext2/3 */
	fsparam_flag	("reservation",		Opt_removed),	/* mount option from ext2/3 */
	fsparam_flag	("noreservation",	Opt_removed),	/* mount option from ext2/3 */
	fsparam_u32	("journal",		Opt_removed),	/* mount option from ext2/3 */
	{}
};
#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))

#define MOPT_SET	0x0001
#define MOPT_CLEAR	0x0002
#define MOPT_NOSUPPORT	0x0004
#define MOPT_EXPLICIT	0x0008
#ifdef CONFIG_QUOTA
#define MOPT_Q		0
#define MOPT_QFMT	0x0010
#else
#define MOPT_Q		MOPT_NOSUPPORT
#define MOPT_QFMT	MOPT_NOSUPPORT
#endif
#define MOPT_NO_EXT2	0x0020
#define MOPT_NO_EXT3	0x0040
#define MOPT_EXT4_ONLY	(MOPT_NO_EXT2 | MOPT_NO_EXT3)
#define MOPT_SKIP	0x0080
#define MOPT_2		0x0100
static const struct mount_opts {
	int	token;
	int	mount_opt;
	int	flags;
} ext4_mount_opts[] = {
	{Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET},
	{Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR},
	{Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET},
	{Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR},
	{Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET},
	{Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR},
	{Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK,
	 MOPT_EXT4_ONLY | MOPT_SET},
	{Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET},
	{Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR},
	{Opt_delalloc, EXT4_MOUNT_DELALLOC,
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_warn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_SET},
	{Opt_nowarn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_CLEAR},
	{Opt_commit, 0, MOPT_NO_EXT2},
	{Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
				    EXT4_MOUNT_JOURNAL_CHECKSUM),
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET},
	{Opt_data_err, EXT4_MOUNT_DATA_ERR_ABORT, MOPT_NO_EXT2},
	{Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET},
	{Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR},
	{Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
	{Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
	{Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
	{Opt_dax_type, 0, MOPT_EXT4_ONLY},
	{Opt_journal_dev, 0, MOPT_NO_EXT2},
	{Opt_journal_path, 0, MOPT_NO_EXT2},
	{Opt_journal_ioprio, 0, MOPT_NO_EXT2},
	{Opt_data, 0, MOPT_NO_EXT2},
	{Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	{Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET},
#else
	{Opt_acl, 0, MOPT_NOSUPPORT},
#endif
	{Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET},
	{Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET},
	{Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q},
	{Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA,
	 MOPT_SET | MOPT_Q},
	{Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA,
	 MOPT_SET | MOPT_Q},
	{Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA,
	 MOPT_SET | MOPT_Q},
	{Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
		       EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
	 MOPT_CLEAR | MOPT_Q},
	{Opt_usrjquota, 0, MOPT_Q},
	{Opt_grpjquota, 0, MOPT_Q},
	{Opt_jqfmt, 0, MOPT_QFMT},
	{Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
	{Opt_no_prefetch_block_bitmaps, EXT4_MOUNT_NO_PREFETCH_BLOCK_BITMAPS,
	 MOPT_SET},
#ifdef CONFIG_EXT4_DEBUG
	{Opt_fc_debug_force, EXT4_MOUNT2_JOURNAL_FAST_COMMIT,
	 MOPT_SET | MOPT_2 | MOPT_EXT4_ONLY},
#endif
	{Opt_abort, EXT4_MOUNT2_ABORT, MOPT_SET | MOPT_2},
	{Opt_err, 0, 0}
};
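
/*
 * Worked example (illustrative): mounting with "nodelalloc" makes
 * fs_parse() return Opt_nodelalloc, which matches the table row
 * {Opt_nodelalloc, EXT4_MOUNT_DELALLOC, MOPT_EXT4_ONLY | MOPT_CLEAR}.
 * No case in ext4_parse_param() handles it specially, so the generic
 * MOPT_SET/MOPT_CLEAR tail of that function clears EXT4_MOUNT_DELALLOC
 * in the fs_context; MOPT_EXT4_ONLY additionally makes
 * ext4_check_opt_consistency() reject the option for the ext2/ext3
 * compatibility personalities.
 */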
#if IS_ENABLED(CONFIG_UNICODE)
static const struct ext4_sb_encodings {
	__u16 magic;
	char *name;
	unsigned int version;
} ext4_sb_encoding_map[] = {
	{EXT4_ENC_UTF8_12_1, "utf8", UNICODE_AGE(12, 1, 0)},
};

static const struct ext4_sb_encodings *
ext4_sb_read_encoding(const struct ext4_super_block *es)
{
	__u16 magic = le16_to_cpu(es->s_encoding);
	int i;

	for (i = 0; i < ARRAY_SIZE(ext4_sb_encoding_map); i++)
		if (magic == ext4_sb_encoding_map[i].magic)
			return &ext4_sb_encoding_map[i];

	return NULL;
}
#endif
#define EXT4_SPEC_JQUOTA			(1 <<  0)
#define EXT4_SPEC_JQFMT				(1 <<  1)
#define EXT4_SPEC_DATAJ				(1 <<  2)
#define EXT4_SPEC_SB_BLOCK			(1 <<  3)
#define EXT4_SPEC_JOURNAL_DEV			(1 <<  4)
#define EXT4_SPEC_JOURNAL_IOPRIO		(1 <<  5)
#define EXT4_SPEC_s_want_extra_isize		(1 <<  7)
#define EXT4_SPEC_s_max_batch_time		(1 <<  8)
#define EXT4_SPEC_s_min_batch_time		(1 <<  9)
#define EXT4_SPEC_s_inode_readahead_blks	(1 << 10)
#define EXT4_SPEC_s_li_wait_mult		(1 << 11)
#define EXT4_SPEC_s_max_dir_size_kb		(1 << 12)
#define EXT4_SPEC_s_stripe			(1 << 13)
#define EXT4_SPEC_s_resuid			(1 << 14)
#define EXT4_SPEC_s_resgid			(1 << 15)
#define EXT4_SPEC_s_commit_interval		(1 << 16)
#define EXT4_SPEC_s_fc_debug_max_replay		(1 << 17)
#define EXT4_SPEC_s_sb_block			(1 << 18)
#define EXT4_SPEC_mb_optimize_scan		(1 << 19)
struct ext4_fs_context {
	char		*s_qf_names[EXT4_MAXQUOTAS];
	struct fscrypt_dummy_policy dummy_enc_policy;
	int		s_jquota_fmt;	/* Format of quota to use */
#ifdef CONFIG_EXT4_DEBUG
	int s_fc_debug_max_replay;
#endif
	unsigned short	qname_spec;
	unsigned long	vals_s_flags;	/* Bits to set in s_flags */
	unsigned long	mask_s_flags;	/* Bits changed in s_flags */
	unsigned long	journal_devnum;
	unsigned long	s_commit_interval;
	unsigned long	s_stripe;
	unsigned int	s_inode_readahead_blks;
	unsigned int	s_want_extra_isize;
	unsigned int	s_li_wait_mult;
	unsigned int	s_max_dir_size_kb;
	unsigned int	journal_ioprio;
	unsigned int	vals_s_mount_opt;
	unsigned int	mask_s_mount_opt;
	unsigned int	vals_s_mount_opt2;
	unsigned int	mask_s_mount_opt2;
	unsigned int	opt_flags;	/* MOPT flags */
	unsigned int	spec;
	u32		s_max_batch_time;
	u32		s_min_batch_time;
	kuid_t		s_resuid;
	kgid_t		s_resgid;
	ext4_fsblk_t	s_sb_block;
};
static void ext4_fc_free(struct fs_context *fc)
{
	struct ext4_fs_context *ctx = fc->fs_private;
	int i;

	if (!ctx)
		return;

	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(ctx->s_qf_names[i]);

	fscrypt_free_dummy_policy(&ctx->dummy_enc_policy);
	kfree(ctx);
}

int ext4_init_fs_context(struct fs_context *fc)
{
	struct ext4_fs_context *ctx;

	ctx = kzalloc(sizeof(struct ext4_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	fc->fs_private = ctx;
	fc->ops = &ext4_context_ops;

	return 0;
}
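
/*
 * Sketch of the fs_context lifecycle as used here (for orientation; the
 * exact call sequence is driven by the VFS): a mount first calls
 * ext4_init_fs_context() to allocate the ext4_fs_context above, then
 * ext4_parse_param() once per option, then ext4_get_tree(); the context
 * is torn down through ext4_fc_free().  A remount follows the same path
 * with fc->purpose == FS_CONTEXT_FOR_RECONFIGURE and ext4_reconfigure().
 */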
#ifdef CONFIG_QUOTA
/*
 * Note the name of the specified quota file.
 */
static int note_qf_name(struct fs_context *fc, int qtype,
			struct fs_parameter *param)
{
	struct ext4_fs_context *ctx = fc->fs_private;
	char *qname;

	if (param->size < 1) {
		ext4_msg(NULL, KERN_ERR, "Missing quota name");
		return -EINVAL;
	}
	if (strchr(param->string, '/')) {
		ext4_msg(NULL, KERN_ERR,
			 "quotafile must be on filesystem root");
		return -EINVAL;
	}
	if (ctx->s_qf_names[qtype]) {
		if (strcmp(ctx->s_qf_names[qtype], param->string) != 0) {
			ext4_msg(NULL, KERN_ERR,
				 "%s quota file already specified",
				 QTYPE2NAME(qtype));
			return -EINVAL;
		}
		return 0;
	}

	qname = kmemdup_nul(param->string, param->size, GFP_KERNEL);
	if (!qname) {
		ext4_msg(NULL, KERN_ERR,
			 "Not enough memory for storing quotafile name");
		return -ENOMEM;
	}
	ctx->s_qf_names[qtype] = qname;
	ctx->qname_spec |= 1 << qtype;
	ctx->spec |= EXT4_SPEC_JQUOTA;
	return 0;
}
/*
 * Clear the name of the specified quota file.
 */
static int unnote_qf_name(struct fs_context *fc, int qtype)
{
	struct ext4_fs_context *ctx = fc->fs_private;

	kfree(ctx->s_qf_names[qtype]);

	ctx->s_qf_names[qtype] = NULL;
	ctx->qname_spec |= 1 << qtype;
	ctx->spec |= EXT4_SPEC_JQUOTA;
	return 0;
}
#endif
static int ext4_parse_test_dummy_encryption(const struct fs_parameter *param,
					    struct ext4_fs_context *ctx)
{
	int err;

	if (!IS_ENABLED(CONFIG_FS_ENCRYPTION)) {
		ext4_msg(NULL, KERN_WARNING,
			 "test_dummy_encryption option not supported");
		return -EINVAL;
	}
	err = fscrypt_parse_test_dummy_encryption(param,
						  &ctx->dummy_enc_policy);
	if (err == -EINVAL) {
		ext4_msg(NULL, KERN_WARNING,
			 "Value of option \"%s\" is unrecognized", param->key);
	} else if (err == -EEXIST) {
		ext4_msg(NULL, KERN_WARNING,
			 "Conflicting test_dummy_encryption options");
		err = -EINVAL;
	}
	return err;
}
#define EXT4_SET_CTX(name)						\
static inline __maybe_unused						\
void ctx_set_##name(struct ext4_fs_context *ctx, unsigned long flag)	\
{									\
	ctx->mask_s_##name |= flag;					\
	ctx->vals_s_##name |= flag;					\
}

#define EXT4_CLEAR_CTX(name)						\
static inline __maybe_unused						\
void ctx_clear_##name(struct ext4_fs_context *ctx, unsigned long flag)	\
{									\
	ctx->mask_s_##name |= flag;					\
	ctx->vals_s_##name &= ~flag;					\
}

#define EXT4_TEST_CTX(name)						\
static inline unsigned long						\
ctx_test_##name(struct ext4_fs_context *ctx, unsigned long flag)	\
{									\
	return (ctx->vals_s_##name & flag);				\
}

EXT4_SET_CTX(flags); /* set only */
EXT4_SET_CTX(mount_opt);
EXT4_CLEAR_CTX(mount_opt);
EXT4_TEST_CTX(mount_opt);
EXT4_SET_CTX(mount_opt2);
EXT4_CLEAR_CTX(mount_opt2);
EXT4_TEST_CTX(mount_opt2);
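
/*
 * For reference, EXT4_SET_CTX(mount_opt) above expands to approximately:
 *
 *	static inline __maybe_unused
 *	void ctx_set_mount_opt(struct ext4_fs_context *ctx, unsigned long flag)
 *	{
 *		ctx->mask_s_mount_opt |= flag;
 *		ctx->vals_s_mount_opt |= flag;
 *	}
 *
 * mask_s_* records which bits an option touched and vals_s_* the values
 * to apply, so ext4_apply_options() can later modify only the bits that
 * were explicitly set or cleared on the command line.
 */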
static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct ext4_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	const struct mount_opts *m;
	int is_remount;
	int token;

	token = fs_parse(fc, ext4_param_specs, param, &result);
	if (token < 0)
		return token;
	is_remount = fc->purpose == FS_CONTEXT_FOR_RECONFIGURE;

	for (m = ext4_mount_opts; m->token != Opt_err; m++)
		if (token == m->token)
			break;

	ctx->opt_flags |= m->flags;

	if (m->flags & MOPT_EXPLICIT) {
		if (m->mount_opt & EXT4_MOUNT_DELALLOC) {
			ctx_set_mount_opt2(ctx, EXT4_MOUNT2_EXPLICIT_DELALLOC);
		} else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) {
			ctx_set_mount_opt2(ctx,
				EXT4_MOUNT2_EXPLICIT_JOURNAL_CHECKSUM);
		} else
			return -EINVAL;
	}

	if (m->flags & MOPT_NOSUPPORT) {
		ext4_msg(NULL, KERN_ERR, "%s option not supported",
			 param->key);
		return 0;
	}

	switch (token) {
#ifdef CONFIG_QUOTA
	case Opt_usrjquota:
		if (!*param->string)
			return unnote_qf_name(fc, USRQUOTA);
		else
			return note_qf_name(fc, USRQUOTA, param);
	case Opt_grpjquota:
		if (!*param->string)
			return unnote_qf_name(fc, GRPQUOTA);
		else
			return note_qf_name(fc, GRPQUOTA, param);
#endif
	case Opt_sb:
		if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
			ext4_msg(NULL, KERN_WARNING,
				 "Ignoring %s option on remount", param->key);
		} else {
			ctx->s_sb_block = result.uint_32;
			ctx->spec |= EXT4_SPEC_s_sb_block;
		}
		return 0;
	case Opt_removed:
		ext4_msg(NULL, KERN_WARNING, "Ignoring removed %s option",
			 param->key);
		return 0;
	case Opt_inlinecrypt:
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
		ctx_set_flags(ctx, SB_INLINECRYPT);
#else
		ext4_msg(NULL, KERN_ERR, "inline encryption not supported");
#endif
		return 0;
	case Opt_errors:
		ctx_clear_mount_opt(ctx, EXT4_MOUNT_ERRORS_MASK);
		ctx_set_mount_opt(ctx, result.uint_32);
		return 0;
#ifdef CONFIG_QUOTA
	case Opt_jqfmt:
		ctx->s_jquota_fmt = result.uint_32;
		ctx->spec |= EXT4_SPEC_JQFMT;
		return 0;
#endif
	case Opt_data:
		ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS);
		ctx_set_mount_opt(ctx, result.uint_32);
		ctx->spec |= EXT4_SPEC_DATAJ;
		return 0;
	case Opt_commit:
		if (result.uint_32 == 0)
			result.uint_32 = JBD2_DEFAULT_MAX_COMMIT_AGE;
		else if (result.uint_32 > INT_MAX / HZ) {
			ext4_msg(NULL, KERN_ERR,
				 "Invalid commit interval %d, "
				 "must be smaller than %d",
				 result.uint_32, INT_MAX / HZ);
			return -EINVAL;
		}
		ctx->s_commit_interval = HZ * result.uint_32;
		ctx->spec |= EXT4_SPEC_s_commit_interval;
		return 0;
	case Opt_debug_want_extra_isize:
		if ((result.uint_32 & 1) || (result.uint_32 < 4)) {
			ext4_msg(NULL, KERN_ERR,
				 "Invalid want_extra_isize %d", result.uint_32);
			return -EINVAL;
		}
		ctx->s_want_extra_isize = result.uint_32;
		ctx->spec |= EXT4_SPEC_s_want_extra_isize;
		return 0;
	case Opt_max_batch_time:
		ctx->s_max_batch_time = result.uint_32;
		ctx->spec |= EXT4_SPEC_s_max_batch_time;
		return 0;
	case Opt_min_batch_time:
		ctx->s_min_batch_time = result.uint_32;
		ctx->spec |= EXT4_SPEC_s_min_batch_time;
		return 0;
	case Opt_inode_readahead_blks:
		if (result.uint_32 &&
		    (result.uint_32 > (1 << 30) ||
		     !is_power_of_2(result.uint_32))) {
			ext4_msg(NULL, KERN_ERR,
				 "EXT4-fs: inode_readahead_blks must be "
				 "0 or a power of 2 smaller than 2^31");
			return -EINVAL;
		}
		ctx->s_inode_readahead_blks = result.uint_32;
		ctx->spec |= EXT4_SPEC_s_inode_readahead_blks;
		return 0;
	case Opt_init_itable:
		ctx_set_mount_opt(ctx, EXT4_MOUNT_INIT_INODE_TABLE);
		ctx->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
		if (param->type == fs_value_is_string)
			ctx->s_li_wait_mult = result.uint_32;
		ctx->spec |= EXT4_SPEC_s_li_wait_mult;
		return 0;
	case Opt_max_dir_size_kb:
		ctx->s_max_dir_size_kb = result.uint_32;
		ctx->spec |= EXT4_SPEC_s_max_dir_size_kb;
		return 0;
#ifdef CONFIG_EXT4_DEBUG
	case Opt_fc_debug_max_replay:
		ctx->s_fc_debug_max_replay = result.uint_32;
		ctx->spec |= EXT4_SPEC_s_fc_debug_max_replay;
		return 0;
#endif
	case Opt_stripe:
		ctx->s_stripe = result.uint_32;
		ctx->spec |= EXT4_SPEC_s_stripe;
		return 0;
	case Opt_resuid:
		ctx->s_resuid = result.uid;
		ctx->spec |= EXT4_SPEC_s_resuid;
		return 0;
	case Opt_resgid:
		ctx->s_resgid = result.gid;
		ctx->spec |= EXT4_SPEC_s_resgid;
		return 0;
	case Opt_journal_dev:
		if (is_remount) {
			ext4_msg(NULL, KERN_ERR,
				 "Cannot specify journal on remount");
			return -EINVAL;
		}
		ctx->journal_devnum = result.uint_32;
		ctx->spec |= EXT4_SPEC_JOURNAL_DEV;
		return 0;
	case Opt_journal_path:
	{
		struct inode *journal_inode;
		struct path path;
		int error;

		if (is_remount) {
			ext4_msg(NULL, KERN_ERR,
				 "Cannot specify journal on remount");
			return -EINVAL;
		}

		error = fs_lookup_param(fc, param, 1, LOOKUP_FOLLOW, &path);
		if (error) {
			ext4_msg(NULL, KERN_ERR, "error: could not find "
				 "journal device path");
			return -EINVAL;
		}

		journal_inode = d_inode(path.dentry);
		ctx->journal_devnum = new_encode_dev(journal_inode->i_rdev);
		ctx->spec |= EXT4_SPEC_JOURNAL_DEV;
		path_put(&path);
		return 0;
	}
	case Opt_journal_ioprio:
		if (result.uint_32 > 7) {
			ext4_msg(NULL, KERN_ERR, "Invalid journal IO priority"
				 " (must be 0-7)");
			return -EINVAL;
		}
		ctx->journal_ioprio =
			IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, result.uint_32);
		ctx->spec |= EXT4_SPEC_JOURNAL_IOPRIO;
		return 0;
	case Opt_test_dummy_encryption:
		return ext4_parse_test_dummy_encryption(param, ctx);
	case Opt_dax:
	case Opt_dax_type:
#ifdef CONFIG_FS_DAX
	{
		int type = (token == Opt_dax) ?
			   Opt_dax : result.uint_32;

		switch (type) {
		case Opt_dax:
		case Opt_dax_always:
			ctx_set_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS);
			ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER);
			break;
		case Opt_dax_never:
			ctx_set_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER);
			ctx_clear_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS);
			break;
		case Opt_dax_inode:
			ctx_clear_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS);
			ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER);
			/* Strictly for printing options */
			ctx_set_mount_opt2(ctx, EXT4_MOUNT2_DAX_INODE);
			break;
		}
		return 0;
	}
#else
		ext4_msg(NULL, KERN_INFO, "dax option not supported");
		return -EINVAL;
#endif
	case Opt_data_err:
		if (result.uint_32 == Opt_data_err_abort)
			ctx_set_mount_opt(ctx, m->mount_opt);
		else if (result.uint_32 == Opt_data_err_ignore)
			ctx_clear_mount_opt(ctx, m->mount_opt);
		return 0;
	case Opt_mb_optimize_scan:
		if (result.int_32 == 1) {
			ctx_set_mount_opt2(ctx, EXT4_MOUNT2_MB_OPTIMIZE_SCAN);
			ctx->spec |= EXT4_SPEC_mb_optimize_scan;
		} else if (result.int_32 == 0) {
			ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_MB_OPTIMIZE_SCAN);
			ctx->spec |= EXT4_SPEC_mb_optimize_scan;
		} else {
			ext4_msg(NULL, KERN_WARNING,
				 "mb_optimize_scan should be set to 0 or 1.");
			return -EINVAL;
		}
		return 0;
	}

	/*
	 * At this point we should only be getting options requiring MOPT_SET,
	 * or MOPT_CLEAR. Anything else is a bug
	 */
	if (m->token == Opt_err) {
		ext4_msg(NULL, KERN_WARNING, "buggy handling of option %s",
			 param->key);
		WARN_ON(1);
		return -EINVAL;
	} else {
		unsigned int set = 0;

		if ((param->type == fs_value_is_flag) ||
		    result.uint_32 > 0)
			set = 1;

		if (m->flags & MOPT_CLEAR)
			set = !set;
		else if (unlikely(!(m->flags & MOPT_SET))) {
			ext4_msg(NULL, KERN_WARNING,
				 "buggy handling of option %s",
				 param->key);
			WARN_ON(1);
			return -EINVAL;
		}
		if (m->flags & MOPT_2) {
			if (set != 0)
				ctx_set_mount_opt2(ctx, m->mount_opt);
			else
				ctx_clear_mount_opt2(ctx, m->mount_opt);
		} else {
			if (set != 0)
				ctx_set_mount_opt(ctx, m->mount_opt);
			else
				ctx_clear_mount_opt(ctx, m->mount_opt);
		}
	}

	return 0;
}
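
/*
 * Worked example (illustrative): for "commit=30", fs_parse() returns
 * Opt_commit with result.uint_32 == 30, the Opt_commit case stores
 * ctx->s_commit_interval = 30 * HZ and sets EXT4_SPEC_s_commit_interval,
 * and ext4_apply_options() later copies the value into
 * sbi->s_commit_interval.  A commit value of 0 falls back to
 * JBD2_DEFAULT_MAX_COMMIT_AGE.
 */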
static int parse_options(struct fs_context *fc, char *options)
{
	struct fs_parameter param;
	int ret;
	char *key;

	if (!options)
		return 0;

	while ((key = strsep(&options, ",")) != NULL) {
		if (*key) {
			size_t v_len = 0;
			char *value = strchr(key, '=');

			param.type = fs_value_is_flag;
			param.string = NULL;

			if (value) {
				if (value == key)
					continue;

				*value++ = 0;
				v_len = strlen(value);
				param.string = kmemdup_nul(value, v_len,
							   GFP_KERNEL);
				if (!param.string)
					return -ENOMEM;
				param.type = fs_value_is_string;
			}

			param.key = key;
			param.size = v_len;

			ret = ext4_parse_param(fc, &param);
			kfree(param.string);
			if (ret < 0)
				return ret;
		}
	}

	ret = ext4_validate_options(fc);
	if (ret < 0)
		return ret;

	return 0;
}
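
/*
 * Example (illustrative): given the superblock option string
 * "noload,data=writeback", strsep() yields the keys "noload" and
 * "data=writeback"; the latter is split at '=' into key "data" with a
 * kmemdup_nul()'ed string value "writeback" before being handed to
 * ext4_parse_param().
 */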
static int parse_apply_sb_mount_options(struct super_block *sb,
					struct ext4_fs_context *m_ctx)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *s_mount_opts = NULL;
	struct ext4_fs_context *s_ctx = NULL;
	struct fs_context *fc = NULL;
	int ret = -ENOMEM;

	if (!sbi->s_es->s_mount_opts[0])
		return 0;

	s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
				sizeof(sbi->s_es->s_mount_opts),
				GFP_KERNEL);
	if (!s_mount_opts)
		return ret;

	fc = kzalloc(sizeof(struct fs_context), GFP_KERNEL);
	if (!fc)
		goto out_free;

	s_ctx = kzalloc(sizeof(struct ext4_fs_context), GFP_KERNEL);
	if (!s_ctx)
		goto out_free;

	fc->fs_private = s_ctx;
	fc->s_fs_info = sbi;

	ret = parse_options(fc, s_mount_opts);
	if (ret < 0)
		goto parse_failed;

	ret = ext4_check_opt_consistency(fc, sb);
	if (ret < 0) {
parse_failed:
		ext4_msg(sb, KERN_WARNING,
			 "failed to parse options in superblock: %s",
			 s_mount_opts);
		ret = 0;
		goto out_free;
	}

	if (s_ctx->spec & EXT4_SPEC_JOURNAL_DEV)
		m_ctx->journal_devnum = s_ctx->journal_devnum;
	if (s_ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO)
		m_ctx->journal_ioprio = s_ctx->journal_ioprio;

	ext4_apply_options(fc, sb);
	ret = 0;

out_free:
	if (fc) {
		ext4_fc_free(fc);
		kfree(fc);
	}
	kfree(s_mount_opts);
	return ret;
}
static void ext4_apply_quota_options(struct fs_context *fc,
				     struct super_block *sb)
{
#ifdef CONFIG_QUOTA
	bool quota_feature = ext4_has_feature_quota(sb);
	struct ext4_fs_context *ctx = fc->fs_private;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *qname;
	int i;

	if (quota_feature)
		return;

	if (ctx->spec & EXT4_SPEC_JQUOTA) {
		for (i = 0; i < EXT4_MAXQUOTAS; i++) {
			if (!(ctx->qname_spec & (1 << i)))
				continue;

			qname = ctx->s_qf_names[i]; /* May be NULL */
			if (qname)
				set_opt(sb, QUOTA);
			ctx->s_qf_names[i] = NULL;
			qname = rcu_replace_pointer(sbi->s_qf_names[i], qname,
						lockdep_is_held(&sb->s_umount));
			if (qname)
				kfree_rcu_mightsleep(qname);
		}
	}

	if (ctx->spec & EXT4_SPEC_JQFMT)
		sbi->s_jquota_fmt = ctx->s_jquota_fmt;
#endif
}
/*
 * Check quota settings consistency.
 */
static int ext4_check_quota_consistency(struct fs_context *fc,
					struct super_block *sb)
{
#ifdef CONFIG_QUOTA
	struct ext4_fs_context *ctx = fc->fs_private;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	bool quota_feature = ext4_has_feature_quota(sb);
	bool quota_loaded = sb_any_quota_loaded(sb);
	bool usr_qf_name, grp_qf_name, usrquota, grpquota;
	int quota_flags, i;

	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (ctx_test_mount_opt(ctx, EXT4_MOUNT_PRJQUOTA) &&
	    !ext4_has_feature_project(sb)) {
		ext4_msg(NULL, KERN_ERR, "Project quota feature not enabled. "
			 "Cannot enable project quota enforcement.");
		return -EINVAL;
	}

	quota_flags = EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
		      EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA;
	if (quota_loaded &&
	    ctx->mask_s_mount_opt & quota_flags &&
	    !ctx_test_mount_opt(ctx, quota_flags))
		goto err_quota_change;

	if (ctx->spec & EXT4_SPEC_JQUOTA) {
		for (i = 0; i < EXT4_MAXQUOTAS; i++) {
			if (!(ctx->qname_spec & (1 << i)))
				continue;

			if (quota_loaded &&
			    !!sbi->s_qf_names[i] != !!ctx->s_qf_names[i])
				goto err_jquota_change;

			if (sbi->s_qf_names[i] && ctx->s_qf_names[i] &&
			    strcmp(get_qf_name(sb, sbi, i),
				   ctx->s_qf_names[i]) != 0)
				goto err_jquota_specified;
		}

		if (quota_feature) {
			ext4_msg(NULL, KERN_INFO,
				 "Journaled quota options ignored when "
				 "QUOTA feature is enabled");
			return 0;
		}
	}

	if (ctx->spec & EXT4_SPEC_JQFMT) {
		if (sbi->s_jquota_fmt != ctx->s_jquota_fmt && quota_loaded)
			goto err_jquota_change;
		if (quota_feature) {
			ext4_msg(NULL, KERN_INFO, "Quota format mount options "
				 "ignored when QUOTA feature is enabled");
			return 0;
		}
	}

	/* Make sure we don't mix old and new quota format */
	usr_qf_name = (get_qf_name(sb, sbi, USRQUOTA) ||
		       ctx->s_qf_names[USRQUOTA]);
	grp_qf_name = (get_qf_name(sb, sbi, GRPQUOTA) ||
		       ctx->s_qf_names[GRPQUOTA]);

	usrquota = (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) ||
		    test_opt(sb, USRQUOTA));

	grpquota = (ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA) ||
		    test_opt(sb, GRPQUOTA));

	if (usr_qf_name) {
		ctx_clear_mount_opt(ctx, EXT4_MOUNT_USRQUOTA);
		usrquota = false;
	}
	if (grp_qf_name) {
		ctx_clear_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA);
		grpquota = false;
	}

	if (usr_qf_name || grp_qf_name) {
		if (usrquota || grpquota) {
			ext4_msg(NULL, KERN_ERR, "old and new quota "
				 "format mixing");
			return -EINVAL;
		}

		if (!(ctx->spec & EXT4_SPEC_JQFMT || sbi->s_jquota_fmt)) {
			ext4_msg(NULL, KERN_ERR, "journaled quota format "
				 "not specified");
			return -EINVAL;
		}
	}

	return 0;

err_quota_change:
	ext4_msg(NULL, KERN_ERR,
		 "Cannot change quota options when quota turned on");
	return -EINVAL;
err_jquota_change:
	ext4_msg(NULL, KERN_ERR, "Cannot change journaled quota "
		 "options when quota turned on");
	return -EINVAL;
err_jquota_specified:
	ext4_msg(NULL, KERN_ERR, "%s quota file already specified",
		 QTYPE2NAME(i));
	return -EINVAL;
#else
	return 0;
#endif
}
static int ext4_check_test_dummy_encryption(const struct fs_context *fc,
					    struct super_block *sb)
{
	const struct ext4_fs_context *ctx = fc->fs_private;
	const struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (!fscrypt_is_dummy_policy_set(&ctx->dummy_enc_policy))
		return 0;

	if (!ext4_has_feature_encrypt(sb)) {
		ext4_msg(NULL, KERN_WARNING,
			 "test_dummy_encryption requires encrypt feature");
		return -EINVAL;
	}
	/*
	 * This mount option is just for testing, and it's not worthwhile to
	 * implement the extra complexity (e.g. RCU protection) that would be
	 * needed to allow it to be set or changed during remount.  We do allow
	 * it to be specified during remount, but only if there is no change.
	 */
	if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
		if (fscrypt_dummy_policies_equal(&sbi->s_dummy_enc_policy,
						 &ctx->dummy_enc_policy))
			return 0;
		ext4_msg(NULL, KERN_WARNING,
			 "Can't set or change test_dummy_encryption on remount");
		return -EINVAL;
	}
	/* Also make sure s_mount_opts didn't contain a conflicting value. */
	if (fscrypt_is_dummy_policy_set(&sbi->s_dummy_enc_policy)) {
		if (fscrypt_dummy_policies_equal(&sbi->s_dummy_enc_policy,
						 &ctx->dummy_enc_policy))
			return 0;
		ext4_msg(NULL, KERN_WARNING,
			 "Conflicting test_dummy_encryption options");
		return -EINVAL;
	}
	return 0;
}
static void ext4_apply_test_dummy_encryption(struct ext4_fs_context *ctx,
					     struct super_block *sb)
{
	if (!fscrypt_is_dummy_policy_set(&ctx->dummy_enc_policy) ||
	    /* if already set, it was already verified to be the same */
	    fscrypt_is_dummy_policy_set(&EXT4_SB(sb)->s_dummy_enc_policy))
		return;
	EXT4_SB(sb)->s_dummy_enc_policy = ctx->dummy_enc_policy;
	memset(&ctx->dummy_enc_policy, 0, sizeof(ctx->dummy_enc_policy));
	ext4_msg(sb, KERN_WARNING, "Test dummy encryption mode enabled");
}
static int ext4_check_opt_consistency(struct fs_context *fc,
				      struct super_block *sb)
{
	struct ext4_fs_context *ctx = fc->fs_private;
	struct ext4_sb_info *sbi = fc->s_fs_info;
	int is_remount = fc->purpose == FS_CONTEXT_FOR_RECONFIGURE;
	int err;

	if ((ctx->opt_flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) {
		ext4_msg(NULL, KERN_ERR,
			 "Mount option(s) incompatible with ext2");
		return -EINVAL;
	}
	if ((ctx->opt_flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) {
		ext4_msg(NULL, KERN_ERR,
			 "Mount option(s) incompatible with ext3");
		return -EINVAL;
	}

	if (ctx->s_want_extra_isize >
	    (sbi->s_inode_size - EXT4_GOOD_OLD_INODE_SIZE)) {
		ext4_msg(NULL, KERN_ERR,
			 "Invalid want_extra_isize %d",
			 ctx->s_want_extra_isize);
		return -EINVAL;
	}

	err = ext4_check_test_dummy_encryption(fc, sb);
	if (err)
		return err;

	if ((ctx->spec & EXT4_SPEC_DATAJ) && is_remount) {
		if (!sbi->s_journal) {
			ext4_msg(NULL, KERN_WARNING,
				 "Remounting file system with no journal "
				 "so ignoring journalled data option");
			ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS);
		} else if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS) !=
			   test_opt(sb, DATA_FLAGS)) {
			ext4_msg(NULL, KERN_ERR, "Cannot change data mode "
				 "on remount");
			return -EINVAL;
		}
	}

	if (is_remount) {
		if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS) &&
		    (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)) {
			ext4_msg(NULL, KERN_ERR, "can't mount with "
				 "both data=journal and dax");
			return -EINVAL;
		}

		if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS) &&
		    (!(sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) ||
		     (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER))) {
fail_dax_change_remount:
			ext4_msg(NULL, KERN_ERR, "can't change "
				 "dax mount option while remounting");
			return -EINVAL;
		} else if (ctx_test_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER) &&
			 (!(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) ||
			  (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS))) {
			goto fail_dax_change_remount;
		} else if (ctx_test_mount_opt2(ctx, EXT4_MOUNT2_DAX_INODE) &&
			   ((sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) ||
			    (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) ||
			    !(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_INODE))) {
			goto fail_dax_change_remount;
		}
	}

	return ext4_check_quota_consistency(fc, sb);
}
static void ext4_apply_options(struct fs_context *fc, struct super_block *sb)
{
	struct ext4_fs_context *ctx = fc->fs_private;
	struct ext4_sb_info *sbi = fc->s_fs_info;

	sbi->s_mount_opt &= ~ctx->mask_s_mount_opt;
	sbi->s_mount_opt |= ctx->vals_s_mount_opt;
	sbi->s_mount_opt2 &= ~ctx->mask_s_mount_opt2;
	sbi->s_mount_opt2 |= ctx->vals_s_mount_opt2;
	sb->s_flags &= ~ctx->mask_s_flags;
	sb->s_flags |= ctx->vals_s_flags;

#define APPLY(X) ({ if (ctx->spec & EXT4_SPEC_##X) sbi->X = ctx->X; })
	APPLY(s_commit_interval);
	APPLY(s_stripe);
	APPLY(s_max_batch_time);
	APPLY(s_min_batch_time);
	APPLY(s_want_extra_isize);
	APPLY(s_inode_readahead_blks);
	APPLY(s_max_dir_size_kb);
	APPLY(s_li_wait_mult);
	APPLY(s_resgid);
	APPLY(s_resuid);

#ifdef CONFIG_EXT4_DEBUG
	APPLY(s_fc_debug_max_replay);
#endif

	ext4_apply_quota_options(fc, sb);
	ext4_apply_test_dummy_encryption(ctx, sb);
}
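
/*
 * For reference, APPLY(s_stripe) above expands to approximately:
 *
 *	({ if (ctx->spec & EXT4_SPEC_s_stripe) sbi->s_stripe = ctx->s_stripe; })
 *
 * i.e. a tunable is copied from the parsing context into the superblock
 * info only when the corresponding option was actually specified.
 */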
static int ext4_validate_options(struct fs_context *fc)
{
#ifdef CONFIG_QUOTA
	struct ext4_fs_context *ctx = fc->fs_private;
	char *usr_qf_name, *grp_qf_name;

	usr_qf_name = ctx->s_qf_names[USRQUOTA];
	grp_qf_name = ctx->s_qf_names[GRPQUOTA];

	if (usr_qf_name || grp_qf_name) {
		if (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) && usr_qf_name)
			ctx_clear_mount_opt(ctx, EXT4_MOUNT_USRQUOTA);

		if (ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA) && grp_qf_name)
			ctx_clear_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA);

		if (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) ||
		    ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA)) {
			ext4_msg(NULL, KERN_ERR, "old and new quota "
				 "format mixing");
			return -EINVAL;
		}
	}
#endif
	return 0;
}
static inline void ext4_show_quota_options(struct seq_file *seq,
					   struct super_block *sb)
{
#if defined(CONFIG_QUOTA)
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *usr_qf_name, *grp_qf_name;

	if (sbi->s_jquota_fmt) {
		char *fmtname = "";

		switch (sbi->s_jquota_fmt) {
		case QFMT_VFS_OLD:
			fmtname = "vfsold";
			break;
		case QFMT_VFS_V0:
			fmtname = "vfsv0";
			break;
		case QFMT_VFS_V1:
			fmtname = "vfsv1";
			break;
		}
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	rcu_read_lock();
	usr_qf_name = rcu_dereference(sbi->s_qf_names[USRQUOTA]);
	grp_qf_name = rcu_dereference(sbi->s_qf_names[GRPQUOTA]);
	if (usr_qf_name)
		seq_show_option(seq, "usrjquota", usr_qf_name);
	if (grp_qf_name)
		seq_show_option(seq, "grpjquota", grp_qf_name);
	rcu_read_unlock();
#endif
}
static const char *token2str(int token)
{
	const struct fs_parameter_spec *spec;

	for (spec = ext4_param_specs; spec->name != NULL; spec++)
		if (spec->opt == token && !spec->type)
			break;
	return spec->name;
}
/*
 * Show an option if
 *  - it's set to a non-default value OR
 *  - if the per-sb default is different from the global default
 */
static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
			      int nodefs)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int def_errors;
	const struct mount_opts *m;
	char sep = nodefs ? '\n' : ',';

#define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep)
#define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg)

	if (sbi->s_sb_block != 1)
		SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block);

	for (m = ext4_mount_opts; m->token != Opt_err; m++) {
		int want_set = m->flags & MOPT_SET;
		int opt_2 = m->flags & MOPT_2;
		unsigned int mount_opt, def_mount_opt;

		if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
		    m->flags & MOPT_SKIP)
			continue;

		if (opt_2) {
			mount_opt = sbi->s_mount_opt2;
			def_mount_opt = sbi->s_def_mount_opt2;
		} else {
			mount_opt = sbi->s_mount_opt;
			def_mount_opt = sbi->s_def_mount_opt;
		}
		/* skip if same as the default */
		if (!nodefs && !(m->mount_opt & (mount_opt ^ def_mount_opt)))
			continue;
		/* select Opt_noFoo vs Opt_Foo */
		if ((want_set &&
		     (mount_opt & m->mount_opt) != m->mount_opt) ||
		    (!want_set && (mount_opt & m->mount_opt)))
			continue;
		SEQ_OPTS_PRINT("%s", token2str(m->token));
	}

	if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) ||
	    le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID)
		SEQ_OPTS_PRINT("resuid=%u",
				from_kuid_munged(&init_user_ns, sbi->s_resuid));
	if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) ||
	    le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID)
		SEQ_OPTS_PRINT("resgid=%u",
				from_kgid_munged(&init_user_ns, sbi->s_resgid));
	def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors);
	if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO)
		SEQ_OPTS_PUTS("errors=remount-ro");
	if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
		SEQ_OPTS_PUTS("errors=continue");
	if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
		SEQ_OPTS_PUTS("errors=panic");
	if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
		SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ);
	if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME)
		SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
	if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
		SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
	if (nodefs || sbi->s_stripe)
		SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
	if (nodefs || EXT4_MOUNT_DATA_FLAGS &
			(sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
			SEQ_OPTS_PUTS("data=journal");
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
			SEQ_OPTS_PUTS("data=ordered");
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
			SEQ_OPTS_PUTS("data=writeback");
	}
	if (nodefs ||
	    sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
		SEQ_OPTS_PRINT("inode_readahead_blks=%u",
			       sbi->s_inode_readahead_blks);

	if (test_opt(sb, INIT_INODE_TABLE) && (nodefs ||
		       (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)))
		SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult);
	if (nodefs || sbi->s_max_dir_size_kb)
		SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
	if (test_opt(sb, DATA_ERR_ABORT))
		SEQ_OPTS_PUTS("data_err=abort");

	fscrypt_show_test_dummy_encryption(seq, sep, sb);

	if (sb->s_flags & SB_INLINECRYPT)
		SEQ_OPTS_PUTS("inlinecrypt");

	if (test_opt(sb, DAX_ALWAYS)) {
		if (IS_EXT2_SB(sb))
			SEQ_OPTS_PUTS("dax");
		else
			SEQ_OPTS_PUTS("dax=always");
	} else if (test_opt2(sb, DAX_NEVER)) {
		SEQ_OPTS_PUTS("dax=never");
	} else if (test_opt2(sb, DAX_INODE)) {
		SEQ_OPTS_PUTS("dax=inode");
	}

	if (sbi->s_groups_count >= MB_DEFAULT_LINEAR_SCAN_THRESHOLD &&
	    !test_opt2(sb, MB_OPTIMIZE_SCAN)) {
		SEQ_OPTS_PUTS("mb_optimize_scan=0");
	} else if (sbi->s_groups_count < MB_DEFAULT_LINEAR_SCAN_THRESHOLD &&
		   test_opt2(sb, MB_OPTIMIZE_SCAN)) {
		SEQ_OPTS_PUTS("mb_optimize_scan=1");
	}

	if (nodefs && !test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS))
		SEQ_OPTS_PUTS("prefetch_block_bitmaps");

	ext4_show_quota_options(seq, sb);
	return 0;
}
static int ext4_show_options(struct seq_file *seq, struct dentry *root)
{
	return _ext4_show_options(seq, root->d_sb, 0);
}
int ext4_seq_options_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	int rc;

	seq_puts(seq, sb_rdonly(sb) ? "ro" : "rw");
	rc = _ext4_show_options(seq, sb, 1);
	seq_putc(seq, '\n');
	return rc;
}
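
/*
 * Note (illustrative): the two callers above differ only in the 'nodefs'
 * flag.  /proc/mounts goes through ext4_show_options() and prints just
 * the comma-separated deltas from the defaults, while
 * /proc/fs/ext4/<disk>/options uses nodefs == 1 and so lists every
 * option, one per line ('\n' becomes the separator in
 * _ext4_show_options()).
 */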
static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
			    int read_only)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int err = 0;

	if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
		ext4_msg(sb, KERN_ERR, "revision level too high, "
			 "forcing read-only mode");
		err = -EROFS;
		goto done;
	}
	if (read_only)
		goto done;
	if (!(sbi->s_mount_state & EXT4_VALID_FS))
		ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
			 "running e2fsck is recommended");
	else if (sbi->s_mount_state & EXT4_ERROR_FS)
		ext4_msg(sb, KERN_WARNING,
			 "warning: mounting fs with errors, "
			 "running e2fsck is recommended");
	else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
		 le16_to_cpu(es->s_mnt_count) >=
		 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
		ext4_msg(sb, KERN_WARNING,
			 "warning: maximal mount count reached, "
			 "running e2fsck is recommended");
	else if (le32_to_cpu(es->s_checkinterval) &&
		 (ext4_get_tstamp(es, s_lastcheck) +
		  le32_to_cpu(es->s_checkinterval) <= ktime_get_real_seconds()))
		ext4_msg(sb, KERN_WARNING,
			 "warning: checktime reached, "
			 "running e2fsck is recommended");
	if (!sbi->s_journal)
		es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
	if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
		es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
	le16_add_cpu(&es->s_mnt_count, 1);
	ext4_update_tstamp(es, s_mtime);
	if (sbi->s_journal) {
		ext4_set_feature_journal_needs_recovery(sb);
		if (ext4_has_feature_orphan_file(sb))
			ext4_set_feature_orphan_present(sb);
	}

	err = ext4_commit_super(sb);
done:
	if (test_opt(sb, DEBUG))
		printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
				"bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
			sb->s_blocksize,
			sbi->s_groups_count,
			EXT4_BLOCKS_PER_GROUP(sb),
			EXT4_INODES_PER_GROUP(sb),
			sbi->s_mount_opt, sbi->s_mount_opt2);
	return err;
}
int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct flex_groups **old_groups, **new_groups;
	int size, i, j;

	if (!sbi->s_log_groups_per_flex)
		return 0;

	size = ext4_flex_group(sbi, ngroup - 1) + 1;
	if (size <= sbi->s_flex_groups_allocated)
		return 0;

	new_groups = kvzalloc(roundup_pow_of_two(size *
			      sizeof(*sbi->s_flex_groups)), GFP_KERNEL);
	if (!new_groups) {
		ext4_msg(sb, KERN_ERR,
			 "not enough memory for %d flex group pointers", size);
		return -ENOMEM;
	}
	for (i = sbi->s_flex_groups_allocated; i < size; i++) {
		new_groups[i] = kvzalloc(roundup_pow_of_two(
					 sizeof(struct flex_groups)),
					 GFP_KERNEL);
		if (!new_groups[i]) {
			for (j = sbi->s_flex_groups_allocated; j < i; j++)
				kvfree(new_groups[j]);
			kvfree(new_groups);
			ext4_msg(sb, KERN_ERR,
				 "not enough memory for %d flex groups", size);
			return -ENOMEM;
		}
	}
	rcu_read_lock();
	old_groups = rcu_dereference(sbi->s_flex_groups);
	if (old_groups)
		memcpy(new_groups, old_groups,
		       (sbi->s_flex_groups_allocated *
			sizeof(struct flex_groups *)));
	rcu_read_unlock();
	rcu_assign_pointer(sbi->s_flex_groups, new_groups);
	sbi->s_flex_groups_allocated = size;
	if (old_groups)
		ext4_kvfree_array_rcu(old_groups);
	return 0;
}
*sb
)
3170 struct ext4_sb_info
*sbi
= EXT4_SB(sb
);
3171 struct ext4_group_desc
*gdp
= NULL
;
3172 struct flex_groups
*fg
;
3173 ext4_group_t flex_group
;
3176 sbi
->s_log_groups_per_flex
= sbi
->s_es
->s_log_groups_per_flex
;
3177 if (sbi
->s_log_groups_per_flex
< 1 || sbi
->s_log_groups_per_flex
> 31) {
3178 sbi
->s_log_groups_per_flex
= 0;
3182 err
= ext4_alloc_flex_bg_array(sb
, sbi
->s_groups_count
);
3186 for (i
= 0; i
< sbi
->s_groups_count
; i
++) {
3187 gdp
= ext4_get_group_desc(sb
, i
, NULL
);
3189 flex_group
= ext4_flex_group(sbi
, i
);
3190 fg
= sbi_array_rcu_deref(sbi
, s_flex_groups
, flex_group
);
3191 atomic_add(ext4_free_inodes_count(sb
, gdp
), &fg
->free_inodes
);
3192 atomic64_add(ext4_free_group_clusters(sb
, gdp
),
3193 &fg
->free_clusters
);
3194 atomic_add(ext4_used_dirs_count(sb
, gdp
), &fg
->used_dirs
);
3202 static __le16
ext4_group_desc_csum(struct super_block
*sb
, __u32 block_group
,
3203 struct ext4_group_desc
*gdp
)
3205 int offset
= offsetof(struct ext4_group_desc
, bg_checksum
);
3207 __le32 le_group
= cpu_to_le32(block_group
);
3208 struct ext4_sb_info
*sbi
= EXT4_SB(sb
);
3210 if (ext4_has_metadata_csum(sbi
->s_sb
)) {
3211 /* Use new metadata_csum algorithm */
3213 __u16 dummy_csum
= 0;
3215 csum32
= ext4_chksum(sbi
, sbi
->s_csum_seed
, (__u8
*)&le_group
,
3217 csum32
= ext4_chksum(sbi
, csum32
, (__u8
*)gdp
, offset
);
3218 csum32
= ext4_chksum(sbi
, csum32
, (__u8
*)&dummy_csum
,
3219 sizeof(dummy_csum
));
3220 offset
+= sizeof(dummy_csum
);
3221 if (offset
< sbi
->s_desc_size
)
3222 csum32
= ext4_chksum(sbi
, csum32
, (__u8
*)gdp
+ offset
,
3223 sbi
->s_desc_size
- offset
);
3225 crc
= csum32
& 0xFFFF;
3229 /* old crc16 code */
3230 if (!ext4_has_feature_gdt_csum(sb
))
3233 crc
= crc16(~0, sbi
->s_es
->s_uuid
, sizeof(sbi
->s_es
->s_uuid
));
3234 crc
= crc16(crc
, (__u8
*)&le_group
, sizeof(le_group
));
3235 crc
= crc16(crc
, (__u8
*)gdp
, offset
);
3236 offset
+= sizeof(gdp
->bg_checksum
); /* skip checksum */
3237 /* for checksum of struct ext4_group_desc do the rest...*/
3238 if (ext4_has_feature_64bit(sb
) && offset
< sbi
->s_desc_size
)
3239 crc
= crc16(crc
, (__u8
*)gdp
+ offset
,
3240 sbi
->s_desc_size
- offset
);
3243 return cpu_to_le16(crc
);
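
/*
 * Note (illustrative): with metadata_csum the descriptor checksum is the
 * low 16 bits of a crc32c over the checksum seed, the little-endian group
 * number and the descriptor itself, computed with bg_checksum substituted
 * by a zero dummy.  Without metadata_csum but with gdt_csum, the legacy
 * crc16 over the fs UUID, group number and descriptor is used instead;
 * the two features are expected to be mutually exclusive.
 */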
int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
				struct ext4_group_desc *gdp)
{
	if (ext4_has_group_desc_csum(sb) &&
	    (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp)))
		return 0;

	return 1;
}

void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
			      struct ext4_group_desc *gdp)
{
	if (!ext4_has_group_desc_csum(sb))
		return;
	gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp);
}
/* Called at mount-time, super-block is locked */
static int ext4_check_descriptors(struct super_block *sb,
				  ext4_fsblk_t sb_block,
				  ext4_group_t *first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
	ext4_fsblk_t last_block;
	ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
	ext4_fsblk_t block_bitmap;
	ext4_fsblk_t inode_bitmap;
	ext4_fsblk_t inode_table;
	int flexbg_flag = 0;
	ext4_group_t i, grp = sbi->s_groups_count;

	if (ext4_has_feature_flex_bg(sb))
		flexbg_flag = 1;

	ext4_debug("Checking group descriptors");

	for (i = 0; i < sbi->s_groups_count; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);

		if (i == sbi->s_groups_count - 1 || flexbg_flag)
			last_block = ext4_blocks_count(sbi->s_es) - 1;
		else
			last_block = first_block +
				(EXT4_BLOCKS_PER_GROUP(sb) - 1);

		if ((grp == sbi->s_groups_count) &&
		   !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			grp = i;

		block_bitmap = ext4_block_bitmap(sb, gdp);
		if (block_bitmap == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Block bitmap for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (block_bitmap >= sb_block + 1 &&
		    block_bitmap <= last_bg_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Block bitmap for group %u overlaps "
				 "block group descriptors", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (block_bitmap < first_block || block_bitmap > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Block bitmap for group %u not in group "
			       "(block %llu)!", i, block_bitmap);
			return 0;
		}
		inode_bitmap = ext4_inode_bitmap(sb, gdp);
		if (inode_bitmap == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode bitmap for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_bitmap >= sb_block + 1 &&
		    inode_bitmap <= last_bg_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode bitmap for group %u overlaps "
				 "block group descriptors", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_bitmap < first_block || inode_bitmap > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Inode bitmap for group %u not in group "
			       "(block %llu)!", i, inode_bitmap);
			return 0;
		}
		inode_table = ext4_inode_table(sb, gdp);
		if (inode_table == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode table for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_table >= sb_block + 1 &&
		    inode_table <= last_bg_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode table for group %u overlaps "
				 "block group descriptors", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_table < first_block ||
		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Inode table for group %u not in group "
			       "(block %llu)!", i, inode_table);
			return 0;
		}
		ext4_lock_group(sb, i);
		if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Checksum for group %u failed (%u!=%u)",
				 i, le16_to_cpu(ext4_group_desc_csum(sb, i,
				     gdp)), le16_to_cpu(gdp->bg_checksum));
			if (!sb_rdonly(sb)) {
				ext4_unlock_group(sb, i);
				return 0;
			}
		}
		ext4_unlock_group(sb, i);
		if (!flexbg_flag)
			first_block += EXT4_BLOCKS_PER_GROUP(sb);
	}
	if (NULL != first_not_zeroed)
		*first_not_zeroed = grp;
	return 1;
}
/*
 * Maximal extent format file size.
 * Resulting logical blkno at s_maxbytes must fit in our on-disk
 * extent format containers, within a sector_t, and within i_blocks
 * in the vfs.  ext4 inode has 48 bits of i_block in fsblock units,
 * so that won't be a limiting factor.
 *
 * However there is other limiting factor. We do store extents in the form
 * of starting block and length, hence the resulting length of the extent
 * covering maximum file size must fit into on-disk format containers as
 * well. Given that length is always by 1 unit bigger than max unit (because
 * we count 0 as well) we have to lower the s_maxbytes by one fs block.
 *
 * Note, this does *not* consider any metadata overhead for vfs i_blocks.
 */
static loff_t ext4_max_size(int blkbits, int has_huge_files)
{
	loff_t res;
	loff_t upper_limit = MAX_LFS_FILESIZE;

	BUILD_BUG_ON(sizeof(blkcnt_t) < sizeof(u64));

	if (!has_huge_files) {
		upper_limit = (1LL << 32) - 1;

		/* total blocks in file system block size */
		upper_limit >>= (blkbits - 9);
		upper_limit <<= blkbits;
	}

	/*
	 * 32-bit extent-start container, ee_block. We lower the maxbytes
	 * by one fs block, so ee_len can cover the extent of maximum file
	 * size
	 */
	res = (1LL << 32) - 1;
	res <<= blkbits;

	/* Sanity check against vm- & vfs- imposed limits */
	if (res > upper_limit)
		res = upper_limit;

	return res;
}
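
/*
 * Worked numbers (illustrative): for 4KiB blocks (blkbits == 12) with
 * huge_file enabled, res = ((1LL << 32) - 1) << 12, i.e. 2^44 - 2^12,
 * just one block short of 16 TiB, which is below MAX_LFS_FILESIZE on
 * 64-bit so the cap does not bite.  Without huge_file the tighter
 * i_blocks-derived upper_limit computed above applies instead.
 */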
/*
 * Maximal bitmap file size.  There is a direct, and {,double-,triple-}indirect
 * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
 * We need to be 1 filesystem block less than the 2^48 sector limit.
 */
static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
{
	loff_t upper_limit, res = EXT4_NDIR_BLOCKS;
	int meta_blocks;
	unsigned int ppb = 1 << (bits - 2);

	/*
	 * This is calculated to be the largest file size for a dense, block
	 * mapped file such that the file's total number of 512-byte sectors,
	 * including data and all indirect blocks, does not exceed (2^48 - 1).
	 *
	 * __u32 i_blocks_lo and _u16 i_blocks_high represent the total
	 * number of 512-byte sectors of the file.
	 */
	if (!has_huge_files) {
		/*
		 * !has_huge_files or implies that the inode i_block field
		 * represents total file blocks in 2^32 512-byte sectors ==
		 * size of vfs inode i_blocks * 8
		 */
		upper_limit = (1LL << 32) - 1;

		/* total blocks in file system block size */
		upper_limit >>= (bits - 9);

	} else {
		/*
		 * We use 48 bit ext4_inode i_blocks
		 * With EXT4_HUGE_FILE_FL set the i_blocks
		 * represent total number of blocks in
		 * file system block size
		 */
		upper_limit = (1LL << 48) - 1;

	}

	/* Compute how many blocks we can address by block tree */
	res += ppb;
	res += ppb * ppb;
	res += ((loff_t)ppb) * ppb * ppb;
	/* Compute how many metadata blocks are needed */
	meta_blocks = 1;
	meta_blocks += 1 + ppb;
	meta_blocks += 1 + ppb + ppb * ppb;
	/* Does block tree limit file size? */
	if (res + meta_blocks <= upper_limit)
		goto check_lfs;

	res = upper_limit;
	/* How many metadata blocks are needed for addressing upper_limit? */
	upper_limit -= EXT4_NDIR_BLOCKS;
	/* indirect blocks */
	meta_blocks = 1;
	upper_limit -= ppb;
	/* double indirect blocks */
	if (upper_limit < ppb * ppb) {
		meta_blocks += 1 + DIV_ROUND_UP_ULL(upper_limit, ppb);
		res -= meta_blocks;
		goto check_lfs;
	}
	meta_blocks += 1 + ppb;
	upper_limit -= ppb * ppb;
	/* tripple indirect blocks for the rest */
	meta_blocks += 1 + DIV_ROUND_UP_ULL(upper_limit, ppb) +
		DIV_ROUND_UP_ULL(upper_limit, ppb*ppb);
	res -= meta_blocks;
check_lfs:
	res <<= bits;
	if (res > MAX_LFS_FILESIZE)
		res = MAX_LFS_FILESIZE;

	return res;
}
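
/*
 * Worked numbers (illustrative): for 4KiB blocks, bits == 12 gives
 * ppb = 1 << 10 = 1024 block pointers per indirect block, so the block
 * tree can address EXT4_NDIR_BLOCKS + 1024 + 1024^2 + 1024^3 data blocks,
 * roughly 2^30 blocks or ~4 TiB; the i_blocks sector accounting above
 * then decides whether the added metadata pushes the file past the
 * 2^48-sector limit.
 */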
static ext4_fsblk_t descriptor_loc(struct super_block *sb,
				   ext4_fsblk_t logical_sb_block, int nr)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t bg, first_meta_bg;
	int has_super = 0;

	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);

	if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg)
		return logical_sb_block + nr + 1;
	bg = sbi->s_desc_per_block * nr;
	if (ext4_bg_has_super(sb, bg))
		has_super = 1;

	/*
	 * If we have a meta_bg fs with 1k blocks, group 0's GDT is at
	 * block 2, not 1.  If s_first_data_block == 0 (bigalloc is enabled
	 * on modern mke2fs or blksize > 1k on older mke2fs) then we must
	 * compensate.
	 */
	if (sb->s_blocksize == 1024 && nr == 0 &&
	    le32_to_cpu(sbi->s_es->s_first_data_block) == 0)
		has_super++;

	return (has_super + ext4_group_first_block_no(sb, bg));
}
/**
 * ext4_get_stripe_size: Get the stripe size.
 * @sbi: In memory super block info
 *
 * If we have specified it via mount option, then
 * use the mount option value. If the value specified at mount time is
 * greater than the blocks per group use the super block value.
 * If the super block value is greater than blocks per group return 0.
 * Allocator needs it be less than blocks per group.
 */
static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
{
	unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
	unsigned long stripe_width =
			le32_to_cpu(sbi->s_es->s_raid_stripe_width);
	int ret;

	if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
		ret = sbi->s_stripe;
	else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
		ret = stripe_width;
	else if (stride && stride <= sbi->s_blocks_per_group)
		ret = stride;
	else
		ret = 0;

	/*
	 * If the stripe width is 1, this makes no sense and
	 * we set it to 0 to turn off stripe handling code.
	 */
	if (ret <= 1)
		ret = 0;

	return ret;
}
/*
 * Check whether this filesystem can be mounted based on
 * the features present and the RDONLY/RDWR mount requested.
 * Returns 1 if this filesystem can be mounted as requested,
 * 0 if it cannot be.
 */
int ext4_feature_set_ok(struct super_block *sb, int readonly)
{
	if (ext4_has_unknown_ext4_incompat_features(sb)) {
		ext4_msg(sb, KERN_ERR,
			"Couldn't mount because of "
			"unsupported optional features (%x)",
			(le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
			~EXT4_FEATURE_INCOMPAT_SUPP));
		return 0;
	}

	if (!IS_ENABLED(CONFIG_UNICODE) && ext4_has_feature_casefold(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Filesystem with casefold feature cannot be "
			 "mounted without CONFIG_UNICODE");
		return 0;
	}

	if (readonly)
		return 1;

	if (ext4_has_feature_readonly(sb)) {
		ext4_msg(sb, KERN_INFO, "filesystem is read-only");
		sb->s_flags |= SB_RDONLY;
		return 1;
	}

	/* Check that feature set is OK for a read-write mount */
	if (ext4_has_unknown_ext4_ro_compat_features(sb)) {
		ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
			 "unsupported optional features (%x)",
			 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
			  ~EXT4_FEATURE_RO_COMPAT_SUPP));
		return 0;
	}
	if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Can't support bigalloc feature without "
			 "extents feature\n");
		return 0;
	}

#if !IS_ENABLED(CONFIG_QUOTA) || !IS_ENABLED(CONFIG_QFMT_V2)
	if (!readonly && (ext4_has_feature_quota(sb) ||
			  ext4_has_feature_project(sb))) {
		ext4_msg(sb, KERN_ERR,
			 "The kernel was not built with CONFIG_QUOTA and CONFIG_QFMT_V2");
		return 0;
	}
#endif  /* CONFIG_QUOTA */
	return 1;
}
/*
 * This function is called once a day if we have errors logged
 * on the file system
 */
static void print_daily_error_info(struct timer_list *t)
{
	struct ext4_sb_info *sbi = from_timer(sbi, t, s_err_report);
	struct super_block *sb = sbi->s_sb;
	struct ext4_super_block *es = sbi->s_es;

	if (es->s_error_count)
		/* fsck newer than v1.41.13 is needed to clean this condition. */
		ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
			 le32_to_cpu(es->s_error_count));
	if (es->s_first_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %llu: %.*s:%d",
		       sb->s_id,
		       ext4_get_tstamp(es, s_first_error_time),
		       (int) sizeof(es->s_first_error_func),
		       es->s_first_error_func,
		       le32_to_cpu(es->s_first_error_line));
		if (es->s_first_error_ino)
			printk(KERN_CONT ": inode %u",
			       le32_to_cpu(es->s_first_error_ino));
		if (es->s_first_error_block)
			printk(KERN_CONT ": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_first_error_block));
		printk(KERN_CONT "\n");
	}
	if (es->s_last_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): last error at time %llu: %.*s:%d",
		       sb->s_id,
		       ext4_get_tstamp(es, s_last_error_time),
		       (int) sizeof(es->s_last_error_func),
		       es->s_last_error_func,
		       le32_to_cpu(es->s_last_error_line));
		if (es->s_last_error_ino)
			printk(KERN_CONT ": inode %u",
			       le32_to_cpu(es->s_last_error_ino));
		if (es->s_last_error_block)
			printk(KERN_CONT ": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_last_error_block));
		printk(KERN_CONT "\n");
	}
	mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);  /* Once a day */
}
/* Find next suitable group and run ext4_init_inode_table */
static int ext4_run_li_request(struct ext4_li_request *elr)
{
	struct ext4_group_desc *gdp = NULL;
	struct super_block *sb = elr->lr_super;
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
	ext4_group_t group = elr->lr_next_group;
	unsigned int prefetch_ios = 0;
	int ret = 0;
	int nr = EXT4_SB(sb)->s_mb_prefetch;
	u64 start_time;

	if (elr->lr_mode == EXT4_LI_MODE_PREFETCH_BBITMAP) {
		elr->lr_next_group = ext4_mb_prefetch(sb, group, nr, &prefetch_ios);
		ext4_mb_prefetch_fini(sb, elr->lr_next_group, nr);
		trace_ext4_prefetch_bitmaps(sb, group, elr->lr_next_group, nr);
		if (group >= elr->lr_next_group) {
			ret = 1;
			if (elr->lr_first_not_zeroed != ngroups &&
			    !sb_rdonly(sb) && test_opt(sb, INIT_INODE_TABLE)) {
				elr->lr_next_group = elr->lr_first_not_zeroed;
				elr->lr_mode = EXT4_LI_MODE_ITABLE;
				ret = 0;
			}
		}
		return ret;
	}

	for (; group < ngroups; group++) {
		gdp = ext4_get_group_desc(sb, group, NULL);
		if (!gdp) {
			ret = 1;
			break;
		}

		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			break;
	}

	if (group >= ngroups)
		ret = 1;

	if (!ret) {
		start_time = ktime_get_ns();
		ret = ext4_init_inode_table(sb, group,
					    elr->lr_timeout ? 0 : 1);
		trace_ext4_lazy_itable_init(sb, group);
		if (elr->lr_timeout == 0) {
			elr->lr_timeout = nsecs_to_jiffies((ktime_get_ns() - start_time) *
				EXT4_SB(elr->lr_super)->s_li_wait_mult);
		}
		elr->lr_next_sched = jiffies + elr->lr_timeout;
		elr->lr_next_group = group + 1;
	}
	return ret;
}
/*
 * Remove lr_request from the list_request and free the
 * request structure. Should be called with li_list_mtx held
 */
static void ext4_remove_li_request(struct ext4_li_request *elr)
{
	if (!elr)
		return;

	list_del(&elr->lr_request);
	EXT4_SB(elr->lr_super)->s_li_request = NULL;
	kfree(elr);
}

static void ext4_unregister_li_request(struct super_block *sb)
{
	mutex_lock(&ext4_li_mtx);
	if (!ext4_li_info) {
		mutex_unlock(&ext4_li_mtx);
		return;
	}

	mutex_lock(&ext4_li_info->li_list_mtx);
	ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
	mutex_unlock(&ext4_li_info->li_list_mtx);
	mutex_unlock(&ext4_li_mtx);
}
static struct task_struct *ext4_lazyinit_task;

/*
 * This is the function where ext4lazyinit thread lives. It walks
 * through the request list searching for next scheduled filesystem.
 * When such a fs is found, run the lazy initialization request
 * (ext4_run_li_request) and keep track of the time spend in this
 * function. Based on that time we compute next schedule time of
 * the request. When walking through the list is complete, compute
 * next waking time and put itself into sleep.
 */
static int ext4_lazyinit_thread(void *arg)
{
	struct ext4_lazy_init *eli = arg;
	struct list_head *pos, *n;
	struct ext4_li_request *elr;
	unsigned long next_wakeup, cur;

	BUG_ON(NULL == eli);
	set_freezable();

cont_thread:
	while (true) {
		bool next_wakeup_initialized = false;

		next_wakeup = 0;
		mutex_lock(&eli->li_list_mtx);
		if (list_empty(&eli->li_request_list)) {
			mutex_unlock(&eli->li_list_mtx);
			goto exit_thread;
		}
		list_for_each_safe(pos, n, &eli->li_request_list) {
			int err = 0;
			int progress = 0;
			elr = list_entry(pos, struct ext4_li_request,
					 lr_request);

			if (time_before(jiffies, elr->lr_next_sched)) {
				if (!next_wakeup_initialized ||
				    time_before(elr->lr_next_sched, next_wakeup)) {
					next_wakeup = elr->lr_next_sched;
					next_wakeup_initialized = true;
				}
				continue;
			}
			if (down_read_trylock(&elr->lr_super->s_umount)) {
				if (sb_start_write_trylock(elr->lr_super)) {
					progress = 1;
					/*
					 * We hold sb->s_umount, sb can not
					 * be removed from the list, it is
					 * now safe to drop li_list_mtx
					 */
					mutex_unlock(&eli->li_list_mtx);
					err = ext4_run_li_request(elr);
					sb_end_write(elr->lr_super);
					mutex_lock(&eli->li_list_mtx);
					n = pos->next;
				}
				up_read((&elr->lr_super->s_umount));
			}
			/* error, remove the lazy_init job */
			if (err) {
				ext4_remove_li_request(elr);
				continue;
			}
			if (!progress) {
				elr->lr_next_sched = jiffies +
					get_random_u32_below(EXT4_DEF_LI_MAX_START_DELAY * HZ);
			}
			if (!next_wakeup_initialized ||
			    time_before(elr->lr_next_sched, next_wakeup)) {
				next_wakeup = elr->lr_next_sched;
				next_wakeup_initialized = true;
			}
		}
		mutex_unlock(&eli->li_list_mtx);

		try_to_freeze();

		cur = jiffies;
		if (!next_wakeup_initialized || time_after_eq(cur, next_wakeup)) {
			cond_resched();
			continue;
		}

		schedule_timeout_interruptible(next_wakeup - cur);

		if (kthread_should_stop()) {
			ext4_clear_request_list();
			goto exit_thread;
		}
	}

exit_thread:
	/*
	 * It looks like the request list is empty, but we need
	 * to check it under the li_list_mtx lock, to prevent any
	 * additions into it, and of course we should lock ext4_li_mtx
	 * to atomically free the list and ext4_li_info, because at
	 * this point another ext4 filesystem could be registering
	 * new one.
	 */
	mutex_lock(&ext4_li_mtx);
	mutex_lock(&eli->li_list_mtx);
	if (!list_empty(&eli->li_request_list)) {
		mutex_unlock(&eli->li_list_mtx);
		mutex_unlock(&ext4_li_mtx);
		goto cont_thread;
	}
	mutex_unlock(&eli->li_list_mtx);
	kfree(ext4_li_info);
	ext4_li_info = NULL;
	mutex_unlock(&ext4_li_mtx);

	return 0;
}
static void ext4_clear_request_list(void)
{
	struct list_head *pos, *n;
	struct ext4_li_request *elr;

	mutex_lock(&ext4_li_info->li_list_mtx);
	list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
		elr = list_entry(pos, struct ext4_li_request,
				 lr_request);
		ext4_remove_li_request(elr);
	}
	mutex_unlock(&ext4_li_info->li_list_mtx);
}
static int ext4_run_lazyinit_thread(void)
{
	ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
					 ext4_li_info, "ext4lazyinit");
	if (IS_ERR(ext4_lazyinit_task)) {
		int err = PTR_ERR(ext4_lazyinit_task);
		ext4_clear_request_list();
		kfree(ext4_li_info);
		ext4_li_info = NULL;
		printk(KERN_CRIT "EXT4-fs: error %d creating inode table "
				 "initialization thread\n",
				 err);
		return err;
	}
	ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
	return 0;
}
/*
 * Check whether it makes sense to run the itable init. thread or not.
 * If there is at least one uninitialized inode table, return the
 * corresponding group number, else the loop goes through all
 * groups and returns the total number of groups.
 */
static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
{
	ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
	struct ext4_group_desc *gdp = NULL;

	if (!ext4_has_group_desc_csum(sb))
		return ngroups;

	for (group = 0; group < ngroups; group++) {
		gdp = ext4_get_group_desc(sb, group, NULL);
		if (!gdp)
			continue;

		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			break;
	}

	return group;
}
static int ext4_li_info_new(void)
{
	struct ext4_lazy_init *eli = NULL;

	eli = kzalloc(sizeof(*eli), GFP_KERNEL);
	if (!eli)
		return -ENOMEM;

	INIT_LIST_HEAD(&eli->li_request_list);
	mutex_init(&eli->li_list_mtx);

	eli->li_state |= EXT4_LAZYINIT_QUIT;

	ext4_li_info = eli;

	return 0;
}
static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
					    ext4_group_t start)
{
	struct ext4_li_request *elr;

	elr = kzalloc(sizeof(*elr), GFP_KERNEL);
	if (!elr)
		return NULL;

	elr->lr_super = sb;
	elr->lr_first_not_zeroed = start;
	if (test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS)) {
		elr->lr_mode = EXT4_LI_MODE_ITABLE;
		elr->lr_next_group = start;
	} else {
		elr->lr_mode = EXT4_LI_MODE_PREFETCH_BBITMAP;
	}

	/*
	 * Randomize first schedule time of the request to
	 * spread the inode table initialization requests
	 * better.
	 */
	elr->lr_next_sched = jiffies + get_random_u32_below(EXT4_DEF_LI_MAX_START_DELAY * HZ);
	return elr;
}

int ext4_register_li_request(struct super_block *sb,
			     ext4_group_t first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_li_request *elr = NULL;
	ext4_group_t ngroups = sbi->s_groups_count;
	int ret = 0;

	mutex_lock(&ext4_li_mtx);
	if (sbi->s_li_request != NULL) {
		/*
		 * Reset timeout so it can be computed again, because
		 * s_li_wait_mult might have changed.
		 */
		sbi->s_li_request->lr_timeout = 0;
		goto out;
	}

	if (sb_rdonly(sb) ||
	    (test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS) &&
	     (first_not_zeroed == ngroups || !test_opt(sb, INIT_INODE_TABLE))))
		goto out;

	elr = ext4_li_request_new(sb, first_not_zeroed);
	if (!elr) {
		ret = -ENOMEM;
		goto out;
	}

	if (NULL == ext4_li_info) {
		ret = ext4_li_info_new();
		if (ret)
			goto out;
	}

	mutex_lock(&ext4_li_info->li_list_mtx);
	list_add(&elr->lr_request, &ext4_li_info->li_request_list);
	mutex_unlock(&ext4_li_info->li_list_mtx);

	sbi->s_li_request = elr;
	/*
	 * set elr to NULL here since it has been inserted to
	 * the request_list and the removal and free of it is
	 * handled by ext4_clear_request_list from now on.
	 */
	elr = NULL;

	if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
		ret = ext4_run_lazyinit_thread();
		if (ret)
			goto out;
	}
out:
	mutex_unlock(&ext4_li_mtx);
	if (ret)
		kfree(elr);
	return ret;
}

/*
 * We do not need to lock anything since this is called on
 * module unload.
 */
static void ext4_destroy_lazyinit_thread(void)
{
	/*
	 * If thread exited earlier
	 * there's nothing to be done.
	 */
	if (!ext4_li_info || !ext4_lazyinit_task)
		return;

	kthread_stop(ext4_lazyinit_task);
}

static int set_journal_csum_feature_set(struct super_block *sb)
{
	int ret = 1;
	int compat, incompat;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (ext4_has_metadata_csum(sb)) {
		/* journal checksum v3 */
		compat = 0;
		incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
	} else {
		/* journal checksum v1 */
		compat = JBD2_FEATURE_COMPAT_CHECKSUM;
		incompat = 0;
	}

	jbd2_journal_clear_features(sbi->s_journal,
			JBD2_FEATURE_COMPAT_CHECKSUM, 0,
			JBD2_FEATURE_INCOMPAT_CSUM_V3 |
			JBD2_FEATURE_INCOMPAT_CSUM_V2);
	if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
		ret = jbd2_journal_set_features(sbi->s_journal,
				compat, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
				incompat);
	} else if (test_opt(sb, JOURNAL_CHECKSUM)) {
		ret = jbd2_journal_set_features(sbi->s_journal,
				compat, 0,
				incompat);
		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
	} else {
		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
	}

	return ret;
}
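
/*
 * Summary of the resulting jbd2 feature bits: with metadata_csum the
 * journal uses v3 checksums (JBD2_FEATURE_INCOMPAT_CSUM_V3, no compat
 * bit); otherwise the legacy v1 scheme (JBD2_FEATURE_COMPAT_CHECKSUM).
 * journal_async_commit additionally sets
 * JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT, while plain journal_checksum
 * makes sure the async-commit bit stays cleared.
 */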

/*
 * Note: calculating the overhead so we can be compatible with
 * historical BSD practice is quite difficult in the face of
 * clusters/bigalloc.  This is because multiple metadata blocks from
 * different block group can end up in the same allocation cluster.
 * Calculating the exact overhead in the face of clustered allocation
 * requires either O(all block bitmaps) in memory or O(number of block
 * groups**2) in time.  We will still calculate the superblock for
 * older file systems --- and if we come across with a bigalloc file
 * system with zero in s_overhead_clusters the estimate will be close to
 * correct especially for very large cluster sizes --- but for newer
 * file systems, it's better to calculate this figure once at mkfs
 * time, and store it in the superblock.  If the superblock value is
 * present (even for non-bigalloc file systems), we will use it.
 */
static int count_overhead(struct super_block *sb, ext4_group_t grp,
			  char *buf)
{
	struct ext4_sb_info	*sbi = EXT4_SB(sb);
	struct ext4_group_desc	*gdp;
	ext4_fsblk_t		first_block, last_block, b;
	ext4_group_t		i, ngroups = ext4_get_groups_count(sb);
	int			s, j, count = 0;
	int			has_super = ext4_bg_has_super(sb, grp);

	if (!ext4_has_feature_bigalloc(sb))
		return (has_super + ext4_bg_num_gdb(sb, grp) +
			(has_super ? le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0) +
			sbi->s_itb_per_group + 2);

	first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
		(grp * EXT4_BLOCKS_PER_GROUP(sb));
	last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		b = ext4_block_bitmap(sb, gdp);
		if (b >= first_block && b <= last_block) {
			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
			count++;
		}
		b = ext4_inode_bitmap(sb, gdp);
		if (b >= first_block && b <= last_block) {
			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
			count++;
		}
		b = ext4_inode_table(sb, gdp);
		if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
			for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
				int c = EXT4_B2C(sbi, b - first_block);
				ext4_set_bit(c, buf);
				count++;
			}
		if (i != grp)
			continue;
		s = 0;
		if (ext4_bg_has_super(sb, grp)) {
			ext4_set_bit(s++, buf);
			count++;
		}
		j = ext4_bg_num_gdb(sb, grp);
		if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
			ext4_error(sb, "Invalid number of block group "
				   "descriptor blocks: %d", j);
			j = EXT4_BLOCKS_PER_GROUP(sb) - s;
		}
		count += j;
		for (; j > 0; j--)
			ext4_set_bit(EXT4_B2C(sbi, s++), buf);
	}
	if (!count)
		return 0;
	return EXT4_CLUSTERS_PER_GROUP(sb) -
		ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
}
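
/*
 * Worked example for the non-bigalloc fast path above, with purely
 * illustrative numbers: a group holding a superblock backup
 * (has_super = 1) with one group descriptor block, 256 reserved GDT
 * blocks and a 512-block inode table costs
 * 1 + 1 + 256 + 512 + 2 = 772 blocks, where the trailing 2 covers the
 * block and inode bitmaps.
 */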

/*
 * Compute the overhead and stash it in sbi->s_overhead
 */
int ext4_calculate_overhead(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct inode *j_inode;
	unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum);
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
	ext4_fsblk_t overhead = 0;
	char *buf = (char *) get_zeroed_page(GFP_NOFS);

	if (!buf)
		return -ENOMEM;

	/*
	 * Compute the overhead (FS structures).  This is constant
	 * for a given filesystem unless the number of block groups
	 * changes so we cache the previous value until it does.
	 */

	/*
	 * All of the blocks before first_data_block are overhead
	 */
	overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));

	/*
	 * Add the overhead found in each block group
	 */
	for (i = 0; i < ngroups; i++) {
		int blks;

		blks = count_overhead(sb, i, buf);
		overhead += blks;
		if (blks)
			memset(buf, 0, PAGE_SIZE);
		cond_resched();
	}

	/*
	 * Add the internal journal blocks whether the journal has been
	 * loaded or not
	 */
	if (sbi->s_journal && !sbi->s_journal_bdev_file)
		overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_total_len);
	else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) {
		/* j_inum for internal journal is non-zero */
		j_inode = ext4_get_journal_inode(sb, j_inum);
		if (!IS_ERR(j_inode)) {
			j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
			overhead += EXT4_NUM_B2C(sbi, j_blocks);
			iput(j_inode);
		} else {
			ext4_msg(sb, KERN_ERR, "can't get journal size");
		}
	}
	sbi->s_overhead = overhead;
	smp_wmb();
	free_page((unsigned long) buf);
	return 0;
}
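
/*
 * The cached value feeds statfs(): ext4_statfs() reports roughly
 * f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, sbi->s_overhead),
 * so a stale or zero overhead shows up directly as a wrong "Size"
 * in df(1).
 */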

static void ext4_set_resv_clusters(struct super_block *sb)
{
	ext4_fsblk_t resv_clusters;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/*
	 * There's no need to reserve anything when we aren't using extents.
	 * The space estimates are exact, there are no unwritten extents,
	 * hole punching doesn't need new metadata... This is needed especially
	 * to keep ext2/3 backward compatibility.
	 */
	if (!ext4_has_feature_extents(sb))
		return;
	/*
	 * By default we reserve 2% or 4096 clusters, whichever is smaller.
	 * This should cover the situations where we can not afford to run
	 * out of space like for example punch hole, or converting
	 * unwritten extents in delalloc path. In most cases such
	 * allocation would require 1, or 2 blocks, higher numbers are
	 * very rare.
	 */
	resv_clusters = (ext4_blocks_count(sbi->s_es) >>
			 sbi->s_cluster_bits);

	do_div(resv_clusters, 50);
	resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);

	atomic64_set(&sbi->s_resv_clusters, resv_clusters);
}
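
/*
 * Example with illustrative numbers: a 1 TiB filesystem with 4 KiB
 * blocks (and no bigalloc, so s_cluster_bits == 0) has 2^28 blocks;
 * 2% of that is about 5.3 million clusters, which the min_t() above
 * caps at 4096 clusters, i.e. 16 MiB kept back for metadata
 * allocations.
 */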

static const char *ext4_quota_mode(struct super_block *sb)
{
#ifdef CONFIG_QUOTA
	if (!ext4_quota_capable(sb))
		return "none";

	if (EXT4_SB(sb)->s_journal && ext4_is_quota_journalled(sb))
		return "journalled";
	else
		return "writeback";
#else
	return "disabled";
#endif
}

static void ext4_setup_csum_trigger(struct super_block *sb,
				    enum ext4_journal_trigger_type type,
				    void (*trigger)(
					struct jbd2_buffer_trigger_type *type,
					struct buffer_head *bh,
					void *mapped_data,
					size_t size))
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	sbi->s_journal_triggers[type].sb = sb;
	sbi->s_journal_triggers[type].tr_triggers.t_frozen = trigger;
}

static void ext4_free_sbi(struct ext4_sb_info *sbi)
{
	if (!sbi)
		return;

	kfree(sbi->s_blockgroup_lock);
	fs_put_dax(sbi->s_daxdev, NULL);
	kfree(sbi);
}

static struct ext4_sb_info *ext4_alloc_sbi(struct super_block *sb)
{
	struct ext4_sb_info *sbi;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return NULL;

	sbi->s_daxdev = fs_dax_get_by_bdev(sb->s_bdev, &sbi->s_dax_part_off,
					   NULL, NULL);

	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);

	if (!sbi->s_blockgroup_lock)
		goto err_out;

	sb->s_fs_info = sbi;
	sbi->s_sb = sb;
	return sbi;
err_out:
	fs_put_dax(sbi->s_daxdev, NULL);
	kfree(sbi);
	return NULL;
}

static void ext4_set_def_opts(struct super_block *sb,
			      struct ext4_super_block *es)
{
	unsigned long def_mount_opts;

	/* Set defaults before we parse the mount options */
	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
	set_opt(sb, INIT_INODE_TABLE);
	if (def_mount_opts & EXT4_DEFM_DEBUG)
		set_opt(sb, DEBUG);
	if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
		set_opt(sb, GRPID);
	if (def_mount_opts & EXT4_DEFM_UID16)
		set_opt(sb, NO_UID32);
	/* xattr user namespace & acls are now defaulted on */
	set_opt(sb, XATTR_USER);
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	set_opt(sb, POSIX_ACL);
#endif
	if (ext4_has_feature_fast_commit(sb))
		set_opt2(sb, JOURNAL_FAST_COMMIT);
	/* don't forget to enable journal_csum when metadata_csum is enabled. */
	if (ext4_has_metadata_csum(sb))
		set_opt(sb, JOURNAL_CHECKSUM);

	if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
		set_opt(sb, JOURNAL_DATA);
	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
		set_opt(sb, ORDERED_DATA);
	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
		set_opt(sb, WRITEBACK_DATA);

	if (le16_to_cpu(es->s_errors) == EXT4_ERRORS_PANIC)
		set_opt(sb, ERRORS_PANIC);
	else if (le16_to_cpu(es->s_errors) == EXT4_ERRORS_CONTINUE)
		set_opt(sb, ERRORS_CONT);
	else
		set_opt(sb, ERRORS_RO);
	/* block_validity enabled by default; disable with noblock_validity */
	set_opt(sb, BLOCK_VALIDITY);
	if (def_mount_opts & EXT4_DEFM_DISCARD)
		set_opt(sb, DISCARD);

	if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
		set_opt(sb, BARRIER);

	/*
	 * enable delayed allocation by default
	 * Use -o nodelalloc to turn it off
	 */
	if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) &&
	    ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
		set_opt(sb, DELALLOC);

	if (sb->s_blocksize <= PAGE_SIZE)
		set_opt(sb, DIOREAD_NOLOCK);
}

static int ext4_handle_clustersize(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int clustersize;

	/* Handle clustersize */
	clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
	if (ext4_has_feature_bigalloc(sb)) {
		if (clustersize < sb->s_blocksize) {
			ext4_msg(sb, KERN_ERR,
				 "cluster size (%d) smaller than "
				 "block size (%lu)", clustersize, sb->s_blocksize);
			return -EINVAL;
		}
		sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
			le32_to_cpu(es->s_log_block_size);
	} else {
		if (clustersize != sb->s_blocksize) {
			ext4_msg(sb, KERN_ERR,
				 "fragment/cluster size (%d) != "
				 "block size (%lu)", clustersize, sb->s_blocksize);
			return -EINVAL;
		}
		if (sbi->s_blocks_per_group > sb->s_blocksize * 8) {
			ext4_msg(sb, KERN_ERR,
				 "#blocks per group too big: %lu",
				 sbi->s_blocks_per_group);
			return -EINVAL;
		}
		sbi->s_cluster_bits = 0;
	}
	sbi->s_clusters_per_group = le32_to_cpu(es->s_clusters_per_group);
	if (sbi->s_clusters_per_group > sb->s_blocksize * 8) {
		ext4_msg(sb, KERN_ERR, "#clusters per group too big: %lu",
			 sbi->s_clusters_per_group);
		return -EINVAL;
	}
	if (sbi->s_blocks_per_group !=
	    (sbi->s_clusters_per_group * (clustersize / sb->s_blocksize))) {
		ext4_msg(sb, KERN_ERR,
			 "blocks per group (%lu) and clusters per group (%lu) inconsistent",
			 sbi->s_blocks_per_group, sbi->s_clusters_per_group);
		return -EINVAL;
	}
	sbi->s_cluster_ratio = clustersize / sb->s_blocksize;

	/* Do we have standard group size of clustersize * 8 blocks ? */
	if (sbi->s_blocks_per_group == clustersize << 3)
		set_opt2(sb, STD_GROUP_SIZE);

	return 0;
}
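
/*
 * Example: a bigalloc filesystem with 4 KiB blocks
 * (s_log_block_size = 2) and 64 KiB clusters (s_log_cluster_size = 6)
 * ends up with s_cluster_bits = 4 and s_cluster_ratio = 16, i.e. each
 * allocation cluster spans 16 blocks.
 */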

/*
 * ext4_atomic_write_init: Initializes filesystem min & max atomic write units.
 * @sb: super block
 * TODO: Later add support for bigalloc
 */
static void ext4_atomic_write_init(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct block_device *bdev = sb->s_bdev;

	if (!bdev_can_atomic_write(bdev))
		return;

	if (!ext4_has_feature_extents(sb))
		return;

	sbi->s_awu_min = max(sb->s_blocksize,
			     bdev_atomic_write_unit_min_bytes(bdev));
	sbi->s_awu_max = min(sb->s_blocksize,
			     bdev_atomic_write_unit_max_bytes(bdev));
	if (sbi->s_awu_min && sbi->s_awu_max &&
	    sbi->s_awu_min <= sbi->s_awu_max) {
		ext4_msg(sb, KERN_NOTICE, "Supports (experimental) DIO atomic writes awu_min: %u, awu_max: %u",
			 sbi->s_awu_min, sbi->s_awu_max);
	} else {
		sbi->s_awu_min = 0;
		sbi->s_awu_max = 0;
	}
}

static void ext4_fast_commit_init(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* Initialize fast commit stuff */
	atomic_set(&sbi->s_fc_subtid, 0);
	INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_MAIN]);
	INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_STAGING]);
	INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_MAIN]);
	INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_STAGING]);
	sbi->s_fc_bytes = 0;
	ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
	sbi->s_fc_ineligible_tid = 0;
	spin_lock_init(&sbi->s_fc_lock);
	memset(&sbi->s_fc_stats, 0, sizeof(sbi->s_fc_stats));
	sbi->s_fc_replay_state.fc_regions = NULL;
	sbi->s_fc_replay_state.fc_regions_size = 0;
	sbi->s_fc_replay_state.fc_regions_used = 0;
	sbi->s_fc_replay_state.fc_regions_valid = 0;
	sbi->s_fc_replay_state.fc_modified_inodes = NULL;
	sbi->s_fc_replay_state.fc_modified_inodes_size = 0;
	sbi->s_fc_replay_state.fc_modified_inodes_used = 0;
}

static int ext4_inode_info_init(struct super_block *sb,
				struct ext4_super_block *es)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
		sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
		sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
	} else {
		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
		if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) {
			ext4_msg(sb, KERN_ERR, "invalid first ino: %u",
				 sbi->s_first_ino);
			return -EINVAL;
		}
		if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
		    (!is_power_of_2(sbi->s_inode_size)) ||
		    (sbi->s_inode_size > sb->s_blocksize)) {
			ext4_msg(sb, KERN_ERR,
			       "unsupported inode size: %d",
			       sbi->s_inode_size);
			ext4_msg(sb, KERN_ERR, "blocksize: %lu", sb->s_blocksize);
			return -EINVAL;
		}
		/*
		 * i_atime_extra is the last extra field available for
		 * [acm]times in struct ext4_inode. Checking for that
		 * field should suffice to ensure we have extra space
		 * for all three.
		 */
		if (sbi->s_inode_size >= offsetof(struct ext4_inode, i_atime_extra) +
			sizeof(((struct ext4_inode *)0)->i_atime_extra)) {
			sb->s_time_gran = 1;
			sb->s_time_max = EXT4_EXTRA_TIMESTAMP_MAX;
		} else {
			sb->s_time_gran = NSEC_PER_SEC;
			sb->s_time_max = EXT4_NON_EXTRA_TIMESTAMP_MAX;
		}
		sb->s_time_min = EXT4_TIMESTAMP_MIN;
	}

	if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
			EXT4_GOOD_OLD_INODE_SIZE;
		if (ext4_has_feature_extra_isize(sb)) {
			unsigned v, max = (sbi->s_inode_size -
					   EXT4_GOOD_OLD_INODE_SIZE);

			v = le16_to_cpu(es->s_want_extra_isize);
			if (v > max) {
				ext4_msg(sb, KERN_ERR,
					 "bad s_want_extra_isize: %d", v);
				return -EINVAL;
			}
			if (sbi->s_want_extra_isize < v)
				sbi->s_want_extra_isize = v;

			v = le16_to_cpu(es->s_min_extra_isize);
			if (v > max) {
				ext4_msg(sb, KERN_ERR,
					 "bad s_min_extra_isize: %d", v);
				return -EINVAL;
			}
			if (sbi->s_want_extra_isize < v)
				sbi->s_want_extra_isize = v;
		}
	}

	return 0;
}

#if IS_ENABLED(CONFIG_UNICODE)
static int ext4_encoding_init(struct super_block *sb, struct ext4_super_block *es)
{
	const struct ext4_sb_encodings *encoding_info;
	struct unicode_map *encoding;
	__u16 encoding_flags = le16_to_cpu(es->s_encoding_flags);

	if (!ext4_has_feature_casefold(sb) || sb->s_encoding)
		return 0;

	encoding_info = ext4_sb_read_encoding(es);
	if (!encoding_info) {
		ext4_msg(sb, KERN_ERR,
			 "Encoding requested by superblock is unknown");
		return -EINVAL;
	}

	encoding = utf8_load(encoding_info->version);
	if (IS_ERR(encoding)) {
		ext4_msg(sb, KERN_ERR,
			 "can't mount with superblock charset: %s-%u.%u.%u "
			 "not supported by the kernel. flags: 0x%x.",
			 encoding_info->name,
			 unicode_major(encoding_info->version),
			 unicode_minor(encoding_info->version),
			 unicode_rev(encoding_info->version),
			 encoding_flags);
		return -EINVAL;
	}
	ext4_msg(sb, KERN_INFO, "Using encoding defined by superblock: "
		 "%s-%u.%u.%u with flags 0x%hx", encoding_info->name,
		 unicode_major(encoding_info->version),
		 unicode_minor(encoding_info->version),
		 unicode_rev(encoding_info->version),
		 encoding_flags);

	sb->s_encoding = encoding;
	sb->s_encoding_flags = encoding_flags;

	return 0;
}
#else
static inline int ext4_encoding_init(struct super_block *sb, struct ext4_super_block *es)
{
	return 0;
}
#endif

static int ext4_init_metadata_csum(struct super_block *sb, struct ext4_super_block *es)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* Warn if metadata_csum and gdt_csum are both set. */
	if (ext4_has_feature_metadata_csum(sb) &&
	    ext4_has_feature_gdt_csum(sb))
		ext4_warning(sb, "metadata_csum and uninit_bg are "
			     "redundant flags; please run fsck.");

	/* Check for a known checksum algorithm */
	if (!ext4_verify_csum_type(sb, es)) {
		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
			 "unknown checksum algorithm.");
		return -EINVAL;
	}
	ext4_setup_csum_trigger(sb, EXT4_JTR_ORPHAN_FILE,
				ext4_orphan_file_block_trigger);

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		int ret = PTR_ERR(sbi->s_chksum_driver);

		ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
		sbi->s_chksum_driver = NULL;
		return ret;
	}

	/* Check superblock checksum */
	if (!ext4_superblock_csum_verify(sb, es)) {
		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
			 "invalid superblock checksum.  Run e2fsck?");
		return -EFSBADCRC;
	}

	/* Precompute checksum seed for all metadata */
	if (ext4_has_feature_csum_seed(sb))
		sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed);
	else if (ext4_has_metadata_csum(sb) || ext4_has_feature_ea_inode(sb))
		sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
					       sizeof(es->s_uuid));
	return 0;
}
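
/*
 * The precomputed seed is crc32c(~0, s_uuid), so every per-object
 * checksum is implicitly bound to the filesystem UUID.  The csum_seed
 * feature instead persists the seed in the superblock, which is what
 * allows the UUID to be changed without rewriting every checksum.
 */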

static int ext4_check_feature_compatibility(struct super_block *sb,
					    struct ext4_super_block *es,
					    int silent)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
	    (ext4_has_compat_features(sb) ||
	     ext4_has_ro_compat_features(sb) ||
	     ext4_has_incompat_features(sb)))
		ext4_msg(sb, KERN_WARNING,
		       "feature flags set on rev 0 fs, "
		       "running e2fsck is recommended");

	if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) {
		set_opt2(sb, HURD_COMPAT);
		if (ext4_has_feature_64bit(sb)) {
			ext4_msg(sb, KERN_ERR,
				 "The Hurd can't support 64-bit file systems");
			return -EINVAL;
		}

		/*
		 * ea_inode feature uses l_i_version field which is not
		 * available in HURD_COMPAT mode.
		 */
		if (ext4_has_feature_ea_inode(sb)) {
			ext4_msg(sb, KERN_ERR,
				 "ea_inode feature is not supported for Hurd");
			return -EINVAL;
		}
	}

	if (IS_EXT2_SB(sb)) {
		if (ext2_feature_set_ok(sb))
			ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
				 "using the ext4 subsystem");
		else {
			/*
			 * If we're probing be silent, if this looks like
			 * it's actually an ext[34] filesystem.
			 */
			if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
				return -EINVAL;
			ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
				 "to feature incompatibilities");
			return -EINVAL;
		}
	}

	if (IS_EXT3_SB(sb)) {
		if (ext3_feature_set_ok(sb))
			ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
				 "using the ext4 subsystem");
		else {
			/*
			 * If we're probing be silent, if this looks like
			 * it's actually an ext4 filesystem.
			 */
			if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
				return -EINVAL;
			ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
				 "to feature incompatibilities");
			return -EINVAL;
		}
	}

	/*
	 * Check feature flags regardless of the revision level, since we
	 * previously didn't change the revision level when setting the flags,
	 * so there is a chance incompat flags are set on a rev 0 filesystem.
	 */
	if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
		return -EINVAL;

	if (sbi->s_daxdev) {
		if (sb->s_blocksize == PAGE_SIZE)
			set_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags);
		else
			ext4_msg(sb, KERN_ERR, "unsupported blocksize for DAX\n");
	}

	if (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) {
		if (ext4_has_feature_inline_data(sb)) {
			ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem"
					" that may contain inline data");
			return -EINVAL;
		}
		if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags)) {
			ext4_msg(sb, KERN_ERR,
				 "DAX unsupported by block device.");
			return -EINVAL;
		}
	}

	if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) {
		ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
			 es->s_encryption_level);
		return -EINVAL;
	}

	return 0;
}

static int ext4_check_geometry(struct super_block *sb,
			       struct ext4_super_block *es)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t blocks_count;
	int err;

	if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (sb->s_blocksize / 4)) {
		ext4_msg(sb, KERN_ERR,
			 "Number of reserved GDT blocks insanely large: %d",
			 le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
		return -EINVAL;
	}
	/*
	 * Test whether we have more sectors than will fit in sector_t,
	 * and whether the max offset is addressable by the page cache.
	 */
	err = generic_check_addressable(sb->s_blocksize_bits,
					ext4_blocks_count(es));
	if (err) {
		ext4_msg(sb, KERN_ERR, "filesystem"
			 " too large to mount safely on this system");
		return err;
	}

	/* check blocks count against device size */
	blocks_count = sb_bdev_nr_blocks(sb);
	if (blocks_count && ext4_blocks_count(es) > blocks_count) {
		ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu "
		       "exceeds size of device (%llu blocks)",
		       ext4_blocks_count(es), blocks_count);
		return -EINVAL;
	}

	/*
	 * It makes no sense for the first data block to be beyond the end
	 * of the filesystem.
	 */
	if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
		ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
			 "block %u is beyond end of filesystem (%llu)",
			 le32_to_cpu(es->s_first_data_block),
			 ext4_blocks_count(es));
		return -EINVAL;
	}
	if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) &&
	    (sbi->s_cluster_ratio == 1)) {
		ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
			 "block is 0 with a 1k block and cluster size");
		return -EINVAL;
	}

	blocks_count = (ext4_blocks_count(es) -
			le32_to_cpu(es->s_first_data_block) +
			EXT4_BLOCKS_PER_GROUP(sb) - 1);
	do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
	if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
		ext4_msg(sb, KERN_WARNING, "groups count too large: %llu "
		       "(block count %llu, first data block %u, "
		       "blocks per group %lu)", blocks_count,
		       ext4_blocks_count(es),
		       le32_to_cpu(es->s_first_data_block),
		       EXT4_BLOCKS_PER_GROUP(sb));
		return -EINVAL;
	}
	sbi->s_groups_count = blocks_count;
	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
	if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
	    le32_to_cpu(es->s_inodes_count)) {
		ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
			 le32_to_cpu(es->s_inodes_count),
			 ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
		return -EINVAL;
	}

	return 0;
}
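
/*
 * Example of the group-count math above: 1,000,000 blocks with
 * s_first_data_block = 0 and 32768 blocks per group round up to
 * ceil(1000000 / 32768) = 31 groups, and the inode count check then
 * requires s_inodes_count == 31 * s_inodes_per_group exactly.
 */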

static int ext4_group_desc_init(struct super_block *sb,
				struct ext4_super_block *es,
				ext4_fsblk_t logical_sb_block,
				ext4_group_t *first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned int db_count;
	ext4_fsblk_t block;
	int i;

	db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
		   EXT4_DESC_PER_BLOCK(sb);
	if (ext4_has_feature_meta_bg(sb)) {
		if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
			ext4_msg(sb, KERN_WARNING,
				 "first meta block group too large: %u "
				 "(group descriptor block count %u)",
				 le32_to_cpu(es->s_first_meta_bg), db_count);
			return -EINVAL;
		}
	}
	rcu_assign_pointer(sbi->s_group_desc,
			   kvmalloc_array(db_count,
					  sizeof(struct buffer_head *),
					  GFP_KERNEL));
	if (sbi->s_group_desc == NULL) {
		ext4_msg(sb, KERN_ERR, "not enough memory");
		return -ENOMEM;
	}

	bgl_lock_init(sbi->s_blockgroup_lock);

	/* Pre-read the descriptors into the buffer cache */
	for (i = 0; i < db_count; i++) {
		block = descriptor_loc(sb, logical_sb_block, i);
		ext4_sb_breadahead_unmovable(sb, block);
	}

	for (i = 0; i < db_count; i++) {
		struct buffer_head *bh;

		block = descriptor_loc(sb, logical_sb_block, i);
		bh = ext4_sb_bread_unmovable(sb, block);
		if (IS_ERR(bh)) {
			ext4_msg(sb, KERN_ERR,
			       "can't read group descriptor %d", i);
			sbi->s_gdb_count = i;
			return PTR_ERR(bh);
		}
		rcu_read_lock();
		rcu_dereference(sbi->s_group_desc)[i] = bh;
		rcu_read_unlock();
	}
	sbi->s_gdb_count = db_count;
	if (!ext4_check_descriptors(sb, logical_sb_block, first_not_zeroed)) {
		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
		return -EFSCORRUPTED;
	}

	return 0;
}

static int ext4_load_and_init_journal(struct super_block *sb,
				      struct ext4_super_block *es,
				      struct ext4_fs_context *ctx)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int err;

	err = ext4_load_journal(sb, es, ctx->journal_devnum);
	if (err)
		return err;

	if (ext4_has_feature_64bit(sb) &&
	    !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
				       JBD2_FEATURE_INCOMPAT_64BIT)) {
		ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
		goto out;
	}

	if (!set_journal_csum_feature_set(sb)) {
		ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
			 "feature set");
		goto out;
	}

	if (test_opt2(sb, JOURNAL_FAST_COMMIT) &&
	    !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
				       JBD2_FEATURE_INCOMPAT_FAST_COMMIT)) {
		ext4_msg(sb, KERN_ERR,
			 "Failed to set fast commit journal feature");
		goto out;
	}

	/* We have now updated the journal if required, so we can
	 * validate the data journaling mode. */
	switch (test_opt(sb, DATA_FLAGS)) {
	case 0:
		/* No mode set, assume a default based on the journal
		 * capabilities: ORDERED_DATA if the journal can
		 * cope, else JOURNAL_DATA
		 */
		if (jbd2_journal_check_available_features
		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
			set_opt(sb, ORDERED_DATA);
			sbi->s_def_mount_opt |= EXT4_MOUNT_ORDERED_DATA;
		} else {
			set_opt(sb, JOURNAL_DATA);
			sbi->s_def_mount_opt |= EXT4_MOUNT_JOURNAL_DATA;
		}
		break;

	case EXT4_MOUNT_ORDERED_DATA:
	case EXT4_MOUNT_WRITEBACK_DATA:
		if (!jbd2_journal_check_available_features
		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
			ext4_msg(sb, KERN_ERR, "Journal does not support "
			       "requested data journaling mode");
			goto out;
		}
		break;
	default:
		break;
	}

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
	    test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
		ext4_msg(sb, KERN_ERR, "can't mount with "
			 "journal_async_commit in data=ordered mode");
		goto out;
	}

	set_task_ioprio(sbi->s_journal->j_task, ctx->journal_ioprio);

	sbi->s_journal->j_submit_inode_data_buffers =
		ext4_journal_submit_inode_data_buffers;
	sbi->s_journal->j_finish_inode_data_buffers =
		ext4_journal_finish_inode_data_buffers;

	return 0;

out:
	/* flush s_sb_upd_work before destroying the journal. */
	flush_work(&sbi->s_sb_upd_work);
	jbd2_journal_destroy(sbi->s_journal);
	sbi->s_journal = NULL;
	return -EINVAL;
}

static int ext4_check_journal_data_mode(struct super_block *sb)
{
	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
		printk_once(KERN_WARNING "EXT4-fs: Warning: mounting with "
			    "data=journal disables delayed allocation, "
			    "dioread_nolock, O_DIRECT and fast_commit support!\n");
		/* can't mount with both data=journal and dioread_nolock. */
		clear_opt(sb, DIOREAD_NOLOCK);
		clear_opt2(sb, JOURNAL_FAST_COMMIT);
		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and delalloc");
			return -EINVAL;
		}
		if (test_opt(sb, DAX_ALWAYS)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dax");
			return -EINVAL;
		}
		if (ext4_has_feature_encrypt(sb)) {
			ext4_msg(sb, KERN_WARNING,
				 "encrypted files will use data=ordered "
				 "instead of data journaling mode");
		}
		if (test_opt(sb, DELALLOC))
			clear_opt(sb, DELALLOC);
	} else {
		sb->s_iflags |= SB_I_CGROUPWB;
	}

	return 0;
}

static int ext4_load_super(struct super_block *sb, ext4_fsblk_t *lsb,
			   int silent)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es;
	ext4_fsblk_t logical_sb_block;
	unsigned long offset = 0;
	struct buffer_head *bh;
	int ret = -EINVAL;
	int blocksize;

	blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
	if (!blocksize) {
		ext4_msg(sb, KERN_ERR, "unable to set blocksize");
		return -EINVAL;
	}

	/*
	 * The ext4 superblock will not be buffer aligned for other than 1kB
	 * block sizes.  We need to calculate the offset from buffer start.
	 */
	if (blocksize != EXT4_MIN_BLOCK_SIZE) {
		logical_sb_block = sbi->s_sb_block * EXT4_MIN_BLOCK_SIZE;
		offset = do_div(logical_sb_block, blocksize);
	} else {
		logical_sb_block = sbi->s_sb_block;
	}

	bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
	if (IS_ERR(bh)) {
		ext4_msg(sb, KERN_ERR, "unable to read superblock");
		return PTR_ERR(bh);
	}
	/*
	 * Note: s_es must be initialized as soon as possible because
	 *       some ext4 macro-instructions depend on its value
	 */
	es = (struct ext4_super_block *) (bh->b_data + offset);
	sbi->s_es = es;
	sb->s_magic = le16_to_cpu(es->s_magic);
	if (sb->s_magic != EXT4_SUPER_MAGIC) {
		if (!silent)
			ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
		goto out;
	}

	if (le32_to_cpu(es->s_log_block_size) >
	    (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
		ext4_msg(sb, KERN_ERR,
			 "Invalid log block size: %u",
			 le32_to_cpu(es->s_log_block_size));
		goto out;
	}
	if (le32_to_cpu(es->s_log_cluster_size) >
	    (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
		ext4_msg(sb, KERN_ERR,
			 "Invalid log cluster size: %u",
			 le32_to_cpu(es->s_log_cluster_size));
		goto out;
	}

	blocksize = EXT4_MIN_BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);

	/*
	 * If the default block size is not the same as the real block size,
	 * we need to reload it.
	 */
	if (sb->s_blocksize == blocksize) {
		*lsb = logical_sb_block;
		sbi->s_sbh = bh;
		return 0;
	}

	/*
	 * bh must be released before kill_bdev(), otherwise
	 * it won't be freed and its page also. kill_bdev()
	 * is called by sb_set_blocksize().
	 */
	brelse(bh);
	/* Validate the filesystem blocksize */
	if (!sb_set_blocksize(sb, blocksize)) {
		ext4_msg(sb, KERN_ERR, "bad block size %d",
			 blocksize);
		return -EINVAL;
	}

	logical_sb_block = sbi->s_sb_block * EXT4_MIN_BLOCK_SIZE;
	offset = do_div(logical_sb_block, blocksize);
	bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
	if (IS_ERR(bh)) {
		ext4_msg(sb, KERN_ERR, "Can't read superblock on 2nd try");
		return PTR_ERR(bh);
	}
	es = (struct ext4_super_block *)(bh->b_data + offset);
	sbi->s_es = es;
	if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
		ext4_msg(sb, KERN_ERR, "Magic mismatch, very weird!");
		goto out;
	}
	*lsb = logical_sb_block;
	sbi->s_sbh = bh;
	return 0;
out:
	brelse(bh);
	return ret;
}
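
/*
 * The primary superblock always lives at byte offset 1024, so with a
 * 1 KiB block size it is logical block 1 at offset 0, while with a
 * 4 KiB block size the do_div() above yields logical block 0 at
 * offset 1024 within the buffer.
 */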

static int ext4_hash_info_init(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	unsigned int i;

	sbi->s_def_hash_version = es->s_def_hash_version;

	if (sbi->s_def_hash_version > DX_HASH_LAST) {
		ext4_msg(sb, KERN_ERR,
			 "Invalid default hash set in the superblock");
		return -EINVAL;
	} else if (sbi->s_def_hash_version == DX_HASH_SIPHASH) {
		ext4_msg(sb, KERN_ERR,
			 "SIPHASH is not a valid default hash value");
		return -EINVAL;
	}

	for (i = 0; i < 4; i++)
		sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);

	if (ext4_has_feature_dir_index(sb)) {
		i = le32_to_cpu(es->s_flags);
		if (i & EXT2_FLAGS_UNSIGNED_HASH)
			sbi->s_hash_unsigned = 3;
		else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
#ifdef __CHAR_UNSIGNED__
			if (!sb_rdonly(sb))
				es->s_flags |=
					cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
			sbi->s_hash_unsigned = 3;
#else
			if (!sb_rdonly(sb))
				es->s_flags |=
					cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
#endif
		}
	}
	return 0;
}

static int ext4_block_group_meta_init(struct super_block *sb, int silent)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int has_huge_files;

	has_huge_files = ext4_has_feature_huge_file(sb);
	sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
						      has_huge_files);
	sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);

	sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
	if (ext4_has_feature_64bit(sb)) {
		if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
		    sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
		    !is_power_of_2(sbi->s_desc_size)) {
			ext4_msg(sb, KERN_ERR,
			       "unsupported descriptor size %lu",
			       sbi->s_desc_size);
			return -EINVAL;
		}
	} else
		sbi->s_desc_size = EXT4_MIN_DESC_SIZE;

	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);

	sbi->s_inodes_per_block = sb->s_blocksize / EXT4_INODE_SIZE(sb);
	if (sbi->s_inodes_per_block == 0 || sbi->s_blocks_per_group == 0) {
		if (!silent)
			ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
		return -EINVAL;
	}
	if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
	    sbi->s_inodes_per_group > sb->s_blocksize * 8) {
		ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
			 sbi->s_inodes_per_group);
		return -EINVAL;
	}

	sbi->s_itb_per_group = sbi->s_inodes_per_group /
					sbi->s_inodes_per_block;
	sbi->s_desc_per_block = sb->s_blocksize / EXT4_DESC_SIZE(sb);
	sbi->s_mount_state = le16_to_cpu(es->s_state) & ~EXT4_FC_REPLAY;
	sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
	sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));

	return 0;
}

/*
 * It's hard to get stripe aligned blocks if stripe is not aligned with
 * cluster, just disable stripe and alert user to simplify code and avoid
 * stripe aligned allocation which will rarely succeed.
 */
static bool ext4_is_stripe_incompatible(struct super_block *sb, unsigned long stripe)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	return (stripe > 0 && sbi->s_cluster_ratio > 1 &&
		stripe % sbi->s_cluster_ratio != 0);
}
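
/*
 * Example: with a cluster ratio of 16, stripe=32 is fine (32 % 16 == 0)
 * but stripe=24 is not (24 % 16 == 8), so the caller in
 * __ext4_fill_super() warns and clears sbi->s_stripe in the latter
 * case.
 */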

static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
{
	struct ext4_super_block *es = NULL;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t logical_sb_block;
	struct inode *root;
	int needs_recovery;
	int err;
	ext4_group_t first_not_zeroed;
	struct ext4_fs_context *ctx = fc->fs_private;
	int silent = fc->sb_flags & SB_SILENT;

	/* Set defaults for the variables that will be set during parsing */
	if (!(ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO))
		ctx->journal_ioprio = DEFAULT_JOURNAL_IOPRIO;

	sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
	sbi->s_sectors_written_start =
		part_stat_read(sb->s_bdev, sectors[STAT_WRITE]);

	err = ext4_load_super(sb, &logical_sb_block, silent);
	if (err)
		goto out_fail;

	es = sbi->s_es;
	sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);

	err = ext4_init_metadata_csum(sb, es);
	if (err)
		goto failed_mount;

	ext4_set_def_opts(sb, es);

	sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
	sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
	sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
	sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
	sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;

	/*
	 * set default s_li_wait_mult for lazyinit, for the case there is
	 * no mount option specified.
	 */
	sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;

	err = ext4_inode_info_init(sb, es);
	if (err)
		goto failed_mount;

	err = parse_apply_sb_mount_options(sb, ctx);
	if (err < 0)
		goto failed_mount;

	sbi->s_def_mount_opt = sbi->s_mount_opt;
	sbi->s_def_mount_opt2 = sbi->s_mount_opt2;

	err = ext4_check_opt_consistency(fc, sb);
	if (err < 0)
		goto failed_mount;

	ext4_apply_options(fc, sb);

	err = ext4_encoding_init(sb, es);
	if (err)
		goto failed_mount;

	err = ext4_check_journal_data_mode(sb);
	if (err)
		goto failed_mount;

	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);

	/* i_version is always enabled now */
	sb->s_flags |= SB_I_VERSION;

	err = ext4_check_feature_compatibility(sb, es, silent);
	if (err)
		goto failed_mount;

	err = ext4_block_group_meta_init(sb, silent);
	if (err)
		goto failed_mount;

	err = ext4_hash_info_init(sb);
	if (err)
		goto failed_mount;

	err = ext4_handle_clustersize(sb);
	if (err)
		goto failed_mount;

	err = ext4_check_geometry(sb, es);
	if (err)
		goto failed_mount;

	timer_setup(&sbi->s_err_report, print_daily_error_info, 0);
	spin_lock_init(&sbi->s_error_lock);
	INIT_WORK(&sbi->s_sb_upd_work, update_super_work);

	err = ext4_group_desc_init(sb, es, logical_sb_block, &first_not_zeroed);
	if (err)
		goto failed_mount3;

	err = ext4_es_register_shrinker(sbi);
	if (err)
		goto failed_mount3;

	sbi->s_stripe = ext4_get_stripe_size(sbi);
	if (ext4_is_stripe_incompatible(sb, sbi->s_stripe)) {
		ext4_msg(sb, KERN_WARNING,
			 "stripe (%lu) is not aligned with cluster size (%u), "
			 "stripe is disabled",
			 sbi->s_stripe, sbi->s_cluster_ratio);
		sbi->s_stripe = 0;
	}
	sbi->s_extent_max_zeroout_kb = 32;

	/*
	 * set up enough so that it can read an inode
	 */
	sb->s_op = &ext4_sops;
	sb->s_export_op = &ext4_export_ops;
	sb->s_xattr = ext4_xattr_handlers;
#ifdef CONFIG_FS_ENCRYPTION
	sb->s_cop = &ext4_cryptops;
#endif
#ifdef CONFIG_FS_VERITY
	sb->s_vop = &ext4_verityops;
#endif
#ifdef CONFIG_QUOTA
	sb->dq_op = &ext4_quota_operations;
	if (ext4_has_feature_quota(sb))
		sb->s_qcop = &dquot_quotactl_sysfile_ops;
	else
		sb->s_qcop = &ext4_qctl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	super_set_uuid(sb, es->s_uuid, sizeof(es->s_uuid));
	super_set_sysfs_name_bdev(sb);

	INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
	mutex_init(&sbi->s_orphan_lock);

	spin_lock_init(&sbi->s_bdev_wb_lock);

	ext4_atomic_write_init(sb);
	ext4_fast_commit_init(sb);

	sb->s_root = NULL;

	needs_recovery = (es->s_last_orphan != 0 ||
			  ext4_has_feature_orphan_present(sb) ||
			  ext4_has_feature_journal_needs_recovery(sb));

	if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb)) {
		err = ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block));
		if (err)
			goto failed_mount3a;
	}

	/*
	 * The first inode we look at is the journal inode.  Don't try
	 * root first: it may be modified in the journal!
	 */
	if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) {
		err = ext4_load_and_init_journal(sb, es, ctx);
		if (err)
			goto failed_mount3a;
	} else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) &&
		   ext4_has_feature_journal_needs_recovery(sb)) {
		ext4_msg(sb, KERN_ERR, "required journal recovery "
		       "suppressed and not mounted read-only");
		err = -EROFS;
		goto failed_mount3a;
	} else {
		/* Nojournal mode, all journal mount options are illegal */
		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "journal_async_commit, fs mounted w/o journal");
			err = -EINVAL;
			goto failed_mount3a;
		}

		if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "journal_checksum, fs mounted w/o journal");
			err = -EINVAL;
			goto failed_mount3a;
		}
		if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "commit=%lu, fs mounted w/o journal",
				 sbi->s_commit_interval / HZ);
			err = -EINVAL;
			goto failed_mount3a;
		}
		if (EXT4_MOUNT_DATA_FLAGS &
		    (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "data=, fs mounted w/o journal");
			err = -EINVAL;
			goto failed_mount3a;
		}
		sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
		clear_opt(sb, JOURNAL_CHECKSUM);
		clear_opt(sb, DATA_FLAGS);
		clear_opt2(sb, JOURNAL_FAST_COMMIT);
		sbi->s_journal = NULL;
		needs_recovery = 0;
	}

	if (!test_opt(sb, NO_MBCACHE)) {
		sbi->s_ea_block_cache = ext4_xattr_create_cache();
		if (!sbi->s_ea_block_cache) {
			ext4_msg(sb, KERN_ERR,
				 "Failed to create ea_block_cache");
			err = -EINVAL;
			goto failed_mount_wq;
		}

		if (ext4_has_feature_ea_inode(sb)) {
			sbi->s_ea_inode_cache = ext4_xattr_create_cache();
			if (!sbi->s_ea_inode_cache) {
				ext4_msg(sb, KERN_ERR,
					 "Failed to create ea_inode_cache");
				err = -EINVAL;
				goto failed_mount_wq;
			}
		}
	}

	/*
	 * Get the # of file system overhead blocks from the
	 * superblock if present.
	 */
	sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
	/* ignore the precalculated value if it is ridiculous */
	if (sbi->s_overhead > ext4_blocks_count(es))
		sbi->s_overhead = 0;
	/*
	 * If the bigalloc feature is not enabled recalculating the
	 * overhead doesn't take long, so we might as well just redo
	 * it to make sure we are using the correct value.
	 */
	if (!ext4_has_feature_bigalloc(sb))
		sbi->s_overhead = 0;
	if (sbi->s_overhead == 0) {
		err = ext4_calculate_overhead(sb);
		if (err)
			goto failed_mount_wq;
	}

	/*
	 * The maximum number of concurrent works can be high and
	 * concurrency isn't really necessary.  Limit it to 1.
	 */
	EXT4_SB(sb)->rsv_conversion_wq =
		alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!EXT4_SB(sb)->rsv_conversion_wq) {
		printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
		err = -ENOMEM;
		goto failed_mount4;
	}

	/*
	 * The jbd2_journal_load will have done any necessary log recovery,
	 * so we can safely mount the rest of the filesystem now.
	 */

	root = ext4_iget(sb, EXT4_ROOT_INO, EXT4_IGET_SPECIAL);
	if (IS_ERR(root)) {
		ext4_msg(sb, KERN_ERR, "get root inode failed");
		err = PTR_ERR(root);
		root = NULL;
		goto failed_mount4;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
		iput(root);
		err = -EFSCORRUPTED;
		goto failed_mount4;
	}

	generic_set_sb_d_ops(sb);
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		ext4_msg(sb, KERN_ERR, "get root dentry failed");
		err = -ENOMEM;
		goto failed_mount4;
	}

	err = ext4_setup_super(sb, es, sb_rdonly(sb));
	if (err == -EROFS) {
		sb->s_flags |= SB_RDONLY;
	} else if (err)
		goto failed_mount4a;

	ext4_set_resv_clusters(sb);

	if (test_opt(sb, BLOCK_VALIDITY)) {
		err = ext4_setup_system_zone(sb);
		if (err) {
			ext4_msg(sb, KERN_ERR, "failed to initialize system "
				 "zone (%d)", err);
			goto failed_mount4a;
		}
	}
	ext4_fc_replay_cleanup(sb);

	ext4_ext_init(sb);

	/*
	 * Enable optimize_scan if number of groups is > threshold. This can be
	 * turned off by passing "mb_optimize_scan=0". This can also be
	 * turned on forcefully by passing "mb_optimize_scan=1".
	 */
	if (!(ctx->spec & EXT4_SPEC_mb_optimize_scan)) {
		if (sbi->s_groups_count >= MB_DEFAULT_LINEAR_SCAN_THRESHOLD)
			set_opt2(sb, MB_OPTIMIZE_SCAN);
		else
			clear_opt2(sb, MB_OPTIMIZE_SCAN);
	}

	err = ext4_mb_init(sb);
	if (err) {
		ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
			 err);
		goto failed_mount5;
	}

	/*
	 * We can only set up the journal commit callback once
	 * mballoc is initialized
	 */
	if (sbi->s_journal)
		sbi->s_journal->j_commit_callback =
			ext4_journal_commit_callback;

	err = ext4_percpu_param_init(sbi);
	if (err)
		goto failed_mount6;

	if (ext4_has_feature_flex_bg(sb))
		if (!ext4_fill_flex_info(sb)) {
			ext4_msg(sb, KERN_ERR,
			       "unable to initialize "
			       "flex_bg meta info!");
			err = -ENOMEM;
			goto failed_mount6;
		}

	err = ext4_register_li_request(sb, first_not_zeroed);
	if (err)
		goto failed_mount6;

	err = ext4_init_orphan_info(sb);
	if (err)
		goto failed_mount7;
#ifdef CONFIG_QUOTA
	/* Enable quota usage during mount. */
	if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) {
		err = ext4_enable_quotas(sb);
		if (err)
			goto failed_mount8;
	}
#endif  /* CONFIG_QUOTA */

	/*
	 * Save the original bdev mapping's wb_err value which could be
	 * used to detect the metadata async write error.
	 */
	errseq_check_and_advance(&sb->s_bdev->bd_mapping->wb_err,
				 &sbi->s_bdev_wb_err);
	EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
	ext4_orphan_cleanup(sb, es);
	EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
	/*
	 * Update the checksum after updating free space/inode counters and
	 * ext4_orphan_cleanup. Otherwise the superblock can have an incorrect
	 * checksum in the buffer cache until it is written out and
	 * e2fsprogs programs trying to open a file system immediately
	 * after it is mounted can fail.
	 */
	ext4_superblock_csum_set(sb);
	if (needs_recovery) {
		ext4_msg(sb, KERN_INFO, "recovery complete");
		err = ext4_mark_recovery_complete(sb, es);
		if (err)
			goto failed_mount9;
	}

	if (test_opt(sb, DISCARD) && !bdev_max_discard_sectors(sb->s_bdev))
		ext4_msg(sb, KERN_WARNING,
			 "mounting with \"discard\" option, but the device does not support discard");

	if (es->s_error_count)
		mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */

	/* Enable message ratelimiting. Default is 10 messages per 5 secs. */
	ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
	ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
	ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);
	atomic_set(&sbi->s_warning_count, 0);
	atomic_set(&sbi->s_msg_count, 0);

	/* Register sysfs after all initializations are complete. */
	err = ext4_register_sysfs(sb);
	if (err)
		goto failed_mount9;

	return 0;

failed_mount9:
	ext4_quotas_off(sb, EXT4_MAXQUOTAS);
failed_mount8: __maybe_unused
	ext4_release_orphan_info(sb);
failed_mount7:
	ext4_unregister_li_request(sb);
failed_mount6:
	ext4_mb_release(sb);
	ext4_flex_groups_free(sbi);
	ext4_percpu_param_destroy(sbi);
failed_mount5:
	ext4_ext_release(sb);
	ext4_release_system_zone(sb);
failed_mount4a:
	dput(sb->s_root);
	sb->s_root = NULL;
failed_mount4:
	ext4_msg(sb, KERN_ERR, "mount failed");
	if (EXT4_SB(sb)->rsv_conversion_wq)
		destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
failed_mount_wq:
	ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
	sbi->s_ea_inode_cache = NULL;

	ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
	sbi->s_ea_block_cache = NULL;

	if (sbi->s_journal) {
		/* flush s_sb_upd_work before journal destroy. */
		flush_work(&sbi->s_sb_upd_work);
		jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
	}
failed_mount3a:
	ext4_es_unregister_shrinker(sbi);
failed_mount3:
	/* flush s_sb_upd_work before sbi destroy */
	flush_work(&sbi->s_sb_upd_work);
	ext4_stop_mmpd(sbi);
	del_timer_sync(&sbi->s_err_report);
	ext4_group_desc_free(sbi);
failed_mount:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);

#if IS_ENABLED(CONFIG_UNICODE)
	utf8_unload(sb->s_encoding);
#endif

#ifdef CONFIG_QUOTA
	for (unsigned int i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(get_qf_name(sb, sbi, i));
#endif
	fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
	brelse(sbi->s_sbh);
	if (sbi->s_journal_bdev_file) {
		invalidate_bdev(file_bdev(sbi->s_journal_bdev_file));
		bdev_fput(sbi->s_journal_bdev_file);
	}
out_fail:
	invalidate_bdev(sb->s_bdev);
	sb->s_fs_info = NULL;
	return err;
}

static int ext4_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct ext4_fs_context *ctx = fc->fs_private;
	struct ext4_sb_info *sbi;
	const char *descr;
	int ret;

	sbi = ext4_alloc_sbi(sb);
	if (!sbi)
		return -ENOMEM;

	fc->s_fs_info = sbi;

	/* Cleanup superblock name */
	strreplace(sb->s_id, '/', '!');

	sbi->s_sb_block = 1;	/* Default super block location */
	if (ctx->spec & EXT4_SPEC_s_sb_block)
		sbi->s_sb_block = ctx->s_sb_block;

	ret = __ext4_fill_super(fc, sb);
	if (ret < 0)
		goto free_sbi;

	if (sbi->s_journal) {
		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
			descr = " journalled data mode";
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
			descr = " ordered data mode";
		else
			descr = " writeback data mode";
	} else
		descr = "out journal";

	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
		ext4_msg(sb, KERN_INFO, "mounted filesystem %pU %s with%s. "
			 "Quota mode: %s.", &sb->s_uuid,
			 sb_rdonly(sb) ? "ro" : "r/w", descr,
			 ext4_quota_mode(sb));

	/* Update the s_overhead_clusters if necessary */
	ext4_update_overhead(sb, false);
	return 0;

free_sbi:
	ext4_free_sbi(sbi);
	fc->s_fs_info = NULL;
	return ret;
}

static int ext4_get_tree(struct fs_context *fc)
{
	return get_tree_bdev(fc, ext4_fill_super);
}

/*
 * Setup any per-fs journal parameters now.  We'll do this both on
 * initial mount, once the journal has been initialised but before we've
 * done any recovery; and again on any subsequent remount.
 */
static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	journal->j_commit_interval = sbi->s_commit_interval;
	journal->j_min_batch_time = sbi->s_min_batch_time;
	journal->j_max_batch_time = sbi->s_max_batch_time;
	ext4_fc_init(sb, journal);

	write_lock(&journal->j_state_lock);
	if (test_opt(sb, BARRIER))
		journal->j_flags |= JBD2_BARRIER;
	else
		journal->j_flags &= ~JBD2_BARRIER;
	if (test_opt(sb, DATA_ERR_ABORT))
		journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR;
	else
		journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR;
	/*
	 * Always enable journal cycle record option, letting the journal
	 * records log transactions continuously between each mount.
	 */
	journal->j_flags |= JBD2_CYCLE_RECORD;
	write_unlock(&journal->j_state_lock);
}

static struct inode *ext4_get_journal_inode(struct super_block *sb,
					    unsigned int journal_inum)
{
	struct inode *journal_inode;

	/*
	 * Test for the existence of a valid inode on disk.  Bad things
	 * happen if we iget() an unused inode, as the subsequent iput()
	 * will try to delete it.
	 */
	journal_inode = ext4_iget(sb, journal_inum, EXT4_IGET_SPECIAL);
	if (IS_ERR(journal_inode)) {
		ext4_msg(sb, KERN_ERR, "no journal found");
		return ERR_CAST(journal_inode);
	}
	if (!journal_inode->i_nlink) {
		make_bad_inode(journal_inode);
		iput(journal_inode);
		ext4_msg(sb, KERN_ERR, "journal inode is deleted");
		return ERR_PTR(-EFSCORRUPTED);
	}
	if (!S_ISREG(journal_inode->i_mode) || IS_ENCRYPTED(journal_inode)) {
		ext4_msg(sb, KERN_ERR, "invalid journal inode");
		iput(journal_inode);
		return ERR_PTR(-EFSCORRUPTED);
	}

	ext4_debug("Journal inode found at %p: %lld bytes\n",
		   journal_inode, journal_inode->i_size);
	return journal_inode;
}

static int ext4_journal_bmap(journal_t *journal, sector_t *block)
{
	struct ext4_map_blocks map;
	int ret;

	if (journal->j_inode == NULL)
		return 0;

	map.m_lblk = *block;
	map.m_len = 1;
	ret = ext4_map_blocks(NULL, journal->j_inode, &map, 0);
	if (ret <= 0) {
		ext4_msg(journal->j_inode->i_sb, KERN_CRIT,
			 "journal bmap failed: block %llu ret %d\n",
			 *block, ret);
		jbd2_journal_abort(journal, ret ? ret : -EIO);
		return ret;
	}
	*block = map.m_pblk;
	return 0;
}

static journal_t *ext4_open_inode_journal(struct super_block *sb,
					  unsigned int journal_inum)
{
	struct inode *journal_inode;
	journal_t *journal;

	journal_inode = ext4_get_journal_inode(sb, journal_inum);
	if (IS_ERR(journal_inode))
		return ERR_CAST(journal_inode);

	journal = jbd2_journal_init_inode(journal_inode);
	if (IS_ERR(journal)) {
		ext4_msg(sb, KERN_ERR, "Could not load journal inode");
		iput(journal_inode);
		return ERR_CAST(journal);
	}
	journal->j_private = sb;
	journal->j_bmap = ext4_journal_bmap;
	ext4_init_journal_params(sb, journal);
	return journal;
}

static struct file *ext4_get_journal_blkdev(struct super_block *sb,
					dev_t j_dev, ext4_fsblk_t *j_start,
					ext4_fsblk_t *j_len)
{
	struct buffer_head *bh;
	struct block_device *bdev;
	struct file *bdev_file;
	int hblock, blocksize;
	ext4_fsblk_t sb_block;
	unsigned long offset;
	struct ext4_super_block *es;
	int errno;

	bdev_file = bdev_file_open_by_dev(j_dev,
		BLK_OPEN_READ | BLK_OPEN_WRITE | BLK_OPEN_RESTRICT_WRITES,
		sb, &fs_holder_ops);
	if (IS_ERR(bdev_file)) {
		ext4_msg(sb, KERN_ERR,
			 "failed to open journal device unknown-block(%u,%u) %ld",
			 MAJOR(j_dev), MINOR(j_dev), PTR_ERR(bdev_file));
		return bdev_file;
	}

	bdev = file_bdev(bdev_file);
	blocksize = sb->s_blocksize;
	hblock = bdev_logical_block_size(bdev);
	if (blocksize < hblock) {
		ext4_msg(sb, KERN_ERR,
			 "blocksize too small for journal device");
		errno = -EINVAL;
		goto out_bdev;
	}

	sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
	offset = EXT4_MIN_BLOCK_SIZE % blocksize;
	set_blocksize(bdev_file, blocksize);
	bh = __bread(bdev, sb_block, blocksize);
	if (!bh) {
		ext4_msg(sb, KERN_ERR, "couldn't read superblock of "
			 "external journal");
		errno = -EINVAL;
		goto out_bdev;
	}

	es = (struct ext4_super_block *) (bh->b_data + offset);
	if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
	    !(le32_to_cpu(es->s_feature_incompat) &
	      EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
		ext4_msg(sb, KERN_ERR, "external journal has bad superblock");
		errno = -EFSCORRUPTED;
		goto out_bh;
	}

	if ((le32_to_cpu(es->s_feature_ro_compat) &
	     EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
	    es->s_checksum != ext4_superblock_csum(sb, es)) {
		ext4_msg(sb, KERN_ERR, "external journal has corrupt superblock");
		errno = -EFSCORRUPTED;
		goto out_bh;
	}

	if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
		ext4_msg(sb, KERN_ERR, "journal UUID does not match");
		errno = -EFSCORRUPTED;
		goto out_bh;
	}

	*j_start = sb_block + 1;
	*j_len = ext4_blocks_count(es);
	brelse(bh);
	return bdev_file;

out_bh:
	brelse(bh);
out_bdev:
	bdev_fput(bdev_file);
	return ERR_PTR(errno);
}

static journal_t *ext4_open_dev_journal(struct super_block *sb,
					dev_t j_dev)
{
	journal_t *journal;
	ext4_fsblk_t j_start;
	ext4_fsblk_t j_len;
	struct file *bdev_file;
	int errno = 0;

	bdev_file = ext4_get_journal_blkdev(sb, j_dev, &j_start, &j_len);
	if (IS_ERR(bdev_file))
		return ERR_CAST(bdev_file);

	journal = jbd2_journal_init_dev(file_bdev(bdev_file), sb->s_bdev, j_start,
					j_len, sb->s_blocksize);
	if (IS_ERR(journal)) {
		ext4_msg(sb, KERN_ERR, "failed to create device journal");
		errno = PTR_ERR(journal);
		goto out_bdev;
	}
	if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
		ext4_msg(sb, KERN_ERR, "External journal has more than one "
			 "user (unsupported) - %d",
			 be32_to_cpu(journal->j_superblock->s_nr_users));
		errno = -EINVAL;
		goto out_journal;
	}
	journal->j_private = sb;
	EXT4_SB(sb)->s_journal_bdev_file = bdev_file;
	ext4_init_journal_params(sb, journal);
	return journal;

out_journal:
	jbd2_journal_destroy(journal);
out_bdev:
	bdev_fput(bdev_file);
	return ERR_PTR(errno);
}

static int ext4_load_journal(struct super_block *sb,
			     struct ext4_super_block *es,
			     unsigned long journal_devnum)
{
	journal_t *journal;
	unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
	dev_t journal_dev;
	int err = 0;
	int really_read_only;
	int journal_dev_ro;

	if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
		return -EFSCORRUPTED;

	if (journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		ext4_msg(sb, KERN_INFO, "external journal device major/minor "
			 "numbers have changed");
		journal_dev = new_decode_dev(journal_devnum);
	} else
		journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));

	if (journal_inum && journal_dev) {
		ext4_msg(sb, KERN_ERR,
			 "filesystem has both journal inode and journal device!");
		return -EINVAL;
	}

	if (journal_inum) {
		journal = ext4_open_inode_journal(sb, journal_inum);
		if (IS_ERR(journal))
			return PTR_ERR(journal);
	} else {
		journal = ext4_open_dev_journal(sb, journal_dev);
		if (IS_ERR(journal))
			return PTR_ERR(journal);
	}

	journal_dev_ro = bdev_read_only(journal->j_dev);
	really_read_only = bdev_read_only(sb->s_bdev) | journal_dev_ro;

	if (journal_dev_ro && !sb_rdonly(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "journal device read-only, try mounting with '-o ro'");
		err = -EROFS;
		goto err_out;
	}

	/*
	 * Are we loading a blank journal or performing recovery after a
	 * crash?  For recovery, we need to check in advance whether we
	 * can get read-write access to the device.
	 */
	if (ext4_has_feature_journal_needs_recovery(sb)) {
		if (sb_rdonly(sb)) {
			ext4_msg(sb, KERN_INFO, "INFO: recovery "
					"required on readonly filesystem");
			if (really_read_only) {
				ext4_msg(sb, KERN_ERR, "write access "
					"unavailable, cannot proceed "
					"(try mounting with noload)");
				err = -EROFS;
				goto err_out;
			}
			ext4_msg(sb, KERN_INFO, "write access will "
			       "be enabled during recovery");
		}
	}

	if (!(journal->j_flags & JBD2_BARRIER))
		ext4_msg(sb, KERN_INFO, "barriers disabled");

	if (!ext4_has_feature_journal_needs_recovery(sb))
		err = jbd2_journal_wipe(journal, !really_read_only);
	if (!err) {
		char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
		__le16 orig_state;
		bool changed = false;

		if (save)
			memcpy(save, ((char *) es) +
			       EXT4_S_ERR_START, EXT4_S_ERR_LEN);
		err = jbd2_journal_load(journal);
		if (save && memcmp(((char *) es) + EXT4_S_ERR_START,
				   save, EXT4_S_ERR_LEN)) {
			memcpy(((char *) es) + EXT4_S_ERR_START,
			       save, EXT4_S_ERR_LEN);
			changed = true;
		}
		kfree(save);
		orig_state = es->s_state;
		es->s_state |= cpu_to_le16(EXT4_SB(sb)->s_mount_state &
					   EXT4_ERROR_FS);
		if (orig_state != es->s_state)
			changed = true;
		/* Write out restored error information to the superblock */
		if (changed && !really_read_only) {
			int err2;

			err2 = ext4_commit_super(sb);
			err = err ? : err2;
		}
	}

	if (err) {
		ext4_msg(sb, KERN_ERR, "error loading journal");
		goto err_out;
	}

	EXT4_SB(sb)->s_journal = journal;
	err = ext4_clear_journal_err(sb, es);
	if (err) {
		EXT4_SB(sb)->s_journal = NULL;
		jbd2_journal_destroy(journal);
		return err;
	}

	if (!really_read_only && journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		es->s_journal_dev = cpu_to_le32(journal_devnum);
		ext4_commit_super(sb);
	}
	if (!really_read_only && journal_inum &&
	    journal_inum != le32_to_cpu(es->s_journal_inum)) {
		es->s_journal_inum = cpu_to_le32(journal_inum);
		ext4_commit_super(sb);
	}

	return 0;

err_out:
	jbd2_journal_destroy(journal);
	return err;
}
/* Copy state of EXT4_SB(sb) into buffer for on-disk superblock */
static void ext4_update_super(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head *sbh = sbi->s_sbh;

	lock_buffer(sbh);
	/*
	 * If the file system is mounted read-only, don't update the
	 * superblock write time.  This avoids updating the superblock
	 * write time when we are mounting the root file system
	 * read/only but we need to replay the journal; at that point,
	 * for people who are east of GMT and who make their clock
	 * tick in localtime for Windows bug-for-bug compatibility,
	 * the clock is set in the future, and this will cause e2fsck
	 * to complain and force a full file system check.
	 */
	if (!sb_rdonly(sb))
		ext4_update_tstamp(es, s_wtime);
	es->s_kbytes_written =
		cpu_to_le64(sbi->s_kbytes_written +
		    ((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
		      sbi->s_sectors_written_start) >> 1));
	if (percpu_counter_initialized(&sbi->s_freeclusters_counter))
		ext4_free_blocks_count_set(es,
			EXT4_C2B(sbi, percpu_counter_sum_positive(
				&sbi->s_freeclusters_counter)));
	if (percpu_counter_initialized(&sbi->s_freeinodes_counter))
		es->s_free_inodes_count =
			cpu_to_le32(percpu_counter_sum_positive(
				&sbi->s_freeinodes_counter));
	/* Copy error information to the on-disk superblock */
	spin_lock(&sbi->s_error_lock);
	if (sbi->s_add_error_count > 0) {
		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
		if (!es->s_first_error_time && !es->s_first_error_time_hi) {
			__ext4_update_tstamp(&es->s_first_error_time,
					     &es->s_first_error_time_hi,
					     sbi->s_first_error_time);
			strtomem_pad(es->s_first_error_func,
				     sbi->s_first_error_func, 0);
			es->s_first_error_line =
				cpu_to_le32(sbi->s_first_error_line);
			es->s_first_error_ino =
				cpu_to_le32(sbi->s_first_error_ino);
			es->s_first_error_block =
				cpu_to_le64(sbi->s_first_error_block);
			es->s_first_error_errcode =
				ext4_errno_to_code(sbi->s_first_error_code);
		}
		__ext4_update_tstamp(&es->s_last_error_time,
				     &es->s_last_error_time_hi,
				     sbi->s_last_error_time);
		strtomem_pad(es->s_last_error_func, sbi->s_last_error_func, 0);
		es->s_last_error_line = cpu_to_le32(sbi->s_last_error_line);
		es->s_last_error_ino = cpu_to_le32(sbi->s_last_error_ino);
		es->s_last_error_block = cpu_to_le64(sbi->s_last_error_block);
		es->s_last_error_errcode =
				ext4_errno_to_code(sbi->s_last_error_code);
		/*
		 * Start the daily error reporting function if it hasn't been
		 * started already
		 */
		if (!es->s_error_count)
			mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);
		le32_add_cpu(&es->s_error_count, sbi->s_add_error_count);
		sbi->s_add_error_count = 0;
	}
	spin_unlock(&sbi->s_error_lock);

	ext4_superblock_csum_set(sb);
	unlock_buffer(sbh);
}
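
/*
 * Write the in-core superblock (and the error information that
 * ext4_update_super() just copied into it) back to disk synchronously,
 * using REQ_FUA when the barrier mount option is enabled.
 */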
static int ext4_commit_super(struct super_block *sb)
{
	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;

	ext4_update_super(sb);

	lock_buffer(sbh);
	/* Buffer got discarded which means block device got invalidated */
	if (!buffer_mapped(sbh)) {
		unlock_buffer(sbh);
		return -EIO;
	}

	if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
		/*
		 * Oh, dear.  A previous attempt to write the
		 * superblock failed.  This could happen because the
		 * USB device was yanked out.  Or it could happen to
		 * be a transient write error and maybe the block will
		 * be remapped.  Nothing we can do but to retry the
		 * write and hope for the best.
		 */
		ext4_msg(sb, KERN_ERR, "previous I/O error to "
		       "superblock detected");
		clear_buffer_write_io_error(sbh);
		set_buffer_uptodate(sbh);
	}
	get_bh(sbh);
	/* Clear potential dirty bit if it was journalled update */
	clear_buffer_dirty(sbh);
	sbh->b_end_io = end_buffer_write_sync;
	submit_bh(REQ_OP_WRITE | REQ_SYNC |
		  (test_opt(sb, BARRIER) ? REQ_FUA : 0), sbh);
	wait_on_buffer(sbh);
	if (buffer_write_io_error(sbh)) {
		ext4_msg(sb, KERN_ERR, "I/O error while writing "
			 "superblock");
		clear_buffer_write_io_error(sbh);
		set_buffer_uptodate(sbh);
		return -EIO;
	}
	return 0;
}
/*
 * Have we just finished recovery?  If so, and if we are mounting (or
 * remounting) the filesystem readonly, then we will end up with a
 * consistent fs on disk.  Record that fact.
 */
static int ext4_mark_recovery_complete(struct super_block *sb,
				       struct ext4_super_block *es)
{
	int err;
	journal_t *journal = EXT4_SB(sb)->s_journal;

	if (!ext4_has_feature_journal(sb)) {
		if (journal != NULL) {
			ext4_error(sb, "Journal got removed while the fs was "
				   "mounted!");
			return -EFSCORRUPTED;
		}
		return 0;
	}
	jbd2_journal_lock_updates(journal);
	err = jbd2_journal_flush(journal, 0);
	if (err < 0)
		goto out;

	if (sb_rdonly(sb) && (ext4_has_feature_journal_needs_recovery(sb) ||
	    ext4_has_feature_orphan_present(sb))) {
		if (!ext4_orphan_file_empty(sb)) {
			ext4_error(sb, "Orphan file not empty on read-only fs.");
			err = -EFSCORRUPTED;
			goto out;
		}
		ext4_clear_feature_journal_needs_recovery(sb);
		ext4_clear_feature_orphan_present(sb);
		ext4_commit_super(sb);
	}
out:
	jbd2_journal_unlock_updates(journal);
	return err;
}
/*
 * If we are mounting (or read-write remounting) a filesystem whose journal
 * has recorded an error from a previous lifetime, move that error to the
 * main filesystem now.
 */
static int ext4_clear_journal_err(struct super_block *sb,
				  struct ext4_super_block *es)
{
	journal_t *journal;
	int j_errno;
	const char *errstr;

	if (!ext4_has_feature_journal(sb)) {
		ext4_error(sb, "Journal got removed while the fs was mounted!");
		return -EFSCORRUPTED;
	}

	journal = EXT4_SB(sb)->s_journal;

	/*
	 * Now check for any error status which may have been recorded in the
	 * journal by a prior ext4_error() or ext4_abort()
	 */
	j_errno = jbd2_journal_errno(journal);
	if (j_errno) {
		char nbuf[16];

		errstr = ext4_decode_error(sb, j_errno, nbuf);
		ext4_warning(sb, "Filesystem error recorded "
			     "from previous mount: %s", errstr);

		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
		j_errno = ext4_commit_super(sb);
		if (j_errno)
			return j_errno;
		ext4_warning(sb, "Marked fs in need of filesystem check.");

		jbd2_journal_clear_err(journal);
		jbd2_journal_update_sb_errno(journal);
	}
	return 0;
}
/*
 * Force the running and committing transactions to commit,
 * and wait on the commit.
 */
int ext4_force_commit(struct super_block *sb)
{
	return ext4_journal_force_commit(EXT4_SB(sb)->s_journal);
}
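
/*
 * sync_fs callback: flush the reserved-extent conversion workqueue and any
 * non-journalled dquots, then either wait on a journal commit or, when no
 * commit will issue a cache flush for us, flush the block device directly.
 */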
static int ext4_sync_fs(struct super_block *sb, int wait)
{
	int ret = 0;
	tid_t target;
	bool needs_barrier = false;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (unlikely(ext4_forced_shutdown(sb)))
		return 0;

	trace_ext4_sync_fs(sb, wait);
	flush_workqueue(sbi->rsv_conversion_wq);
	/*
	 * Writeback quota in non-journalled quota case - journalled quota has
	 * no dirty dquots
	 */
	dquot_writeback_dquots(sb, -1);
	/*
	 * Data writeback is possible w/o journal transaction, so barrier must
	 * being sent at the end of the function. But we can skip it if
	 * transaction_commit will do it for us.
	 */
	if (sbi->s_journal) {
		target = jbd2_get_latest_transaction(sbi->s_journal);
		if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
		    !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
			needs_barrier = true;

		if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
			if (wait)
				ret = jbd2_log_wait_commit(sbi->s_journal,
							   target);
		}
	} else if (wait && test_opt(sb, BARRIER))
		needs_barrier = true;
	if (needs_barrier) {
		int err;

		err = blkdev_issue_flush(sb->s_bdev);
		if (!ret)
			ret = err;
	}

	return ret;
}
/*
 * LVM calls this function before a (read-only) snapshot is created.  This
 * gives us a chance to flush the journal completely and mark the fs clean.
 *
 * Note that only this function cannot bring a filesystem to be in a clean
 * state independently. It relies on upper layer to stop all data & metadata
 * modifications.
 */
static int ext4_freeze(struct super_block *sb)
{
	int error = 0;
	journal_t *journal = EXT4_SB(sb)->s_journal;

	if (journal) {
		/* Now we set up the journal barrier. */
		jbd2_journal_lock_updates(journal);

		/*
		 * Don't clear the needs_recovery flag if we failed to
		 * flush the journal.
		 */
		error = jbd2_journal_flush(journal, 0);
		if (error < 0)
			goto out;

		/* Journal blocked and flushed, clear needs_recovery flag. */
		ext4_clear_feature_journal_needs_recovery(sb);
		if (ext4_orphan_file_empty(sb))
			ext4_clear_feature_orphan_present(sb);
	}

	error = ext4_commit_super(sb);
out:
	if (journal)
		/* we rely on upper layer to stop further updates */
		jbd2_journal_unlock_updates(journal);
	return error;
}
/*
 * Called by LVM after the snapshot is done.  We need to reset the RECOVER
 * flag here, even though the filesystem is not technically dirty yet.
 */
static int ext4_unfreeze(struct super_block *sb)
{
	if (ext4_forced_shutdown(sb))
		return 0;

	if (EXT4_SB(sb)->s_journal) {
		/* Reset the needs_recovery flag before the fs is unlocked. */
		ext4_set_feature_journal_needs_recovery(sb);
		if (ext4_has_feature_orphan_file(sb))
			ext4_set_feature_orphan_present(sb);
	}

	ext4_commit_super(sb);
	return 0;
}
/*
 * Structure to save mount options for ext4_remount's benefit
 */
struct ext4_mount_options {
	unsigned long s_mount_opt;
	unsigned long s_mount_opt2;
	kuid_t s_resuid;
	kgid_t s_resgid;
	unsigned long s_commit_interval;
	u32 s_min_batch_time, s_max_batch_time;
#ifdef CONFIG_QUOTA
	int s_jquota_fmt;
	char *s_qf_names[EXT4_MAXQUOTAS];
#endif
};
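
/*
 * Core of the remount path.  The current options are saved first so that
 * any failure can restore them; new options are then applied and validated,
 * and ro->rw / rw->ro transitions handle journal recovery state, MMP and
 * quota suspend/resume.
 */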
static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
{
	struct ext4_fs_context *ctx = fc->fs_private;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned long old_sb_flags;
	struct ext4_mount_options old_opts;
	ext4_group_t g;
	int err = 0;
	int alloc_ctx;
	int enable_quota = 0;
	int i, j;
	char *to_free[EXT4_MAXQUOTAS];

	/* Store the original options */
	old_sb_flags = sb->s_flags;
	old_opts.s_mount_opt = sbi->s_mount_opt;
	old_opts.s_mount_opt2 = sbi->s_mount_opt2;
	old_opts.s_resuid = sbi->s_resuid;
	old_opts.s_resgid = sbi->s_resgid;
	old_opts.s_commit_interval = sbi->s_commit_interval;
	old_opts.s_min_batch_time = sbi->s_min_batch_time;
	old_opts.s_max_batch_time = sbi->s_max_batch_time;
#ifdef CONFIG_QUOTA
	old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		if (sbi->s_qf_names[i]) {
			char *qf_name = get_qf_name(sb, sbi, i);

			old_opts.s_qf_names[i] = kstrdup(qf_name, GFP_KERNEL);
			if (!old_opts.s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kfree(old_opts.s_qf_names[j]);
				return -ENOMEM;
			}
		} else
			old_opts.s_qf_names[i] = NULL;
#endif
	if (!(ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO)) {
		if (sbi->s_journal && sbi->s_journal->j_task->io_context)
			ctx->journal_ioprio =
				sbi->s_journal->j_task->io_context->ioprio;
		else
			ctx->journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
	}

	if ((ctx->spec & EXT4_SPEC_s_stripe) &&
	    ext4_is_stripe_incompatible(sb, ctx->s_stripe)) {
		ext4_msg(sb, KERN_WARNING,
			 "stripe (%lu) is not aligned with cluster size (%u), "
			 "stripe is disabled",
			 ctx->s_stripe, sbi->s_cluster_ratio);
		ctx->s_stripe = 0;
	}

	/*
	 * Changing the DIOREAD_NOLOCK or DELALLOC mount options may cause
	 * two calls to ext4_should_dioread_nolock() to return inconsistent
	 * values, triggering WARN_ON in ext4_add_complete_io(). we grab
	 * here s_writepages_rwsem to avoid race between writepages ops and
	 * remount.
	 */
	alloc_ctx = ext4_writepages_down_write(sb);
	ext4_apply_options(fc, sb);
	ext4_writepages_up_write(sb, alloc_ctx);

	if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
	    test_opt(sb, JOURNAL_CHECKSUM)) {
		ext4_msg(sb, KERN_ERR, "changing journal_checksum "
			 "during remount not supported; ignoring");
		sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
	}

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and delalloc");
			err = -EINVAL;
			goto restore_opts;
		}
		if (test_opt(sb, DIOREAD_NOLOCK)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dioread_nolock");
			err = -EINVAL;
			goto restore_opts;
		}
	} else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) {
		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "journal_async_commit in data=ordered mode");
			err = -EINVAL;
			goto restore_opts;
		}
	}

	if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_NO_MBCACHE) {
		ext4_msg(sb, KERN_ERR, "can't enable nombcache during remount");
		err = -EINVAL;
		goto restore_opts;
	}

	if ((old_opts.s_mount_opt & EXT4_MOUNT_DELALLOC) &&
	    !test_opt(sb, DELALLOC)) {
		ext4_msg(sb, KERN_ERR, "can't disable delalloc during remount");
		err = -EINVAL;
		goto restore_opts;
	}

	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);

	es = sbi->s_es;

	if (sbi->s_journal) {
		ext4_init_journal_params(sb, sbi->s_journal);
		set_task_ioprio(sbi->s_journal->j_task, ctx->journal_ioprio);
	}

	/* Flush outstanding errors before changing fs state */
	flush_work(&sbi->s_sb_upd_work);

	if ((bool)(fc->sb_flags & SB_RDONLY) != sb_rdonly(sb)) {
		if (ext4_forced_shutdown(sb)) {
			err = -EROFS;
			goto restore_opts;
		}

		if (fc->sb_flags & SB_RDONLY) {
			err = sync_filesystem(sb);
			if (err < 0)
				goto restore_opts;
			err = dquot_suspend(sb, -1);
			if (err < 0)
				goto restore_opts;

			/*
			 * First of all, the unconditional stuff we have to do
			 * to disable replay of the journal when we next remount
			 */
			sb->s_flags |= SB_RDONLY;

			/*
			 * OK, test if we are remounting a valid rw partition
			 * readonly, and if so set the rdonly flag and then
			 * mark the partition as valid again.
			 */
			if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) &&
			    (sbi->s_mount_state & EXT4_VALID_FS))
				es->s_state = cpu_to_le16(sbi->s_mount_state);

			if (sbi->s_journal) {
				/*
				 * We let remount-ro finish even if marking fs
				 * as clean failed...
				 */
				ext4_mark_recovery_complete(sb, es);
			}
		} else {
			/* Make sure we can mount this feature set readwrite */
			if (ext4_has_feature_readonly(sb) ||
			    !ext4_feature_set_ok(sb, 0)) {
				err = -EROFS;
				goto restore_opts;
			}
			/*
			 * Make sure the group descriptor checksums
			 * are sane.  If they aren't, refuse to remount r/w.
			 */
			for (g = 0; g < sbi->s_groups_count; g++) {
				struct ext4_group_desc *gdp =
					ext4_get_group_desc(sb, g, NULL);

				if (!ext4_group_desc_csum_verify(sb, g, gdp)) {
					ext4_msg(sb, KERN_ERR,
		"ext4_remount: Checksum for group %u failed (%u!=%u)",
		g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)),
					       le16_to_cpu(gdp->bg_checksum));
					err = -EFSBADCRC;
					goto restore_opts;
				}
			}

			/*
			 * If we have an unprocessed orphan list hanging
			 * around from a previously readonly bdev mount,
			 * require a full umount/remount for now.
			 */
			if (es->s_last_orphan || !ext4_orphan_file_empty(sb)) {
				ext4_msg(sb, KERN_WARNING, "Couldn't "
				       "remount RDWR because of unprocessed "
				       "orphan inode list.  Please "
				       "umount/remount instead");
				err = -EINVAL;
				goto restore_opts;
			}

			/*
			 * Mounting a RDONLY partition read-write, so reread
			 * and store the current valid flag.  (It may have
			 * been changed by e2fsck since we originally mounted
			 * the partition.)
			 */
			if (sbi->s_journal) {
				err = ext4_clear_journal_err(sb, es);
				if (err)
					goto restore_opts;
			}
			sbi->s_mount_state = (le16_to_cpu(es->s_state) &
					      ~EXT4_FC_REPLAY);

			err = ext4_setup_super(sb, es, 0);
			if (err)
				goto restore_opts;

			sb->s_flags &= ~SB_RDONLY;
			if (ext4_has_feature_mmp(sb)) {
				err = ext4_multi_mount_protect(sb,
						le64_to_cpu(es->s_mmp_block));
				if (err)
					goto restore_opts;
			}
#ifdef CONFIG_QUOTA
			enable_quota = 1;
#endif
		}
	}

	/*
	 * Handle creation of system zone data early because it can fail.
	 * Releasing of existing data is done when we are sure remount will
	 * succeed.
	 */
	if (test_opt(sb, BLOCK_VALIDITY) && !sbi->s_system_blks) {
		err = ext4_setup_system_zone(sb);
		if (err)
			goto restore_opts;
	}

	if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) {
		err = ext4_commit_super(sb);
		if (err)
			goto restore_opts;
	}

#ifdef CONFIG_QUOTA
	if (enable_quota) {
		if (sb_any_quota_suspended(sb))
			dquot_resume(sb, -1);
		else if (ext4_has_feature_quota(sb)) {
			err = ext4_enable_quotas(sb);
			if (err)
				goto restore_opts;
		}
	}
	/* Release old quota file names */
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(old_opts.s_qf_names[i]);
#endif
	if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
		ext4_release_system_zone(sb);

	/*
	 * Reinitialize lazy itable initialization thread based on
	 * current settings
	 */
	if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE))
		ext4_unregister_li_request(sb);
	else {
		ext4_group_t first_not_zeroed;

		first_not_zeroed = ext4_has_uninit_itable(sb);
		ext4_register_li_request(sb, first_not_zeroed);
	}

	if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb))
		ext4_stop_mmpd(sbi);

	/*
	 * Handle aborting the filesystem as the last thing during remount to
	 * avoid obscure errors during remount when some option changes fail to
	 * apply due to shutdown filesystem.
	 */
	if (test_opt2(sb, ABORT))
		ext4_abort(sb, ESHUTDOWN, "Abort forced by user");

	return 0;

restore_opts:
	/*
	 * If there was a failing r/w to ro transition, we may need to
	 * re-enable quota
	 */
	if (sb_rdonly(sb) && !(old_sb_flags & SB_RDONLY) &&
	    sb_any_quota_suspended(sb))
		dquot_resume(sb, -1);

	alloc_ctx = ext4_writepages_down_write(sb);
	sb->s_flags = old_sb_flags;
	sbi->s_mount_opt = old_opts.s_mount_opt;
	sbi->s_mount_opt2 = old_opts.s_mount_opt2;
	sbi->s_resuid = old_opts.s_resuid;
	sbi->s_resgid = old_opts.s_resgid;
	sbi->s_commit_interval = old_opts.s_commit_interval;
	sbi->s_min_batch_time = old_opts.s_min_batch_time;
	sbi->s_max_batch_time = old_opts.s_max_batch_time;
	ext4_writepages_up_write(sb, alloc_ctx);

	if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
		ext4_release_system_zone(sb);
#ifdef CONFIG_QUOTA
	sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
		to_free[i] = get_qf_name(sb, sbi, i);
		rcu_assign_pointer(sbi->s_qf_names[i], old_opts.s_qf_names[i]);
	}
	synchronize_rcu();
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(to_free[i]);
#endif
	if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb))
		ext4_stop_mmpd(sbi);
	return err;
}
static int ext4_reconfigure(struct fs_context *fc)
{
	struct super_block *sb = fc->root->d_sb;
	int ret;

	fc->s_fs_info = EXT4_SB(sb);

	ret = ext4_check_opt_consistency(fc, sb);
	if (ret < 0)
		return ret;

	ret = __ext4_remount(fc, sb);
	if (ret < 0)
		return ret;

	ext4_msg(sb, KERN_INFO, "re-mounted %pU %s. Quota mode: %s.",
		 &sb->s_uuid, sb_rdonly(sb) ? "ro" : "r/w",
		 ext4_quota_mode(sb));

	return 0;
}
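
/*
 * Clamp statfs(2) results to the project quota limits, so a directory
 * hierarchy with a project quota reports the quota limit rather than the
 * size of the whole filesystem.
 */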
#ifdef CONFIG_QUOTA
static int ext4_statfs_project(struct super_block *sb,
			       kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dquot->dq_dqb_lock);

	limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
			     dquot->dq_dqb.dqb_bhardlimit);
	limit >>= sb->s_blocksize_bits;

	if (limit && buf->f_blocks > limit) {
		curblock = (dquot->dq_dqb.dqb_curspace +
			    dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			 (buf->f_blocks - curblock) : 0;
	}

	limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
			     dquot->dq_dqb.dqb_ihardlimit);
	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

	spin_unlock(&dquot->dq_dqb_lock);
	dqput(dquot);
	return 0;
}
#endif
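
/*
 * statfs(2) callback.  Free-space counters are kept in clusters internally,
 * so everything is scaled with EXT4_C2B(); dirty (delalloc-reserved)
 * clusters are subtracted from f_bfree, and root-reserved blocks are hidden
 * from f_bavail.
 */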
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t overhead = 0, resv_blocks;
	s64 bfree;

	resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters));

	if (!test_opt(sb, MINIX_DF))
		overhead = sbi->s_overhead;

	buf->f_type = EXT4_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead);
	bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
		percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
	/* prevent underflow in case that few free space is available */
	buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));
	buf->f_bavail = buf->f_bfree -
			(ext4_r_blocks_count(es) + resv_blocks);
	if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks))
		buf->f_bavail = 0;
	buf->f_files = le32_to_cpu(es->s_inodes_count);
	buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
	buf->f_namelen = EXT4_NAME_LEN;
	buf->f_fsid = uuid_to_fsid(es->s_uuid);

#ifdef CONFIG_QUOTA
	if (ext4_test_inode_flag(dentry->d_inode, EXT4_INODE_PROJINHERIT) &&
	    sb_has_quota_limits_enabled(sb, PRJQUOTA))
		ext4_statfs_project(sb, EXT4_I(dentry->d_inode)->i_projid, buf);
#endif
	return 0;
}
#ifdef CONFIG_QUOTA

/*
 * Helper functions so that transaction is started before we acquire dqio_sem
 * to keep correct lock ordering of transaction > dqio_sem
 */
static inline struct inode *dquot_to_inode(struct dquot *dquot)
{
	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
}
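
/*
 * Write a dquot under a journal handle so the transaction -> dqio_sem lock
 * ordering described above is preserved.
 */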
static int ext4_write_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;
	struct inode *inode;

	inode = dquot_to_inode(dquot);
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
				    EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit(dquot);
	if (ret < 0)
		ext4_error_err(dquot->dq_sb, -ret,
			       "Failed to commit dquot type %d",
			       dquot->dq_id.type);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}
static int ext4_acquire_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_acquire(dquot);
	if (ret < 0)
		ext4_error_err(dquot->dq_sb, -ret,
			       "Failed to acquire dquot type %d",
			       dquot->dq_id.type);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}
static int ext4_release_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle)) {
		/* Release dquot anyway to avoid endless cycle in dqput() */
		dquot_release(dquot);
		return PTR_ERR(handle);
	}
	ret = dquot_release(dquot);
	if (ret < 0)
		ext4_error_err(dquot->dq_sb, -ret,
			       "Failed to release dquot type %d",
			       dquot->dq_id.type);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}
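
/*
 * In journalled-quota mode the dirty dquot is written out immediately under
 * the running handle; otherwise it is only marked dirty and written back
 * later by the VFS quota code.
 */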
static int ext4_mark_dquot_dirty(struct dquot *dquot)
{
	struct super_block *sb = dquot->dq_sb;

	if (ext4_is_quota_journalled(sb)) {
		dquot_mark_dquot_dirty(dquot);
		return ext4_write_dquot(dquot);
	}
	return dquot_mark_dquot_dirty(dquot);
}
static int ext4_write_info(struct super_block *sb, int type)
{
	int ret, err;
	handle_t *handle;

	/* Data block + inode block */
	handle = ext4_journal_start_sb(sb, EXT4_HT_QUOTA, 2);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit_info(sb, type);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}
static void lockdep_set_quota_inode(struct inode *inode, int subclass)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	/* The first argument of lockdep_set_subclass has to be
	 * *exactly* the same as the argument to init_rwsem() --- in
	 * this case, in init_once() --- or lockdep gets unhappy
	 * because the name of the lock is set using the
	 * stringification of the argument to init_rwsem().
	 */
	(void) ei;	/* shut up clang warning if !CONFIG_LOCKDEP */
	lockdep_set_subclass(&ei->i_data_sem, subclass);
}
/*
 * Standard function to be called on quota_on
 */
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path)
{
	int err;

	if (!test_opt(sb, QUOTA))
		return -EINVAL;

	/* Quotafile not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		return -EXDEV;

	/* Quota already enabled for this file? */
	if (IS_NOQUOTA(d_inode(path->dentry)))
		return -EBUSY;

	/* Journaling quota? */
	if (EXT4_SB(sb)->s_qf_names[type]) {
		/* Quotafile not in fs root? */
		if (path->dentry->d_parent != sb->s_root)
			ext4_msg(sb, KERN_WARNING,
				"Quota file not on filesystem root. "
				"Journaled quota will not work");
		sb_dqopt(sb)->flags |= DQUOT_NOLIST_DIRTY;
	} else {
		/*
		 * Clear the flag just in case mount options changed since
		 * last time.
		 */
		sb_dqopt(sb)->flags &= ~DQUOT_NOLIST_DIRTY;
	}

	lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
	err = dquot_quota_on(sb, type, format_id, path);
	if (!err) {
		struct inode *inode = d_inode(path->dentry);
		handle_t *handle;

		/*
		 * Set inode flags to prevent userspace from messing with quota
		 * files. If this fails, we return success anyway since quotas
		 * are already enabled and this is not a hard failure.
		 */
		inode_lock(inode);
		handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
		if (IS_ERR(handle))
			goto unlock_inode;
		EXT4_I(inode)->i_flags |= EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL;
		inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
				S_NOATIME | S_IMMUTABLE);
		err = ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
	unlock_inode:
		inode_unlock(inode);
		if (err)
			dquot_quota_off(sb, type);
	}
	if (err)
		lockdep_set_quota_inode(path->dentry->d_inode,
					I_DATA_SEM_NORMAL);
	return err;
}
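
/*
 * Sanity-check a quota inode number read from the superblock: user and group
 * quota files live in fixed reserved inodes, while the project quota inode
 * only has to be a regular (non-reserved) inode.
 */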
static inline bool ext4_check_quota_inum(int type, unsigned long qf_inum)
{
	switch (type) {
	case USRQUOTA:
		return qf_inum == EXT4_USR_QUOTA_INO;
	case GRPQUOTA:
		return qf_inum == EXT4_GRP_QUOTA_INO;
	case PRJQUOTA:
		return qf_inum >= EXT4_GOOD_OLD_FIRST_INO;
	default:
		BUG();
	}
}
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags)
{
	int err;
	struct inode *qf_inode;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
	};

	BUG_ON(!ext4_has_feature_quota(sb));

	if (!qf_inums[type])
		return -EPERM;

	if (!ext4_check_quota_inum(type, qf_inums[type])) {
		ext4_error(sb, "Bad quota inum: %lu, type: %d",
			   qf_inums[type], type);
		return -EUCLEAN;
	}

	qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL);
	if (IS_ERR(qf_inode)) {
		ext4_error(sb, "Bad quota inode: %lu, type: %d",
			   qf_inums[type], type);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
	err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
	if (err)
		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
	iput(qf_inode);

	return err;
}
/* Enable usage tracking for all quota types. */
int ext4_enable_quotas(struct super_block *sb)
{
	int type, err = 0;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
	};
	bool quota_mopt[EXT4_MAXQUOTAS] = {
		test_opt(sb, USRQUOTA),
		test_opt(sb, GRPQUOTA),
		test_opt(sb, PRJQUOTA),
	};

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
	for (type = 0; type < EXT4_MAXQUOTAS; type++) {
		if (qf_inums[type]) {
			err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
			if (err) {
				ext4_warning(sb,
					"Failed to enable quota tracking "
					"(type=%d, err=%d, ino=%lu). "
					"Please run e2fsck to fix.", type,
					err, qf_inums[type]);

				ext4_quotas_off(sb, type);
				return err;
			}
		}
	}
	return 0;
}
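
/*
 * Disable quotas for one type.  For old-style quota files (no quota
 * feature), also restore the inode flags and timestamps that
 * ext4_quota_on() set.
 */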
static int ext4_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	handle_t *handle;
	int err;

	/* Force all delayed allocation blocks to be allocated.
	 * Caller already holds s_umount sem */
	if (test_opt(sb, DELALLOC))
		sync_filesystem(sb);

	if (!inode || !igrab(inode))
		goto out;

	err = dquot_quota_off(sb, type);
	if (err || ext4_has_feature_quota(sb))
		goto out_put;
	/*
	 * When the filesystem was remounted read-only first, we cannot cleanup
	 * inode flags here. Bad luck but people should be using QUOTA feature
	 * these days anyway.
	 */
	if (sb_rdonly(sb))
		goto out_put;

	inode_lock(inode);
	/*
	 * Update modification times of quota files when userspace can
	 * start looking at them. If we fail, we return success anyway since
	 * this is not a hard failure and quotas are already disabled.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto out_unlock;
	}
	EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL);
	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	err = ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
out_unlock:
	inode_unlock(inode);
out_put:
	lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL);
	iput(inode);
	return err;
out:
	return dquot_quota_off(sb, type);
}
/* Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks... As quota files are never truncated and quota code
 * itself serializes the operations (and no one else should touch the files)
 * we don't have to be afraid of races */
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	struct buffer_head *bh;
	loff_t i_size = i_size_read(inode);

	if (off > i_size)
		return 0;
	if (off+len > i_size)
		len = i_size-off;
	toread = len;
	while (toread > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
		bh = ext4_bread(NULL, inode, blk, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		if (!bh)	/* A hole? */
			memset(data, 0, tocopy);
		else
			memcpy(data, bh->b_data+offset, tocopy);
		brelse(bh);
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}
/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int err = 0, err2 = 0, offset = off & (sb->s_blocksize - 1);
	int retries = 0;
	struct buffer_head *bh;
	handle_t *handle = journal_current_handle();

	if (!handle) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because transaction is not started",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}
	/*
	 * Since we account only one data block in transaction credits,
	 * then it is impossible to cross a block boundary.
	 */
	if (sb->s_blocksize - offset < len) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because not block aligned",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}

	do {
		bh = ext4_bread(handle, inode, blk,
				EXT4_GET_BLOCKS_CREATE |
				EXT4_GET_BLOCKS_METADATA_NOFAIL);
	} while (PTR_ERR(bh) == -ENOSPC &&
		 ext4_should_retry_alloc(inode->i_sb, &retries));
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	if (!bh)
		goto out;
	BUFFER_TRACE(bh, "get write access");
	err = ext4_journal_get_write_access(handle, sb, bh, EXT4_JTR_NONE);
	if (err) {
		brelse(bh);
		return err;
	}
	lock_buffer(bh);
	memcpy(bh->b_data+offset, data, len);
	flush_dcache_page(bh->b_page);
	unlock_buffer(bh);
	err = ext4_handle_dirty_metadata(handle, NULL, bh);
	brelse(bh);
out:
	if (inode->i_size < off + len) {
		i_size_write(inode, off + len);
		EXT4_I(inode)->i_disksize = inode->i_size;
		err2 = ext4_mark_inode_dirty(handle, inode);
		if (unlikely(err2 && !err))
			err = err2;
	}
	return err ? err : len;
}
#endif
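
/*
 * When the native ext2 driver is not built and ext4 is configured to serve
 * ext2 filesystems, register an "ext2" filesystem type backed by ext4; the
 * feature checks below decide whether a given superblock really falls
 * within the ext2/ext3 feature sets.
 */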
#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static inline void register_as_ext2(void)
{
	int err = register_filesystem(&ext2_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext2 (%d)\n", err);
}

static inline void unregister_as_ext2(void)
{
	unregister_filesystem(&ext2_fs_type);
}

static inline int ext2_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext2_incompat_features(sb))
		return 0;
	if (sb_rdonly(sb))
		return 1;
	if (ext4_has_unknown_ext2_ro_compat_features(sb))
		return 0;
	return 1;
}
#else
static inline void register_as_ext2(void) { }
static inline void unregister_as_ext2(void) { }
static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
#endif
static inline void register_as_ext3(void)
{
	int err = register_filesystem(&ext3_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext3 (%d)\n", err);
}

static inline void unregister_as_ext3(void)
{
	unregister_filesystem(&ext3_fs_type);
}

static inline int ext3_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext3_incompat_features(sb))
		return 0;
	if (!ext4_has_feature_journal(sb))
		return 0;
	if (sb_rdonly(sb))
		return 1;
	if (ext4_has_unknown_ext3_ro_compat_features(sb))
		return 0;
	return 1;
}
static void ext4_kill_sb(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct file *bdev_file = sbi ? sbi->s_journal_bdev_file : NULL;

	kill_block_super(sb);

	if (bdev_file)
		bdev_fput(bdev_file);
}
static struct file_system_type ext4_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "ext4",
	.init_fs_context	= ext4_init_fs_context,
	.parameters		= ext4_param_specs,
	.kill_sb		= ext4_kill_sb,
	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP | FS_MGTIME,
};
MODULE_ALIAS_FS("ext4");
/* Shared across all ext4 file systems */
wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
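
/*
 * Module init: set up caches and subsystems in dependency order, then
 * register the ext2/ext3 compatibility types and ext4 itself; on failure,
 * unwind in reverse order.
 */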
static int __init ext4_init_fs(void)
{
	int i, err;

	ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
	ext4_li_info = NULL;

	/* Build-time check for flags consistency */
	ext4_check_flag_values();

	for (i = 0; i < EXT4_WQ_HASH_SZ; i++)
		init_waitqueue_head(&ext4__ioend_wq[i]);

	err = ext4_init_es();
	if (err)
		return err;

	err = ext4_init_pending();
	if (err)
		goto out7;

	err = ext4_init_post_read_processing();
	if (err)
		goto out6;

	err = ext4_init_pageio();
	if (err)
		goto out5;

	err = ext4_init_system_zone();
	if (err)
		goto out4;

	err = ext4_init_sysfs();
	if (err)
		goto out3;

	err = ext4_init_mballoc();
	if (err)
		goto out2;
	err = init_inodecache();
	if (err)
		goto out1;

	err = ext4_fc_init_dentry_cache();
	if (err)
		goto out05;

	register_as_ext3();
	register_as_ext2();
	err = register_filesystem(&ext4_fs_type);
	if (err)
		goto out;

	return 0;
out:
	unregister_as_ext2();
	unregister_as_ext3();
	ext4_fc_destroy_dentry_cache();
out05:
	destroy_inodecache();
out1:
	ext4_exit_mballoc();
out2:
	ext4_exit_sysfs();
out3:
	ext4_exit_system_zone();
out4:
	ext4_exit_pageio();
out5:
	ext4_exit_post_read_processing();
out6:
	ext4_exit_pending();
out7:
	ext4_exit_es();
	return err;
}
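
/* Module exit: tear everything down in the reverse order of ext4_init_fs(). */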
static void __exit ext4_exit_fs(void)
{
	ext4_destroy_lazyinit_thread();
	unregister_as_ext2();
	unregister_as_ext3();
	unregister_filesystem(&ext4_fs_type);
	ext4_fc_destroy_dentry_cache();
	destroy_inodecache();
	ext4_exit_mballoc();
	ext4_exit_sysfs();
	ext4_exit_system_zone();
	ext4_exit_pageio();
	ext4_exit_post_read_processing();
	ext4_exit_es();
	ext4_exit_pending();
}
MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
MODULE_DESCRIPTION("Fourth Extended Filesystem");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crc32c");
module_init(ext4_init_fs)
module_exit(ext4_exit_fs)