// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include <linux/module.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include <linux/crc32c.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include "xattr.h"

#define CREATE_TRACE_POINTS
#include <trace/events/erofs.h>
static struct kmem_cache *erofs_inode_cachep __read_mostly;
void _erofs_err(struct super_block *sb, const char *function,
                const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);

        vaf.fmt = fmt;
        vaf.va = &args;

        pr_err("(device %s): %s: %pV", sb->s_id, function, &vaf);
        va_end(args);
}
void _erofs_info(struct super_block *sb, const char *function,
                 const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);

        vaf.fmt = fmt;
        vaf.va = &args;

        pr_info("(device %s): %pV", sb->s_id, &vaf);
        va_end(args);
}
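/*
 * Verify the on-disk superblock checksum: crc32c over the superblock area
 * (from EROFS_SUPER_OFFSET to the end of the first block) with the
 * checksum field itself cleared.
 */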
static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
{
        struct erofs_super_block *dsb;
        u32 expected_crc, crc;

        dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET,
                      EROFS_BLKSIZ - EROFS_SUPER_OFFSET, GFP_KERNEL);
        if (!dsb)
                return -ENOMEM;

        expected_crc = le32_to_cpu(dsb->checksum);
        dsb->checksum = 0;
        /* to allow for x86 boot sectors and other oddities */
        crc = crc32c(~0, dsb, EROFS_BLKSIZ - EROFS_SUPER_OFFSET);
        kfree(dsb);

        if (crc != expected_crc) {
                erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
                          crc, expected_crc);
                return -EBADMSG;
        }
        return 0;
}
static void erofs_inode_init_once(void *ptr)
{
        struct erofs_inode *vi = ptr;

        inode_init_once(&vi->vfs_inode);
}
static struct inode *erofs_alloc_inode(struct super_block *sb)
{
        struct erofs_inode *vi =
                kmem_cache_alloc(erofs_inode_cachep, GFP_KERNEL);

        if (!vi)
                return NULL;

        /* zero out everything except vfs_inode */
        memset(vi, 0, offsetof(struct erofs_inode, vfs_inode));
        return &vi->vfs_inode;
}
static void erofs_free_inode(struct inode *inode)
{
        struct erofs_inode *vi = EROFS_I(inode);

        /* be careful of RCU symlink path */
        if (inode->i_op == &erofs_fast_symlink_iops)
                kfree(inode->i_link);
        kfree(vi->xattr_shared_xattrs);

        kmem_cache_free(erofs_inode_cachep, vi);
}
static bool check_layout_compatibility(struct super_block *sb,
                                       struct erofs_super_block *dsb)
{
        const unsigned int feature = le32_to_cpu(dsb->feature_incompat);

        EROFS_SB(sb)->feature_incompat = feature;

        /* check if current kernel meets all mandatory requirements */
        if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) {
                erofs_err(sb,
                          "unidentified incompatible feature %x, please upgrade kernel version",
                          feature & ~EROFS_ALL_FEATURE_INCOMPAT);
                return false;
        }
        return true;
}
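/*
 * Read the first block of the backing device, validate the on-disk
 * superblock (magic, optional checksum, block size, incompatible features)
 * and copy its fields into the in-memory erofs_sb_info.
 */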
static int erofs_read_superblock(struct super_block *sb)
{
        struct erofs_sb_info *sbi;
        struct page *page;
        struct erofs_super_block *dsb;
        unsigned int blkszbits;
        void *data;
        int ret;

        page = read_mapping_page(sb->s_bdev->bd_inode->i_mapping, 0, NULL);
        if (IS_ERR(page)) {
                erofs_err(sb, "cannot read erofs superblock");
                return PTR_ERR(page);
        }

        sbi = EROFS_SB(sb);

        data = kmap(page);
        dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET);

        ret = -EINVAL;
        if (le32_to_cpu(dsb->magic) != EROFS_SUPER_MAGIC_V1) {
                erofs_err(sb, "cannot find valid erofs superblock");
                goto out;
        }

        sbi->feature_compat = le32_to_cpu(dsb->feature_compat);
        if (sbi->feature_compat & EROFS_FEATURE_COMPAT_SB_CHKSUM) {
                ret = erofs_superblock_csum_verify(sb, data);
                if (ret)
                        goto out;
        }

        ret = -EINVAL;
        blkszbits = dsb->blkszbits;
        /* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
        if (blkszbits != LOG_BLOCK_SIZE) {
                erofs_err(sb, "blksize %u isn't supported on this platform",
                          blkszbits);
                goto out;
        }

        if (!check_layout_compatibility(sb, dsb))
                goto out;

        sbi->blocks = le32_to_cpu(dsb->blocks);
        sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr);
#ifdef CONFIG_EROFS_FS_XATTR
        sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr);
#endif
        sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact));
        sbi->root_nid = le16_to_cpu(dsb->root_nid);
        sbi->inos = le64_to_cpu(dsb->inos);

        sbi->build_time = le64_to_cpu(dsb->build_time);
        sbi->build_time_nsec = le32_to_cpu(dsb->build_time_nsec);

        memcpy(&sb->s_uuid, dsb->uuid, sizeof(dsb->uuid));

        ret = strscpy(sbi->volume_name, dsb->volume_name,
                      sizeof(dsb->volume_name));
        if (ret < 0) {  /* -E2BIG */
                erofs_err(sb, "bad volume name without NIL terminator");
                ret = -EFSCORRUPTED;
                goto out;
        }
        ret = 0;
out:
        kunmap(page);
        put_page(page);
        return ret;
}
/* set up default EROFS parameters */
static void erofs_default_options(struct erofs_fs_context *ctx)
{
#ifdef CONFIG_EROFS_FS_ZIP
        ctx->cache_strategy = EROFS_ZIP_CACHE_READAROUND;
        ctx->max_sync_decompress_pages = 3;
#endif
#ifdef CONFIG_EROFS_FS_XATTR
        set_opt(ctx, XATTR_USER);
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
        set_opt(ctx, POSIX_ACL);
#endif
}
enum {
        Opt_user_xattr,
        Opt_acl,
        Opt_cache_strategy,
        Opt_err
};

static const struct constant_table erofs_param_cache_strategy[] = {
        {"disabled",    EROFS_ZIP_CACHE_DISABLED},
        {"readahead",   EROFS_ZIP_CACHE_READAHEAD},
        {"readaround",  EROFS_ZIP_CACHE_READAROUND},
        {}
};

static const struct fs_parameter_spec erofs_fs_parameters[] = {
        fsparam_flag_no("user_xattr",   Opt_user_xattr),
        fsparam_flag_no("acl",          Opt_acl),
        fsparam_enum("cache_strategy",  Opt_cache_strategy,
                     erofs_param_cache_strategy),
        {}
};
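/*
 * Parse a single mount parameter delivered through the new mount API
 * (fs_context); options whose backing feature is not compiled in are
 * reported via errorfc() and otherwise ignored.
 */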
static int erofs_fc_parse_param(struct fs_context *fc,
                                struct fs_parameter *param)
{
        struct erofs_fs_context *ctx __maybe_unused = fc->fs_private;
        struct fs_parse_result result;
        int opt;

        opt = fs_parse(fc, erofs_fs_parameters, param, &result);
        if (opt < 0)
                return opt;

        switch (opt) {
        case Opt_user_xattr:
#ifdef CONFIG_EROFS_FS_XATTR
                if (result.boolean)
                        set_opt(ctx, XATTR_USER);
                else
                        clear_opt(ctx, XATTR_USER);
#else
                errorfc(fc, "{,no}user_xattr options not supported");
#endif
                break;
        case Opt_acl:
#ifdef CONFIG_EROFS_FS_POSIX_ACL
                if (result.boolean)
                        set_opt(ctx, POSIX_ACL);
                else
                        clear_opt(ctx, POSIX_ACL);
#else
                errorfc(fc, "{,no}acl options not supported");
#endif
                break;
        case Opt_cache_strategy:
#ifdef CONFIG_EROFS_FS_ZIP
                ctx->cache_strategy = result.uint_32;
#else
                errorfc(fc, "compression not supported, cache_strategy ignored");
#endif
                break;
        default:
                return -ENOPARAM;
        }
        return 0;
}
#ifdef CONFIG_EROFS_FS_ZIP
static const struct address_space_operations managed_cache_aops;

static int erofs_managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
{
        int ret = 1;    /* 0 - busy */
        struct address_space *const mapping = page->mapping;

        DBG_BUGON(!PageLocked(page));
        DBG_BUGON(mapping->a_ops != &managed_cache_aops);

        if (PagePrivate(page))
                ret = erofs_try_to_free_cached_page(mapping, page);

        return ret;
}

static void erofs_managed_cache_invalidatepage(struct page *page,
                                               unsigned int offset,
                                               unsigned int length)
{
        const unsigned int stop = length + offset;

        DBG_BUGON(!PageLocked(page));

        /* Check for potential overflow in debug mode */
        DBG_BUGON(stop > PAGE_SIZE || stop < length);

        if (offset == 0 && stop == PAGE_SIZE)
                while (!erofs_managed_cache_releasepage(page, GFP_NOFS))
                        cond_resched();
}

static const struct address_space_operations managed_cache_aops = {
        .releasepage = erofs_managed_cache_releasepage,
        .invalidatepage = erofs_managed_cache_invalidatepage,
};
static int erofs_init_managed_cache(struct super_block *sb)
{
        struct erofs_sb_info *const sbi = EROFS_SB(sb);
        struct inode *const inode = new_inode(sb);

        if (!inode)
                return -ENOMEM;

        set_nlink(inode, 1);
        inode->i_size = OFFSET_MAX;

        inode->i_mapping->a_ops = &managed_cache_aops;
        mapping_set_gfp_mask(inode->i_mapping,
                             GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
        sbi->managed_cache = inode;
        return 0;
}
#else
static int erofs_init_managed_cache(struct super_block *sb) { return 0; }
#endif
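/*
 * Fill a new superblock: set the block size, read the on-disk superblock,
 * apply mount options, look up the root inode and set up the shrinker and
 * managed cache.
 */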
static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
{
        struct inode *inode;
        struct erofs_sb_info *sbi;
        struct erofs_fs_context *ctx = fc->fs_private;
        int err;

        sb->s_magic = EROFS_SUPER_MAGIC;

        if (!sb_set_blocksize(sb, EROFS_BLKSIZ)) {
                erofs_err(sb, "failed to set erofs blksize");
                return -EINVAL;
        }

        sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
        if (!sbi)
                return -ENOMEM;

        sb->s_fs_info = sbi;
        err = erofs_read_superblock(sb);
        if (err)
                return err;

        sb->s_flags |= SB_RDONLY | SB_NOATIME;
        sb->s_maxbytes = MAX_LFS_FILESIZE;
        sb->s_time_gran = 1;

        sb->s_op = &erofs_sops;
        sb->s_xattr = erofs_xattr_handlers;

        if (test_opt(ctx, POSIX_ACL))
                sb->s_flags |= SB_POSIXACL;
        else
                sb->s_flags &= ~SB_POSIXACL;

        sbi->ctx = *ctx;

#ifdef CONFIG_EROFS_FS_ZIP
        xa_init(&sbi->managed_pslots);
#endif

        /* get the root inode */
        inode = erofs_iget(sb, ROOT_NID(sbi), true);
        if (IS_ERR(inode))
                return PTR_ERR(inode);

        if (!S_ISDIR(inode->i_mode)) {
                erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)",
                          ROOT_NID(sbi), inode->i_mode);
                iput(inode);
                return -EINVAL;
        }

        sb->s_root = d_make_root(inode);
        if (!sb->s_root)
                return -ENOMEM;

        erofs_shrinker_register(sb);
        /* sb->s_umount is already locked, SB_ACTIVE and SB_BORN are not set */
        err = erofs_init_managed_cache(sb);
        if (err)
                return err;

        erofs_info(sb, "mounted with root inode @ nid %llu.", ROOT_NID(sbi));
        return 0;
}
static int erofs_fc_get_tree(struct fs_context *fc)
{
        return get_tree_bdev(fc, erofs_fc_fill_super);
}
static int erofs_fc_reconfigure(struct fs_context *fc)
{
        struct super_block *sb = fc->root->d_sb;
        struct erofs_sb_info *sbi = EROFS_SB(sb);
        struct erofs_fs_context *ctx = fc->fs_private;

        DBG_BUGON(!sb_rdonly(sb));

        if (test_opt(ctx, POSIX_ACL))
                fc->sb_flags |= SB_POSIXACL;
        else
                fc->sb_flags &= ~SB_POSIXACL;

        sbi->ctx = *ctx;

        fc->sb_flags |= SB_RDONLY;
        return 0;
}
static void erofs_fc_free(struct fs_context *fc)
{
        kfree(fc->fs_private);
}
static const struct fs_context_operations erofs_context_ops = {
        .parse_param    = erofs_fc_parse_param,
        .get_tree       = erofs_fc_get_tree,
        .reconfigure    = erofs_fc_reconfigure,
        .free           = erofs_fc_free,
};
static int erofs_init_fs_context(struct fs_context *fc)
{
        fc->fs_private = kzalloc(sizeof(struct erofs_fs_context), GFP_KERNEL);
        if (!fc->fs_private)
                return -ENOMEM;

        /* set default mount options */
        erofs_default_options(fc->fs_private);

        fc->ops = &erofs_context_ops;

        return 0;
}
/*
 * could be triggered after deactivate_locked_super()
 * is called, i.e. on umount or when initialization fails.
 */
static void erofs_kill_sb(struct super_block *sb)
{
        struct erofs_sb_info *sbi;

        WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC);

        kill_block_super(sb);

        sbi = EROFS_SB(sb);
        if (!sbi)
                return;
        kfree(sbi);
        sb->s_fs_info = NULL;
}
/* called when ->s_root is non-NULL */
static void erofs_put_super(struct super_block *sb)
{
        struct erofs_sb_info *const sbi = EROFS_SB(sb);

        DBG_BUGON(!sbi);

        erofs_shrinker_unregister(sb);
#ifdef CONFIG_EROFS_FS_ZIP
        iput(sbi->managed_cache);
        sbi->managed_cache = NULL;
#endif
}
static struct file_system_type erofs_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "erofs",
        .init_fs_context = erofs_init_fs_context,
        .kill_sb        = erofs_kill_sb,
        .fs_flags       = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("erofs");
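/*
 * Module initialization: create the inode cache, then bring up the
 * shrinker, the compression subsystem and the filesystem type, unwinding
 * in reverse order on failure.
 */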
static int __init erofs_module_init(void)
{
        int err;

        erofs_check_ondisk_layout_definitions();

        erofs_inode_cachep = kmem_cache_create("erofs_inode",
                                               sizeof(struct erofs_inode), 0,
                                               SLAB_RECLAIM_ACCOUNT,
                                               erofs_inode_init_once);
        if (!erofs_inode_cachep) {
                err = -ENOMEM;
                goto icache_err;
        }

        err = erofs_init_shrinker();
        if (err)
                goto shrinker_err;

        err = z_erofs_init_zip_subsystem();
        if (err)
                goto zip_err;

        err = register_filesystem(&erofs_fs_type);
        if (err)
                goto fs_err;

        return 0;

fs_err:
        z_erofs_exit_zip_subsystem();
zip_err:
        erofs_exit_shrinker();
shrinker_err:
        kmem_cache_destroy(erofs_inode_cachep);
icache_err:
        return err;
}
static void __exit erofs_module_exit(void)
{
        unregister_filesystem(&erofs_fs_type);
        z_erofs_exit_zip_subsystem();
        erofs_exit_shrinker();

        /* Ensure all RCU free inodes are safe before cache is destroyed. */
        rcu_barrier();
        kmem_cache_destroy(erofs_inode_cachep);
}
/* get filesystem statistics */
static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct super_block *sb = dentry->d_sb;
        struct erofs_sb_info *sbi = EROFS_SB(sb);
        u64 id = huge_encode_dev(sb->s_bdev->bd_dev);

        buf->f_type = sb->s_magic;
        buf->f_bsize = EROFS_BLKSIZ;
        buf->f_blocks = sbi->blocks;
        buf->f_bfree = buf->f_bavail = 0;

        buf->f_files = ULLONG_MAX;
        buf->f_ffree = ULLONG_MAX - sbi->inos;

        buf->f_namelen = EROFS_NAME_LEN;

        buf->f_fsid = u64_to_fsid(id);
        return 0;
}
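/* show the effective mount options, e.g. for /proc/mounts */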
static int erofs_show_options(struct seq_file *seq, struct dentry *root)
{
        struct erofs_sb_info *sbi __maybe_unused = EROFS_SB(root->d_sb);
        struct erofs_fs_context *ctx __maybe_unused = &sbi->ctx;

#ifdef CONFIG_EROFS_FS_XATTR
        if (test_opt(ctx, XATTR_USER))
                seq_puts(seq, ",user_xattr");
        else
                seq_puts(seq, ",nouser_xattr");
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
        if (test_opt(ctx, POSIX_ACL))
                seq_puts(seq, ",acl");
        else
                seq_puts(seq, ",noacl");
#endif
#ifdef CONFIG_EROFS_FS_ZIP
        if (ctx->cache_strategy == EROFS_ZIP_CACHE_DISABLED)
                seq_puts(seq, ",cache_strategy=disabled");
        else if (ctx->cache_strategy == EROFS_ZIP_CACHE_READAHEAD)
                seq_puts(seq, ",cache_strategy=readahead");
        else if (ctx->cache_strategy == EROFS_ZIP_CACHE_READAROUND)
                seq_puts(seq, ",cache_strategy=readaround");
#endif
        return 0;
}
const struct super_operations erofs_sops = {
        .put_super = erofs_put_super,
        .alloc_inode = erofs_alloc_inode,
        .free_inode = erofs_free_inode,
        .statfs = erofs_statfs,
        .show_options = erofs_show_options,
};
module_init(erofs_module_init);
module_exit(erofs_module_exit);

MODULE_DESCRIPTION("Enhanced ROM File System");
MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc.");
MODULE_LICENSE("GPL");