// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */

#include <linux/module.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include <linux/crc32c.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include "xattr.h"

#define CREATE_TRACE_POINTS
#include <trace/events/erofs.h>

static struct kmem_cache *erofs_inode_cachep __read_mostly;

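/*
 * Message helpers backing the erofs_err()/erofs_info() wrapper macros: the
 * calling function name is passed in explicitly so every log line identifies
 * both the block device and the caller.
 */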
void _erofs_err(struct super_block *sb, const char *function,
                const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);

        vaf.fmt = fmt;
        vaf.va = &args;

        pr_err("(device %s): %s: %pV", sb->s_id, function, &vaf);
        va_end(args);
}

void _erofs_info(struct super_block *sb, const char *function,
                 const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);

        vaf.fmt = fmt;
        vaf.va = &args;

        pr_info("(device %s): %pV", sb->s_id, &vaf);
        va_end(args);
}

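/*
 * Superblock checksum verification: crc32c is computed over the superblock
 * area starting at EROFS_SUPER_OFFSET, with the on-disk checksum field
 * cleared first so it does not feed into its own checksum.
 */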
static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
{
        struct erofs_super_block *dsb;
        u32 expected_crc, crc;

        dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET,
                      EROFS_BLKSIZ - EROFS_SUPER_OFFSET, GFP_KERNEL);
        if (!dsb)
                return -ENOMEM;

        expected_crc = le32_to_cpu(dsb->checksum);
        dsb->checksum = 0;
        /* to allow for x86 boot sectors and other oddities. */
        crc = crc32c(~0, dsb, EROFS_BLKSIZ - EROFS_SUPER_OFFSET);
        kfree(dsb);

        if (crc != expected_crc) {
                erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
                          crc, expected_crc);
                return -EBADMSG;
        }
        return 0;
}

static void erofs_inode_init_once(void *ptr)
{
        struct erofs_inode *vi = ptr;

        inode_init_once(&vi->vfs_inode);
}

static struct inode *erofs_alloc_inode(struct super_block *sb)
{
        struct erofs_inode *vi =
                kmem_cache_alloc(erofs_inode_cachep, GFP_KERNEL);

        if (!vi)
                return NULL;

        /* zero out everything except vfs_inode */
        memset(vi, 0, offsetof(struct erofs_inode, vfs_inode));
        return &vi->vfs_inode;
}

static void erofs_free_inode(struct inode *inode)
{
        struct erofs_inode *vi = EROFS_I(inode);

        /* be careful of RCU symlink path */
        if (inode->i_op == &erofs_fast_symlink_iops)
                kfree(inode->i_link);
        kfree(vi->xattr_shared_xattrs);

        kmem_cache_free(erofs_inode_cachep, vi);
}

static bool check_layout_compatibility(struct super_block *sb,
                                       struct erofs_super_block *dsb)
{
        const unsigned int feature = le32_to_cpu(dsb->feature_incompat);

        EROFS_SB(sb)->feature_incompat = feature;

        /* check if current kernel meets all mandatory requirements */
        if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) {
                erofs_err(sb,
                          "unidentified incompatible feature %x, please upgrade kernel version",
                          feature & ~EROFS_ALL_FEATURE_INCOMPAT);
                return false;
        }
        return true;
}

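/*
 * Read block 0 through the backing device's page cache, locate the on-disk
 * superblock at EROFS_SUPER_OFFSET, and validate magic, optional checksum,
 * block size and incompatible features before copying the remaining fields
 * into the in-memory erofs_sb_info.
 */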
static int erofs_read_superblock(struct super_block *sb)
{
        struct erofs_sb_info *sbi;
        struct page *page;
        struct erofs_super_block *dsb;
        unsigned int blkszbits;
        void *data;
        int ret;

        page = read_mapping_page(sb->s_bdev->bd_inode->i_mapping, 0, NULL);
        if (IS_ERR(page)) {
                erofs_err(sb, "cannot read erofs superblock");
                return PTR_ERR(page);
        }

        sbi = EROFS_SB(sb);

        data = kmap(page);
        dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET);

        ret = -EINVAL;
        if (le32_to_cpu(dsb->magic) != EROFS_SUPER_MAGIC_V1) {
                erofs_err(sb, "cannot find valid erofs superblock");
                goto out;
        }

        sbi->feature_compat = le32_to_cpu(dsb->feature_compat);
        if (sbi->feature_compat & EROFS_FEATURE_COMPAT_SB_CHKSUM) {
                ret = erofs_superblock_csum_verify(sb, data);
                if (ret)
                        goto out;
        }

        ret = -EINVAL;
        blkszbits = dsb->blkszbits;
        /* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
        if (blkszbits != LOG_BLOCK_SIZE) {
                erofs_err(sb, "blksize %u isn't supported on this platform",
                          1 << blkszbits);
                goto out;
        }

        if (!check_layout_compatibility(sb, dsb))
                goto out;

        sbi->blocks = le32_to_cpu(dsb->blocks);
        sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr);
#ifdef CONFIG_EROFS_FS_XATTR
        sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr);
#endif
        sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact));
        sbi->root_nid = le16_to_cpu(dsb->root_nid);
        sbi->inos = le64_to_cpu(dsb->inos);

        sbi->build_time = le64_to_cpu(dsb->build_time);
        sbi->build_time_nsec = le32_to_cpu(dsb->build_time_nsec);

        memcpy(&sb->s_uuid, dsb->uuid, sizeof(dsb->uuid));

        ret = strscpy(sbi->volume_name, dsb->volume_name,
                      sizeof(dsb->volume_name));
        if (ret < 0) {  /* -E2BIG */
                erofs_err(sb, "bad volume name without NIL terminator");
                ret = -EFSCORRUPTED;
                goto out;
        }
        ret = 0;
out:
        kunmap(page);
        put_page(page);
        return ret;
}

/* set up default EROFS parameters */
static void erofs_default_options(struct erofs_fs_context *ctx)
{
#ifdef CONFIG_EROFS_FS_ZIP
        ctx->cache_strategy = EROFS_ZIP_CACHE_READAROUND;
        ctx->max_sync_decompress_pages = 3;
#endif
#ifdef CONFIG_EROFS_FS_XATTR
        set_opt(ctx, XATTR_USER);
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
        set_opt(ctx, POSIX_ACL);
#endif
}

enum {
        Opt_user_xattr,
        Opt_acl,
        Opt_cache_strategy,
        Opt_err
};

static const struct constant_table erofs_param_cache_strategy[] = {
        {"disabled",    EROFS_ZIP_CACHE_DISABLED},
        {"readahead",   EROFS_ZIP_CACHE_READAHEAD},
        {"readaround",  EROFS_ZIP_CACHE_READAROUND},
        {}
};

static const struct fs_parameter_spec erofs_fs_parameters[] = {
        fsparam_flag_no("user_xattr",   Opt_user_xattr),
        fsparam_flag_no("acl",          Opt_acl),
        fsparam_enum("cache_strategy",  Opt_cache_strategy,
                     erofs_param_cache_strategy),
        {}
};

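/* Parse a single mount parameter delivered through the (fs_context) mount API. */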
static int erofs_fc_parse_param(struct fs_context *fc,
                                struct fs_parameter *param)
{
        struct erofs_fs_context *ctx __maybe_unused = fc->fs_private;
        struct fs_parse_result result;
        int opt;

        opt = fs_parse(fc, erofs_fs_parameters, param, &result);
        if (opt < 0)
                return opt;

        switch (opt) {
        case Opt_user_xattr:
#ifdef CONFIG_EROFS_FS_XATTR
                if (result.boolean)
                        set_opt(ctx, XATTR_USER);
                else
                        clear_opt(ctx, XATTR_USER);
#else
                errorfc(fc, "{,no}user_xattr options not supported");
#endif
                break;
        case Opt_acl:
#ifdef CONFIG_EROFS_FS_POSIX_ACL
                if (result.boolean)
                        set_opt(ctx, POSIX_ACL);
                else
                        clear_opt(ctx, POSIX_ACL);
#else
                errorfc(fc, "{,no}acl options not supported");
#endif
                break;
        case Opt_cache_strategy:
#ifdef CONFIG_EROFS_FS_ZIP
                ctx->cache_strategy = result.uint_32;
#else
                errorfc(fc, "compression not supported, cache_strategy ignored");
#endif
                break;
        default:
                return -ENOPARAM;
        }
        return 0;
}

#ifdef CONFIG_EROFS_FS_ZIP
static const struct address_space_operations managed_cache_aops;

static int erofs_managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
{
        int ret = 1;    /* 0 - busy */
        struct address_space *const mapping = page->mapping;

        DBG_BUGON(!PageLocked(page));
        DBG_BUGON(mapping->a_ops != &managed_cache_aops);

        if (PagePrivate(page))
                ret = erofs_try_to_free_cached_page(mapping, page);

        return ret;
}

static void erofs_managed_cache_invalidatepage(struct page *page,
                                               unsigned int offset,
                                               unsigned int length)
{
        const unsigned int stop = length + offset;

        DBG_BUGON(!PageLocked(page));

        /* Check for potential overflow in debug mode */
        DBG_BUGON(stop > PAGE_SIZE || stop < length);

        if (offset == 0 && stop == PAGE_SIZE)
                while (!erofs_managed_cache_releasepage(page, GFP_NOFS))
                        cond_resched();
}

static const struct address_space_operations managed_cache_aops = {
        .releasepage = erofs_managed_cache_releasepage,
        .invalidatepage = erofs_managed_cache_invalidatepage,
};

static int erofs_init_managed_cache(struct super_block *sb)
{
        struct erofs_sb_info *const sbi = EROFS_SB(sb);
        struct inode *const inode = new_inode(sb);

        if (!inode)
                return -ENOMEM;

        set_nlink(inode, 1);
        inode->i_size = OFFSET_MAX;

        inode->i_mapping->a_ops = &managed_cache_aops;
        mapping_set_gfp_mask(inode->i_mapping,
                             GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
        sbi->managed_cache = inode;
        return 0;
}
#else
static int erofs_init_managed_cache(struct super_block *sb) { return 0; }
#endif

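/*
 * Fill a superblock for the new mount API: set the fixed block size, read and
 * validate the on-disk superblock, apply the parsed mount context, load the
 * root inode, then register the per-sb shrinker and the managed cache.
 */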
static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
{
        struct inode *inode;
        struct erofs_sb_info *sbi;
        struct erofs_fs_context *ctx = fc->fs_private;
        int err;

        sb->s_magic = EROFS_SUPER_MAGIC;

        if (!sb_set_blocksize(sb, EROFS_BLKSIZ)) {
                erofs_err(sb, "failed to set erofs blksize");
                return -EINVAL;
        }

        sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
        if (!sbi)
                return -ENOMEM;

        sb->s_fs_info = sbi;
        err = erofs_read_superblock(sb);
        if (err)
                return err;

        sb->s_flags |= SB_RDONLY | SB_NOATIME;
        sb->s_maxbytes = MAX_LFS_FILESIZE;
        sb->s_time_gran = 1;

        sb->s_op = &erofs_sops;
        sb->s_xattr = erofs_xattr_handlers;

        if (test_opt(ctx, POSIX_ACL))
                sb->s_flags |= SB_POSIXACL;
        else
                sb->s_flags &= ~SB_POSIXACL;

        sbi->ctx = *ctx;

#ifdef CONFIG_EROFS_FS_ZIP
        xa_init(&sbi->managed_pslots);
#endif

        /* get the root inode */
        inode = erofs_iget(sb, ROOT_NID(sbi), true);
        if (IS_ERR(inode))
                return PTR_ERR(inode);

        if (!S_ISDIR(inode->i_mode)) {
                erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)",
                          ROOT_NID(sbi), inode->i_mode);
                iput(inode);
                return -EINVAL;
        }

        sb->s_root = d_make_root(inode);
        if (!sb->s_root)
                return -ENOMEM;

        erofs_shrinker_register(sb);
        /* sb->s_umount is already locked, SB_ACTIVE and SB_BORN are not set */
        err = erofs_init_managed_cache(sb);
        if (err)
                return err;

        erofs_info(sb, "mounted with root inode @ nid %llu.", ROOT_NID(sbi));
        return 0;
}

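/* Resolve the backing block device and fill the superblock. */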
static int erofs_fc_get_tree(struct fs_context *fc)
{
        return get_tree_bdev(fc, erofs_fc_fill_super);
}

static int erofs_fc_reconfigure(struct fs_context *fc)
{
        struct super_block *sb = fc->root->d_sb;
        struct erofs_sb_info *sbi = EROFS_SB(sb);
        struct erofs_fs_context *ctx = fc->fs_private;

        DBG_BUGON(!sb_rdonly(sb));

        if (test_opt(ctx, POSIX_ACL))
                fc->sb_flags |= SB_POSIXACL;
        else
                fc->sb_flags &= ~SB_POSIXACL;

        sbi->ctx = *ctx;

        fc->sb_flags |= SB_RDONLY;
        return 0;
}

static void erofs_fc_free(struct fs_context *fc)
{
        kfree(fc->fs_private);
}

static const struct fs_context_operations erofs_context_ops = {
        .parse_param    = erofs_fc_parse_param,
        .get_tree       = erofs_fc_get_tree,
        .reconfigure    = erofs_fc_reconfigure,
        .free           = erofs_fc_free,
};

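/* Allocate the per-mount context and apply default options before parsing. */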
static int erofs_init_fs_context(struct fs_context *fc)
{
        fc->fs_private = kzalloc(sizeof(struct erofs_fs_context), GFP_KERNEL);
        if (!fc->fs_private)
                return -ENOMEM;

        /* set default mount options */
        erofs_default_options(fc->fs_private);

        fc->ops = &erofs_context_ops;

        return 0;
}

/*
 * erofs_kill_sb() can be triggered after deactivate_locked_super()
 * is called, thus covering both normal umounts and mounts that failed
 * to initialize.
 */
static void erofs_kill_sb(struct super_block *sb)
{
        struct erofs_sb_info *sbi;

        WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC);

        kill_block_super(sb);

        sbi = EROFS_SB(sb);
        if (!sbi)
                return;
        kfree(sbi);
        sb->s_fs_info = NULL;
}

/* called when ->s_root is non-NULL */
static void erofs_put_super(struct super_block *sb)
{
        struct erofs_sb_info *const sbi = EROFS_SB(sb);

        DBG_BUGON(!sbi);

        erofs_shrinker_unregister(sb);
#ifdef CONFIG_EROFS_FS_ZIP
        iput(sbi->managed_cache);
        sbi->managed_cache = NULL;
#endif
}

static struct file_system_type erofs_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "erofs",
        .init_fs_context = erofs_init_fs_context,
        .kill_sb        = erofs_kill_sb,
        .fs_flags       = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("erofs");

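/*
 * Module init/exit: bring up the inode cache, the shrinker, the compression
 * (zip) subsystem and the filesystem registration in that order, unwinding
 * each step on failure.
 */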
static int __init erofs_module_init(void)
{
        int err;

        erofs_check_ondisk_layout_definitions();

        erofs_inode_cachep = kmem_cache_create("erofs_inode",
                                               sizeof(struct erofs_inode), 0,
                                               SLAB_RECLAIM_ACCOUNT,
                                               erofs_inode_init_once);
        if (!erofs_inode_cachep) {
                err = -ENOMEM;
                goto icache_err;
        }

        err = erofs_init_shrinker();
        if (err)
                goto shrinker_err;

        err = z_erofs_init_zip_subsystem();
        if (err)
                goto zip_err;

        err = register_filesystem(&erofs_fs_type);
        if (err)
                goto fs_err;

        return 0;

fs_err:
        z_erofs_exit_zip_subsystem();
zip_err:
        erofs_exit_shrinker();
shrinker_err:
        kmem_cache_destroy(erofs_inode_cachep);
icache_err:
        return err;
}

static void __exit erofs_module_exit(void)
{
        unregister_filesystem(&erofs_fs_type);
        z_erofs_exit_zip_subsystem();
        erofs_exit_shrinker();

        /* Ensure all RCU free inodes are safe before cache is destroyed. */
        rcu_barrier();
        kmem_cache_destroy(erofs_inode_cachep);
}

/* get filesystem statistics */
static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct super_block *sb = dentry->d_sb;
        struct erofs_sb_info *sbi = EROFS_SB(sb);
        u64 id = huge_encode_dev(sb->s_bdev->bd_dev);

        buf->f_type = sb->s_magic;
        buf->f_bsize = EROFS_BLKSIZ;
        buf->f_blocks = sbi->blocks;
        buf->f_bfree = buf->f_bavail = 0;

        buf->f_files = ULLONG_MAX;
        buf->f_ffree = ULLONG_MAX - sbi->inos;

        buf->f_namelen = EROFS_NAME_LEN;

        buf->f_fsid.val[0] = (u32)id;
        buf->f_fsid.val[1] = (u32)(id >> 32);
        return 0;
}

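/* Reflect the effective mount options in /proc/mounts and friends. */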
static int erofs_show_options(struct seq_file *seq, struct dentry *root)
{
        struct erofs_sb_info *sbi __maybe_unused = EROFS_SB(root->d_sb);
        struct erofs_fs_context *ctx __maybe_unused = &sbi->ctx;

#ifdef CONFIG_EROFS_FS_XATTR
        if (test_opt(ctx, XATTR_USER))
                seq_puts(seq, ",user_xattr");
        else
                seq_puts(seq, ",nouser_xattr");
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
        if (test_opt(ctx, POSIX_ACL))
                seq_puts(seq, ",acl");
        else
                seq_puts(seq, ",noacl");
#endif
#ifdef CONFIG_EROFS_FS_ZIP
        if (ctx->cache_strategy == EROFS_ZIP_CACHE_DISABLED)
                seq_puts(seq, ",cache_strategy=disabled");
        else if (ctx->cache_strategy == EROFS_ZIP_CACHE_READAHEAD)
                seq_puts(seq, ",cache_strategy=readahead");
        else if (ctx->cache_strategy == EROFS_ZIP_CACHE_READAROUND)
                seq_puts(seq, ",cache_strategy=readaround");
#endif
        return 0;
}

const struct super_operations erofs_sops = {
        .put_super = erofs_put_super,
        .alloc_inode = erofs_alloc_inode,
        .free_inode = erofs_free_inode,
        .statfs = erofs_statfs,
        .show_options = erofs_show_options,
};

module_init(erofs_module_init);
module_exit(erofs_module_exit);

MODULE_DESCRIPTION("Enhanced ROM File System");
MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc.");
MODULE_LICENSE("GPL");