// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include <linux/module.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include <linux/crc32c.h>
#include "xattr.h"

#define CREATE_TRACE_POINTS
#include <trace/events/erofs.h>
static struct kmem_cache *erofs_inode_cachep __read_mostly;
void _erofs_err(struct super_block *sb, const char *function,
                const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);

        vaf.fmt = fmt;
        vaf.va = &args;

        pr_err("(device %s): %s: %pV", sb->s_id, function, &vaf);
        va_end(args);
}
void _erofs_info(struct super_block *sb, const char *function,
                 const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);

        vaf.fmt = fmt;
        vaf.va = &args;

        pr_info("(device %s): %pV", sb->s_id, &vaf);
        va_end(args);
}
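/*
 * Verify the superblock checksum: CRC32C over the on-disk superblock area
 * (EROFS_BLKSIZ - EROFS_SUPER_OFFSET bytes) with the checksum field zeroed
 * before the computation.
 */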
static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
{
        struct erofs_super_block *dsb;
        u32 expected_crc, crc;

        dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET,
                      EROFS_BLKSIZ - EROFS_SUPER_OFFSET, GFP_KERNEL);
        if (!dsb)
                return -ENOMEM;

        expected_crc = le32_to_cpu(dsb->checksum);
        dsb->checksum = 0;
        /* to allow for x86 boot sectors and other oddities. */
        crc = crc32c(~0, dsb, EROFS_BLKSIZ - EROFS_SUPER_OFFSET);
        kfree(dsb);

        if (crc != expected_crc) {
                erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
                          crc, expected_crc);
                return -EBADMSG;
        }
        return 0;
}
static void erofs_inode_init_once(void *ptr)
{
        struct erofs_inode *vi = ptr;

        inode_init_once(&vi->vfs_inode);
}
static struct inode *erofs_alloc_inode(struct super_block *sb)
{
        struct erofs_inode *vi =
                kmem_cache_alloc(erofs_inode_cachep, GFP_KERNEL);

        if (!vi)
                return NULL;

        /* zero out everything except vfs_inode */
        memset(vi, 0, offsetof(struct erofs_inode, vfs_inode));
        return &vi->vfs_inode;
}
static void erofs_free_inode(struct inode *inode)
{
        struct erofs_inode *vi = EROFS_I(inode);

        /* be careful of RCU symlink path */
        if (inode->i_op == &erofs_fast_symlink_iops)
                kfree(inode->i_link);
        kfree(vi->xattr_shared_xattrs);

        kmem_cache_free(erofs_inode_cachep, vi);
}
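/*
 * Refuse to mount images that set incompatible feature bits this kernel
 * does not know about, since their on-disk format cannot be interpreted
 * safely.
 */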
static bool check_layout_compatibility(struct super_block *sb,
                                       struct erofs_super_block *dsb)
{
        const unsigned int feature = le32_to_cpu(dsb->feature_incompat);

        EROFS_SB(sb)->feature_incompat = feature;

        /* check if current kernel meets all mandatory requirements */
        if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) {
                erofs_err(sb,
                          "unidentified incompatible feature %x, please upgrade kernel version",
                          feature & ~EROFS_ALL_FEATURE_INCOMPAT);
                return false;
        }
        return true;
}
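/*
 * Read block 0 of the block device and initialize the in-memory
 * erofs_sb_info from the on-disk superblock.
 */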
static int erofs_read_superblock(struct super_block *sb)
{
        struct erofs_sb_info *sbi;
        struct page *page;
        struct erofs_super_block *dsb;
        unsigned int blkszbits;
        void *data;
        int ret;

        page = read_mapping_page(sb->s_bdev->bd_inode->i_mapping, 0, NULL);
        if (IS_ERR(page)) {
                erofs_err(sb, "cannot read erofs superblock");
                return PTR_ERR(page);
        }

        sbi = EROFS_SB(sb);

        data = kmap(page);
        dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET);

        ret = -EINVAL;
        if (le32_to_cpu(dsb->magic) != EROFS_SUPER_MAGIC_V1) {
                erofs_err(sb, "cannot find valid erofs superblock");
                goto out;
        }

        sbi->feature_compat = le32_to_cpu(dsb->feature_compat);
        if (sbi->feature_compat & EROFS_FEATURE_COMPAT_SB_CHKSUM) {
                ret = erofs_superblock_csum_verify(sb, data);
                if (ret)
                        goto out;
        }

        blkszbits = dsb->blkszbits;
        /* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
        if (blkszbits != LOG_BLOCK_SIZE) {
                erofs_err(sb, "blksize %u isn't supported on this platform",
                          1 << blkszbits);
                goto out;
        }

        if (!check_layout_compatibility(sb, dsb))
                goto out;

        sbi->blocks = le32_to_cpu(dsb->blocks);
        sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr);
#ifdef CONFIG_EROFS_FS_XATTR
        sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr);
#endif
        sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact));
        sbi->root_nid = le16_to_cpu(dsb->root_nid);
        sbi->inos = le64_to_cpu(dsb->inos);

        sbi->build_time = le64_to_cpu(dsb->build_time);
        sbi->build_time_nsec = le32_to_cpu(dsb->build_time_nsec);

        memcpy(&sb->s_uuid, dsb->uuid, sizeof(dsb->uuid));

        ret = strscpy(sbi->volume_name, dsb->volume_name,
                      sizeof(dsb->volume_name));
        if (ret < 0) {  /* -E2BIG */
                erofs_err(sb, "bad volume name without NIL terminator");
                ret = -EFSCORRUPTED;
                goto out;
        }
        ret = 0;
out:
        kunmap(page);
        put_page(page);
        return ret;
}
#ifdef CONFIG_EROFS_FS_ZIP
static int erofs_build_cache_strategy(struct super_block *sb,
                                      substring_t *args)
{
        struct erofs_sb_info *sbi = EROFS_SB(sb);
        const char *cs = match_strdup(args);
        int err = 0;

        if (!cs) {
                erofs_err(sb, "Not enough memory to store cache strategy");
                return -ENOMEM;
        }

        if (!strcmp(cs, "disabled")) {
                sbi->cache_strategy = EROFS_ZIP_CACHE_DISABLED;
        } else if (!strcmp(cs, "readahead")) {
                sbi->cache_strategy = EROFS_ZIP_CACHE_READAHEAD;
        } else if (!strcmp(cs, "readaround")) {
                sbi->cache_strategy = EROFS_ZIP_CACHE_READAROUND;
        } else {
                erofs_err(sb, "Unrecognized cache strategy \"%s\"", cs);
                err = -EINVAL;
        }
        kfree(cs);
        return err;
}
#else
static int erofs_build_cache_strategy(struct super_block *sb,
                                      substring_t *args)
{
        erofs_info(sb, "EROFS compression is disabled, so cache strategy is ignored");
        return 0;
}
#endif
/* set up default EROFS parameters */
static void erofs_default_options(struct erofs_sb_info *sbi)
{
#ifdef CONFIG_EROFS_FS_ZIP
        sbi->cache_strategy = EROFS_ZIP_CACHE_READAROUND;
        sbi->max_sync_decompress_pages = 3;
#endif
#ifdef CONFIG_EROFS_FS_XATTR
        set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
        set_opt(sbi, POSIX_ACL);
#endif
}
enum {
        Opt_user_xattr,
        Opt_nouser_xattr,
        Opt_acl,
        Opt_noacl,
        Opt_cache_strategy,
        Opt_err
};

static match_table_t erofs_tokens = {
        {Opt_user_xattr, "user_xattr"},
        {Opt_nouser_xattr, "nouser_xattr"},
        {Opt_acl, "acl"},
        {Opt_noacl, "noacl"},
        {Opt_cache_strategy, "cache_strategy=%s"},
        {Opt_err, NULL}
};
static int erofs_parse_options(struct super_block *sb, char *options)
{
        substring_t args[MAX_OPT_ARGS];
        char *p;
        int err;

        if (!options)
                return 0;

        while ((p = strsep(&options, ","))) {
                int token;

                if (!*p)
                        continue;

                args[0].to = args[0].from = NULL;
                token = match_token(p, erofs_tokens, args);

                switch (token) {
#ifdef CONFIG_EROFS_FS_XATTR
                case Opt_user_xattr:
                        set_opt(EROFS_SB(sb), XATTR_USER);
                        break;
                case Opt_nouser_xattr:
                        clear_opt(EROFS_SB(sb), XATTR_USER);
                        break;
#else
                case Opt_user_xattr:
                        erofs_info(sb, "user_xattr options not supported");
                        break;
                case Opt_nouser_xattr:
                        erofs_info(sb, "nouser_xattr options not supported");
                        break;
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
                case Opt_acl:
                        set_opt(EROFS_SB(sb), POSIX_ACL);
                        break;
                case Opt_noacl:
                        clear_opt(EROFS_SB(sb), POSIX_ACL);
                        break;
#else
                case Opt_acl:
                        erofs_info(sb, "acl options not supported");
                        break;
                case Opt_noacl:
                        erofs_info(sb, "noacl options not supported");
                        break;
#endif
                case Opt_cache_strategy:
                        err = erofs_build_cache_strategy(sb, args);
                        if (err)
                                return err;
                        break;
                default:
                        erofs_err(sb, "Unrecognized mount option \"%s\" or missing value", p);
                        return -EINVAL;
                }
        }
        return 0;
}
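/*
 * The managed cache is a pseudo inode whose page cache holds cached
 * compressed pages; the address_space operations below let the VM
 * invalidate and reclaim those pages under memory pressure.
 */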
#ifdef CONFIG_EROFS_FS_ZIP
static const struct address_space_operations managed_cache_aops;

static int erofs_managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
{
        int ret = 1;    /* 0 - busy */
        struct address_space *const mapping = page->mapping;

        DBG_BUGON(!PageLocked(page));
        DBG_BUGON(mapping->a_ops != &managed_cache_aops);

        if (PagePrivate(page))
                ret = erofs_try_to_free_cached_page(mapping, page);

        return ret;
}

static void erofs_managed_cache_invalidatepage(struct page *page,
                                               unsigned int offset,
                                               unsigned int length)
{
        const unsigned int stop = length + offset;

        DBG_BUGON(!PageLocked(page));

        /* Check for potential overflow in debug mode */
        DBG_BUGON(stop > PAGE_SIZE || stop < length);

        if (offset == 0 && stop == PAGE_SIZE)
                while (!erofs_managed_cache_releasepage(page, GFP_NOFS))
                        cond_resched();
}

static const struct address_space_operations managed_cache_aops = {
        .releasepage = erofs_managed_cache_releasepage,
        .invalidatepage = erofs_managed_cache_invalidatepage,
};
static int erofs_init_managed_cache(struct super_block *sb)
{
        struct erofs_sb_info *const sbi = EROFS_SB(sb);
        struct inode *const inode = new_inode(sb);

        if (!inode)
                return -ENOMEM;

        set_nlink(inode, 1);
        inode->i_size = OFFSET_MAX;

        inode->i_mapping->a_ops = &managed_cache_aops;
        mapping_set_gfp_mask(inode->i_mapping,
                             GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
        sbi->managed_cache = inode;
        return 0;
}
#else
static int erofs_init_managed_cache(struct super_block *sb) { return 0; }
#endif
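/*
 * Fill the VFS superblock: read the on-disk superblock, apply mount options
 * and look up the root directory inode.
 */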
static int erofs_fill_super(struct super_block *sb, void *data, int silent)
{
        struct inode *inode;
        struct erofs_sb_info *sbi;
        int err;

        sb->s_magic = EROFS_SUPER_MAGIC;

        if (!sb_set_blocksize(sb, EROFS_BLKSIZ)) {
                erofs_err(sb, "failed to set erofs blksize");
                return -EINVAL;
        }

        sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
        if (!sbi)
                return -ENOMEM;

        sb->s_fs_info = sbi;
        err = erofs_read_superblock(sb);
        if (err)
                return err;

        sb->s_flags |= SB_RDONLY | SB_NOATIME;
        sb->s_maxbytes = MAX_LFS_FILESIZE;
        sb->s_time_gran = 1;

        sb->s_op = &erofs_sops;

#ifdef CONFIG_EROFS_FS_XATTR
        sb->s_xattr = erofs_xattr_handlers;
#endif
        /* set erofs default mount options */
        erofs_default_options(sbi);

        err = erofs_parse_options(sb, data);
        if (err)
                return err;

        if (test_opt(sbi, POSIX_ACL))
                sb->s_flags |= SB_POSIXACL;
        else
                sb->s_flags &= ~SB_POSIXACL;

#ifdef CONFIG_EROFS_FS_ZIP
        INIT_RADIX_TREE(&sbi->workstn_tree, GFP_ATOMIC);
#endif

        /* get the root inode */
        inode = erofs_iget(sb, ROOT_NID(sbi), true);
        if (IS_ERR(inode))
                return PTR_ERR(inode);

        if (!S_ISDIR(inode->i_mode)) {
                erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)",
                          ROOT_NID(sbi), inode->i_mode);
                iput(inode);
                return -EINVAL;
        }

        sb->s_root = d_make_root(inode);
        if (!sb->s_root)
                return -ENOMEM;

        erofs_shrinker_register(sb);
        /* sb->s_umount is already locked, SB_ACTIVE and SB_BORN are not set */
        err = erofs_init_managed_cache(sb);
        if (err)
                return err;

        erofs_info(sb, "mounted with opts: %s, root inode @ nid %llu.",
                   (char *)data, ROOT_NID(sbi));
        return 0;
}
static struct dentry *erofs_mount(struct file_system_type *fs_type, int flags,
                                  const char *dev_name, void *data)
{
        return mount_bdev(fs_type, flags, dev_name, data, erofs_fill_super);
}
/*
 * could be triggered after deactivate_locked_super()
 * is called, thus including umount and failed to initialize.
 */
static void erofs_kill_sb(struct super_block *sb)
{
        struct erofs_sb_info *sbi;

        WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC);

        kill_block_super(sb);

        sbi = EROFS_SB(sb);
        if (!sbi)
                return;
        kfree(sbi);
        sb->s_fs_info = NULL;
}
/* called when ->s_root is non-NULL */
static void erofs_put_super(struct super_block *sb)
{
        struct erofs_sb_info *const sbi = EROFS_SB(sb);

        DBG_BUGON(!sbi);

        erofs_shrinker_unregister(sb);
#ifdef CONFIG_EROFS_FS_ZIP
        iput(sbi->managed_cache);
        sbi->managed_cache = NULL;
#endif
}
static struct file_system_type erofs_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "erofs",
        .mount          = erofs_mount,
        .kill_sb        = erofs_kill_sb,
        .fs_flags       = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("erofs");
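/*
 * Module init: create the inode slab cache, then bring up the shrinker and
 * the compression subsystem, and finally register the filesystem, unwinding
 * in reverse order on failure.
 */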
static int __init erofs_module_init(void)
{
        int err;

        erofs_check_ondisk_layout_definitions();

        erofs_inode_cachep = kmem_cache_create("erofs_inode",
                                               sizeof(struct erofs_inode), 0,
                                               SLAB_RECLAIM_ACCOUNT,
                                               erofs_inode_init_once);
        if (!erofs_inode_cachep) {
                err = -ENOMEM;
                goto icache_err;
        }

        err = erofs_init_shrinker();
        if (err)
                goto shrinker_err;

        err = z_erofs_init_zip_subsystem();
        if (err)
                goto zip_err;

        err = register_filesystem(&erofs_fs_type);
        if (err)
                goto fs_err;

        return 0;

fs_err:
        z_erofs_exit_zip_subsystem();
zip_err:
        erofs_exit_shrinker();
shrinker_err:
        kmem_cache_destroy(erofs_inode_cachep);
icache_err:
        return err;
}
static void __exit erofs_module_exit(void)
{
        unregister_filesystem(&erofs_fs_type);
        z_erofs_exit_zip_subsystem();
        erofs_exit_shrinker();

        /* Ensure all RCU free inodes are safe before cache is destroyed. */
        rcu_barrier();
        kmem_cache_destroy(erofs_inode_cachep);
}
/* get filesystem statistics */
static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct super_block *sb = dentry->d_sb;
        struct erofs_sb_info *sbi = EROFS_SB(sb);
        u64 id = huge_encode_dev(sb->s_bdev->bd_dev);

        buf->f_type = sb->s_magic;
        buf->f_bsize = EROFS_BLKSIZ;
        buf->f_blocks = sbi->blocks;
        buf->f_bfree = buf->f_bavail = 0;

        buf->f_files = ULLONG_MAX;
        buf->f_ffree = ULLONG_MAX - sbi->inos;

        buf->f_namelen = EROFS_NAME_LEN;

        buf->f_fsid.val[0] = (u32)id;
        buf->f_fsid.val[1] = (u32)(id >> 32);
        return 0;
}
static int erofs_show_options(struct seq_file *seq, struct dentry *root)
{
        struct erofs_sb_info *sbi __maybe_unused = EROFS_SB(root->d_sb);

#ifdef CONFIG_EROFS_FS_XATTR
        if (test_opt(sbi, XATTR_USER))
                seq_puts(seq, ",user_xattr");
        else
                seq_puts(seq, ",nouser_xattr");
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
        if (test_opt(sbi, POSIX_ACL))
                seq_puts(seq, ",acl");
        else
                seq_puts(seq, ",noacl");
#endif
#ifdef CONFIG_EROFS_FS_ZIP
        if (sbi->cache_strategy == EROFS_ZIP_CACHE_DISABLED) {
                seq_puts(seq, ",cache_strategy=disabled");
        } else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAHEAD) {
                seq_puts(seq, ",cache_strategy=readahead");
        } else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAROUND) {
                seq_puts(seq, ",cache_strategy=readaround");
        }
#endif
        return 0;
}
static int erofs_remount(struct super_block *sb, int *flags, char *data)
{
        struct erofs_sb_info *sbi = EROFS_SB(sb);
        unsigned int org_mnt_opt = sbi->mount_opt;
        int err;

        DBG_BUGON(!sb_rdonly(sb));
        err = erofs_parse_options(sb, data);
        if (err)
                goto out;

        if (test_opt(sbi, POSIX_ACL))
                sb->s_flags |= SB_POSIXACL;
        else
                sb->s_flags &= ~SB_POSIXACL;

        *flags |= SB_RDONLY;
        return 0;
out:
        sbi->mount_opt = org_mnt_opt;
        return err;
}
const struct super_operations erofs_sops = {
        .put_super = erofs_put_super,
        .alloc_inode = erofs_alloc_inode,
        .free_inode = erofs_free_inode,
        .statfs = erofs_statfs,
        .show_options = erofs_show_options,
        .remount_fs = erofs_remount,
};

module_init(erofs_module_init);
module_exit(erofs_module_exit);
MODULE_DESCRIPTION("(Enhanced) Read-Only File System");
MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc.");
MODULE_LICENSE("GPL");