1 /* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
7 #ifndef __EROFS_INTERNAL_H
8 #define __EROFS_INTERNAL_H
11 #include <linux/dax.h>
12 #include <linux/dcache.h>
14 #include <linux/module.h>
15 #include <linux/pagemap.h>
16 #include <linux/bio.h>
17 #include <linux/magic.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/iomap.h>
23 __printf(2, 3) void _erofs_printk(struct super_block
*sb
, const char *fmt
, ...);
24 #define erofs_err(sb, fmt, ...) \
25 _erofs_printk(sb, KERN_ERR fmt "\n", ##__VA_ARGS__)
26 #define erofs_info(sb, fmt, ...) \
27 _erofs_printk(sb, KERN_INFO fmt "\n", ##__VA_ARGS__)
29 #ifdef CONFIG_EROFS_FS_DEBUG
30 #define DBG_BUGON BUG_ON
32 #define DBG_BUGON(x) ((void)(x))
33 #endif /* !CONFIG_EROFS_FS_DEBUG */
35 /* EROFS_SUPER_MAGIC_V1 to represent the whole file system */
36 #define EROFS_SUPER_MAGIC EROFS_SUPER_MAGIC_V1
38 typedef u64 erofs_nid_t
;
39 typedef u64 erofs_off_t
;
40 /* data type for filesystem-wide blocks number */
41 typedef u32 erofs_blk_t
;
43 struct erofs_device_info
{
45 struct erofs_fscache
*fscache
;
47 struct dax_device
*dax_dev
;
55 EROFS_SYNC_DECOMPRESS_AUTO
,
56 EROFS_SYNC_DECOMPRESS_FORCE_ON
,
57 EROFS_SYNC_DECOMPRESS_FORCE_OFF
60 struct erofs_mount_opts
{
61 /* current strategy of how to use managed cache */
62 unsigned char cache_strategy
;
63 /* strategy of sync decompression (0 - auto, 1 - force on, 2 - force off) */
64 unsigned int sync_decompress
;
65 /* threshold for decompression synchronously */
66 unsigned int max_sync_decompress_pages
;
67 unsigned int mount_opt
;
70 struct erofs_dev_context
{
72 struct rw_semaphore rwsem
;
74 unsigned int extra_devices
;
78 /* all filesystem-wide lz4 configurations */
79 struct erofs_sb_lz4_info
{
80 /* # of pages needed for EROFS lz4 rolling decompression */
81 u16 max_distance_pages
;
82 /* maximum possible blocks for pclusters in the filesystem */
88 struct list_head list
;
89 struct fscache_volume
*volume
;
93 struct erofs_fscache
{
94 struct fscache_cookie
*cookie
;
95 struct inode
*inode
; /* anonymous inode for the blob */
97 /* used for share domain mode */
98 struct erofs_domain
*domain
;
99 struct list_head node
;
104 struct erofs_xattr_prefix_item
{
105 struct erofs_xattr_long_prefix
*prefix
;
109 struct erofs_sb_info
{
110 struct erofs_device_info dif0
;
111 struct erofs_mount_opts opt
; /* options */
112 #ifdef CONFIG_EROFS_FS_ZIP
113 /* list for all registered superblocks, mainly for shrinker */
114 struct list_head list
;
115 struct mutex umount_mutex
;
117 /* managed XArray arranged in physical block number */
118 struct xarray managed_pslots
;
120 unsigned int shrinker_run_no
;
121 u16 available_compr_algs
;
123 /* pseudo inode to manage cached pages */
124 struct inode
*managed_cache
;
126 struct erofs_sb_lz4_info lz4
;
127 #endif /* CONFIG_EROFS_FS_ZIP */
128 struct inode
*packed_inode
;
129 struct erofs_dev_context
*devs
;
133 #ifdef CONFIG_EROFS_FS_XATTR
135 u32 xattr_prefix_start
;
136 u8 xattr_prefix_count
;
137 struct erofs_xattr_prefix_item
*xattr_prefixes
;
138 unsigned int xattr_filter_reserved
;
140 u16 device_id_mask
; /* valid bits of device id to be used */
142 unsigned char islotbits
; /* inode slot unit size in bit shift */
143 unsigned char blkszbits
; /* filesystem block size in bit shift */
145 u32 sb_size
; /* total superblock size */
149 /* what we really care is nid, rather than ino.. */
150 erofs_nid_t root_nid
;
151 erofs_nid_t packed_nid
;
152 /* used for statfs, f_files - f_favail */
155 u8 uuid
[16]; /* 128-bit uuid for volume */
156 u8 volume_name
[16]; /* volume name */
158 u32 feature_incompat
;
161 struct kobject s_kobj
; /* /sys/fs/erofs/<devname> */
162 struct completion s_kobj_unregister
;
164 /* fscache support */
165 struct fscache_volume
*volume
;
166 struct erofs_domain
*domain
;
171 #define EROFS_SB(sb) ((struct erofs_sb_info *)(sb)->s_fs_info)
172 #define EROFS_I_SB(inode) ((struct erofs_sb_info *)(inode)->i_sb->s_fs_info)
174 /* Mount flags set via mount options or defaults */
175 #define EROFS_MOUNT_XATTR_USER 0x00000010
176 #define EROFS_MOUNT_POSIX_ACL 0x00000020
177 #define EROFS_MOUNT_DAX_ALWAYS 0x00000040
178 #define EROFS_MOUNT_DAX_NEVER 0x00000080
179 #define EROFS_MOUNT_DIRECT_IO 0x00000100
181 #define clear_opt(opt, option) ((opt)->mount_opt &= ~EROFS_MOUNT_##option)
182 #define set_opt(opt, option) ((opt)->mount_opt |= EROFS_MOUNT_##option)
183 #define test_opt(opt, option) ((opt)->mount_opt & EROFS_MOUNT_##option)
185 static inline bool erofs_is_fileio_mode(struct erofs_sb_info
*sbi
)
187 return IS_ENABLED(CONFIG_EROFS_FS_BACKED_BY_FILE
) && sbi
->dif0
.file
;
190 static inline bool erofs_is_fscache_mode(struct super_block
*sb
)
192 return IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND
) &&
193 !erofs_is_fileio_mode(EROFS_SB(sb
)) && !sb
->s_bdev
;
197 EROFS_ZIP_CACHE_DISABLED
,
198 EROFS_ZIP_CACHE_READAHEAD
,
199 EROFS_ZIP_CACHE_READAROUND
202 enum erofs_kmap_type
{
203 EROFS_NO_KMAP
, /* don't map the buffer */
204 EROFS_KMAP
, /* use kmap_local_page() to map the buffer */
208 struct address_space
*mapping
;
213 #define __EROFS_BUF_INITIALIZER ((struct erofs_buf){ .page = NULL })
215 #define erofs_blknr(sb, addr) ((erofs_blk_t)((addr) >> (sb)->s_blocksize_bits))
216 #define erofs_blkoff(sb, addr) ((addr) & ((sb)->s_blocksize - 1))
217 #define erofs_pos(sb, blk) ((erofs_off_t)(blk) << (sb)->s_blocksize_bits)
218 #define erofs_iblks(i) (round_up((i)->i_size, i_blocksize(i)) >> (i)->i_blkbits)
220 #define EROFS_FEATURE_FUNCS(name, compat, feature) \
221 static inline bool erofs_sb_has_##name(struct erofs_sb_info *sbi) \
223 return sbi->feature_##compat & EROFS_FEATURE_##feature; \
226 EROFS_FEATURE_FUNCS(zero_padding
, incompat
, INCOMPAT_ZERO_PADDING
)
227 EROFS_FEATURE_FUNCS(compr_cfgs
, incompat
, INCOMPAT_COMPR_CFGS
)
228 EROFS_FEATURE_FUNCS(big_pcluster
, incompat
, INCOMPAT_BIG_PCLUSTER
)
229 EROFS_FEATURE_FUNCS(chunked_file
, incompat
, INCOMPAT_CHUNKED_FILE
)
230 EROFS_FEATURE_FUNCS(device_table
, incompat
, INCOMPAT_DEVICE_TABLE
)
231 EROFS_FEATURE_FUNCS(compr_head2
, incompat
, INCOMPAT_COMPR_HEAD2
)
232 EROFS_FEATURE_FUNCS(ztailpacking
, incompat
, INCOMPAT_ZTAILPACKING
)
233 EROFS_FEATURE_FUNCS(fragments
, incompat
, INCOMPAT_FRAGMENTS
)
234 EROFS_FEATURE_FUNCS(dedupe
, incompat
, INCOMPAT_DEDUPE
)
235 EROFS_FEATURE_FUNCS(xattr_prefixes
, incompat
, INCOMPAT_XATTR_PREFIXES
)
236 EROFS_FEATURE_FUNCS(sb_chksum
, compat
, COMPAT_SB_CHKSUM
)
237 EROFS_FEATURE_FUNCS(xattr_filter
, compat
, COMPAT_XATTR_FILTER
)
239 /* atomic flag definitions */
240 #define EROFS_I_EA_INITED_BIT 0
241 #define EROFS_I_Z_INITED_BIT 1
243 /* bitlock definitions (arranged in reverse order) */
244 #define EROFS_I_BL_XATTR_BIT (BITS_PER_LONG - 1)
245 #define EROFS_I_BL_Z_BIT (BITS_PER_LONG - 2)
250 /* atomic flags (including bitlocks) */
253 unsigned char datalayout
;
254 unsigned char inode_isize
;
255 unsigned int xattr_isize
;
257 unsigned int xattr_name_filter
;
258 unsigned int xattr_shared_count
;
259 unsigned int *xattr_shared_xattrs
;
262 erofs_blk_t raw_blkaddr
;
264 unsigned short chunkformat
;
265 unsigned char chunkbits
;
267 #ifdef CONFIG_EROFS_FS_ZIP
269 unsigned short z_advise
;
270 unsigned char z_algorithmtype
[2];
271 unsigned char z_logical_clusterbits
;
272 unsigned long z_tailextent_headlcn
;
275 erofs_off_t z_idataoff
;
276 unsigned short z_idata_size
;
278 erofs_off_t z_fragmentoff
;
281 #endif /* CONFIG_EROFS_FS_ZIP */
283 /* the corresponding vfs inode */
284 struct inode vfs_inode
;
287 #define EROFS_I(ptr) container_of(ptr, struct erofs_inode, vfs_inode)
289 static inline erofs_off_t
erofs_iloc(struct inode
*inode
)
291 struct erofs_sb_info
*sbi
= EROFS_I_SB(inode
);
293 return erofs_pos(inode
->i_sb
, sbi
->meta_blkaddr
) +
294 (EROFS_I(inode
)->nid
<< sbi
->islotbits
);
297 static inline unsigned int erofs_inode_version(unsigned int ifmt
)
299 return (ifmt
>> EROFS_I_VERSION_BIT
) & EROFS_I_VERSION_MASK
;
302 static inline unsigned int erofs_inode_datalayout(unsigned int ifmt
)
304 return (ifmt
>> EROFS_I_DATALAYOUT_BIT
) & EROFS_I_DATALAYOUT_MASK
;
307 /* reclaiming is never triggered when allocating new folios. */
308 static inline struct folio
*erofs_grab_folio_nowait(struct address_space
*as
,
311 return __filemap_get_folio(as
, index
,
312 FGP_LOCK
|FGP_CREAT
|FGP_NOFS
|FGP_NOWAIT
,
313 readahead_gfp_mask(as
) & ~__GFP_RECLAIM
);
316 /* Has a disk mapping */
317 #define EROFS_MAP_MAPPED 0x0001
318 /* Located in metadata (could be copied from bd_inode) */
319 #define EROFS_MAP_META 0x0002
320 /* The extent is encoded */
321 #define EROFS_MAP_ENCODED 0x0004
322 /* The length of extent is full */
323 #define EROFS_MAP_FULL_MAPPED 0x0008
324 /* Located in the special packed inode */
325 #define EROFS_MAP_FRAGMENT 0x0010
326 /* The extent refers to partial decompressed data */
327 #define EROFS_MAP_PARTIAL_REF 0x0020
329 struct erofs_map_blocks
{
330 struct erofs_buf buf
;
332 erofs_off_t m_pa
, m_la
;
335 unsigned short m_deviceid
;
336 char m_algorithmformat
;
337 unsigned int m_flags
;
341 * Used to get the exact decompressed length, e.g. fiemap (consider lookback
342 * approach instead if possible since it's more metadata lightweight.)
344 #define EROFS_GET_BLOCKS_FIEMAP 0x0001
345 /* Used to map the whole extent if non-negligible data is requested for LZMA */
346 #define EROFS_GET_BLOCKS_READMORE 0x0002
347 /* Used to map tail extent for tailpacking inline or fragment pcluster */
348 #define EROFS_GET_BLOCKS_FINDTAIL 0x0004
351 Z_EROFS_COMPRESSION_SHIFTED
= Z_EROFS_COMPRESSION_MAX
,
352 Z_EROFS_COMPRESSION_INTERLACED
,
353 Z_EROFS_COMPRESSION_RUNTIME_MAX
356 struct erofs_map_dev
{
357 struct super_block
*m_sb
;
358 struct erofs_device_info
*m_dif
;
359 struct block_device
*m_bdev
;
362 unsigned int m_deviceid
;
365 extern const struct super_operations erofs_sops
;
367 extern const struct address_space_operations erofs_aops
;
368 extern const struct address_space_operations erofs_fileio_aops
;
369 extern const struct address_space_operations z_erofs_aops
;
370 extern const struct address_space_operations erofs_fscache_access_aops
;
372 extern const struct inode_operations erofs_generic_iops
;
373 extern const struct inode_operations erofs_symlink_iops
;
374 extern const struct inode_operations erofs_fast_symlink_iops
;
375 extern const struct inode_operations erofs_dir_iops
;
377 extern const struct file_operations erofs_file_fops
;
378 extern const struct file_operations erofs_dir_fops
;
380 extern const struct iomap_ops z_erofs_iomap_report_ops
;
382 /* flags for erofs_fscache_register_cookie() */
383 #define EROFS_REG_COOKIE_SHARE 0x0001
384 #define EROFS_REG_COOKIE_NEED_NOEXIST 0x0002
386 void *erofs_read_metadata(struct super_block
*sb
, struct erofs_buf
*buf
,
387 erofs_off_t
*offset
, int *lengthp
);
388 void erofs_unmap_metabuf(struct erofs_buf
*buf
);
389 void erofs_put_metabuf(struct erofs_buf
*buf
);
390 void *erofs_bread(struct erofs_buf
*buf
, erofs_off_t offset
,
391 enum erofs_kmap_type type
);
392 void erofs_init_metabuf(struct erofs_buf
*buf
, struct super_block
*sb
);
393 void *erofs_read_metabuf(struct erofs_buf
*buf
, struct super_block
*sb
,
394 erofs_off_t offset
, enum erofs_kmap_type type
);
395 int erofs_map_dev(struct super_block
*sb
, struct erofs_map_dev
*dev
);
396 int erofs_fiemap(struct inode
*inode
, struct fiemap_extent_info
*fieinfo
,
398 int erofs_map_blocks(struct inode
*inode
, struct erofs_map_blocks
*map
);
399 void erofs_onlinefolio_init(struct folio
*folio
);
400 void erofs_onlinefolio_split(struct folio
*folio
);
401 void erofs_onlinefolio_end(struct folio
*folio
, int err
);
402 struct inode
*erofs_iget(struct super_block
*sb
, erofs_nid_t nid
);
403 int erofs_getattr(struct mnt_idmap
*idmap
, const struct path
*path
,
404 struct kstat
*stat
, u32 request_mask
,
405 unsigned int query_flags
);
406 int erofs_namei(struct inode
*dir
, const struct qstr
*name
,
407 erofs_nid_t
*nid
, unsigned int *d_type
);
409 static inline void *erofs_vm_map_ram(struct page
**pages
, unsigned int count
)
414 void *p
= vm_map_ram(pages
, count
, -1);
416 /* retry two more times (totally 3 times) */
417 if (p
|| ++retried
>= 3)
424 int erofs_register_sysfs(struct super_block
*sb
);
425 void erofs_unregister_sysfs(struct super_block
*sb
);
426 int __init
erofs_init_sysfs(void);
427 void erofs_exit_sysfs(void);
429 struct page
*__erofs_allocpage(struct page
**pagepool
, gfp_t gfp
, bool tryrsv
);
430 static inline struct page
*erofs_allocpage(struct page
**pagepool
, gfp_t gfp
)
432 return __erofs_allocpage(pagepool
, gfp
, false);
/*
 * Push @page onto the singly-linked page pool: the old list head is
 * stashed in page->private and @page becomes the new head.
 * NOTE(review): the `*pagepool = page;` statement was lost in
 * transcription and restored — without it the pool head never advances.
 */
static inline void erofs_pagepool_add(struct page **pagepool, struct page *page)
{
	set_page_private(page, (unsigned long)*pagepool);
	*pagepool = page;
}
439 void erofs_release_pages(struct page
**pagepool
);
441 #ifdef CONFIG_EROFS_FS_ZIP
442 #define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping)
444 extern atomic_long_t erofs_global_shrink_cnt
;
445 void erofs_shrinker_register(struct super_block
*sb
);
446 void erofs_shrinker_unregister(struct super_block
*sb
);
447 int __init
erofs_init_shrinker(void);
448 void erofs_exit_shrinker(void);
449 int __init
z_erofs_init_subsystem(void);
450 void z_erofs_exit_subsystem(void);
451 unsigned long z_erofs_shrink_scan(struct erofs_sb_info
*sbi
,
452 unsigned long nr_shrink
);
453 int z_erofs_map_blocks_iter(struct inode
*inode
, struct erofs_map_blocks
*map
,
455 void *z_erofs_get_gbuf(unsigned int requiredpages
);
456 void z_erofs_put_gbuf(void *ptr
);
457 int z_erofs_gbuf_growsize(unsigned int nrpages
);
458 int __init
z_erofs_gbuf_init(void);
459 void z_erofs_gbuf_exit(void);
460 int erofs_init_managed_cache(struct super_block
*sb
);
461 int z_erofs_parse_cfgs(struct super_block
*sb
, struct erofs_super_block
*dsb
);
/* No-op stubs used when CONFIG_EROFS_FS_ZIP is disabled */
static inline void erofs_shrinker_register(struct super_block *sb) {}
static inline void erofs_shrinker_unregister(struct super_block *sb) {}
static inline int erofs_init_shrinker(void) { return 0; }
static inline void erofs_exit_shrinker(void) {}
static inline int z_erofs_init_subsystem(void) { return 0; }
static inline void z_erofs_exit_subsystem(void) {}
static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; }
470 #endif /* !CONFIG_EROFS_FS_ZIP */
472 #ifdef CONFIG_EROFS_FS_BACKED_BY_FILE
473 struct bio
*erofs_fileio_bio_alloc(struct erofs_map_dev
*mdev
);
474 void erofs_fileio_submit_bio(struct bio
*bio
);
/* No-op stubs used when CONFIG_EROFS_FS_BACKED_BY_FILE is disabled */
static inline struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev) { return NULL; }
static inline void erofs_fileio_submit_bio(struct bio *bio) {}
480 #ifdef CONFIG_EROFS_FS_ONDEMAND
481 int erofs_fscache_register_fs(struct super_block
*sb
);
482 void erofs_fscache_unregister_fs(struct super_block
*sb
);
484 struct erofs_fscache
*erofs_fscache_register_cookie(struct super_block
*sb
,
485 char *name
, unsigned int flags
);
486 void erofs_fscache_unregister_cookie(struct erofs_fscache
*fscache
);
487 struct bio
*erofs_fscache_bio_alloc(struct erofs_map_dev
*mdev
);
488 void erofs_fscache_submit_bio(struct bio
*bio
);
490 static inline int erofs_fscache_register_fs(struct super_block
*sb
)
494 static inline void erofs_fscache_unregister_fs(struct super_block
*sb
) {}
497 struct erofs_fscache
*erofs_fscache_register_cookie(struct super_block
*sb
,
498 char *name
, unsigned int flags
)
500 return ERR_PTR(-EOPNOTSUPP
);
503 static inline void erofs_fscache_unregister_cookie(struct erofs_fscache
*fscache
)
506 static inline struct bio
*erofs_fscache_bio_alloc(struct erofs_map_dev
*mdev
) { return NULL
; }
507 static inline void erofs_fscache_submit_bio(struct bio
*bio
) {}
510 #define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
512 #endif /* __EROFS_INTERNAL_H */