/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#ifndef __EROFS_INTERNAL_H
#define __EROFS_INTERNAL_H
#include <linux/fs.h>
#include <linux/dcache.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "erofs_fs.h"
/* redefine pr_fmt "erofs: " */
#undef pr_fmt
#define pr_fmt(fmt) "erofs: " fmt
__printf(3, 4) void _erofs_err(struct super_block *sb,
			       const char *function, const char *fmt, ...);
#define erofs_err(sb, fmt, ...)	\
	_erofs_err(sb, __func__, fmt "\n", ##__VA_ARGS__)
__printf(3, 4) void _erofs_info(struct super_block *sb,
				const char *function, const char *fmt, ...);
#define erofs_info(sb, fmt, ...) \
	_erofs_info(sb, __func__, fmt "\n", ##__VA_ARGS__)
#ifdef CONFIG_EROFS_FS_DEBUG
#define erofs_dbg(x, ...)       pr_debug(x "\n", ##__VA_ARGS__)
#define DBG_BUGON               BUG_ON
#else
#define erofs_dbg(x, ...)       ((void)0)
#define DBG_BUGON(x)            ((void)(x))
#endif	/* !CONFIG_EROFS_FS_DEBUG */
/* EROFS_SUPER_MAGIC_V1 to represent the whole file system */
#define EROFS_SUPER_MAGIC	EROFS_SUPER_MAGIC_V1
typedef u64 erofs_nid_t;
typedef u64 erofs_off_t;
/* data type for filesystem-wide blocks number */
typedef u32 erofs_blk_t;
struct erofs_sb_info {
#ifdef CONFIG_EROFS_FS_ZIP
	/* list for all registered superblocks, mainly for shrinker */
	struct list_head list;
	struct mutex umount_mutex;

	/* managed XArray arranged in physical block number */
	struct xarray managed_pslots;

	/* threshold for decompression synchronously */
	unsigned int max_sync_decompress_pages;

	unsigned int shrinker_run_no;

	/* current strategy of how to use managed cache */
	unsigned char cache_strategy;

	/* pseudo inode to manage cached pages */
	struct inode *managed_cache;
#endif	/* CONFIG_EROFS_FS_ZIP */

	u32 meta_blkaddr;
#ifdef CONFIG_EROFS_FS_XATTR
	u32 xattr_blkaddr;
#endif

	/* inode slot unit size in bit shift */
	unsigned char islotbits;

	/* what we really care is nid, rather than ino.. */
	erofs_nid_t root_nid;
	/* used for statfs, f_files - f_favail */
	u64 inos;

	u8 uuid[16];                    /* 128-bit uuid for volume */
	u8 volume_name[16];             /* volume name */

	unsigned int mount_opt;
};
#define EROFS_SB(sb) ((struct erofs_sb_info *)(sb)->s_fs_info)
#define EROFS_I_SB(inode) ((struct erofs_sb_info *)(inode)->i_sb->s_fs_info)
/* Mount flags set via mount options or defaults */
#define EROFS_MOUNT_XATTR_USER		0x00000010
#define EROFS_MOUNT_POSIX_ACL		0x00000020

#define clear_opt(sbi, option)	((sbi)->mount_opt &= ~EROFS_MOUNT_##option)
#define set_opt(sbi, option)	((sbi)->mount_opt |= EROFS_MOUNT_##option)
#define test_opt(sbi, option)	((sbi)->mount_opt & EROFS_MOUNT_##option)
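
/*
 * Expansion example (illustrative only, not part of the original header):
 *	set_opt(sbi, XATTR_USER)  expands to ((sbi)->mount_opt |= EROFS_MOUNT_XATTR_USER)
 *	test_opt(sbi, XATTR_USER) evaluates to a nonzero value iff that bit
 *	is currently set in sbi->mount_opt.
 */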

#ifdef CONFIG_EROFS_FS_ZIP
enum {
	EROFS_ZIP_CACHE_DISABLED,
	EROFS_ZIP_CACHE_READAHEAD,
	EROFS_ZIP_CACHE_READAROUND
};

#define EROFS_LOCKED_MAGIC	(INT_MIN | 0xE0F510CCL)

/* basic unit of the workstation of a super_block */
struct erofs_workgroup {
	/* the workgroup index in the workstation */
	pgoff_t index;

	/* overall workgroup reference count */
	atomic_t refcount;
};

#if defined(CONFIG_SMP)
static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
						 int val)
{
	preempt_disable();
	if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
		preempt_enable();
		return false;
	}
	return true;
}

static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
					    int orig_val)
{
	/*
	 * other observers should notice all modifications
	 * in the freezing period.
	 */
	smp_mb();
	atomic_set(&grp->refcount, orig_val);
	preempt_enable();
}

static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
{
	return atomic_cond_read_relaxed(&grp->refcount,
					VAL != EROFS_LOCKED_MAGIC);
}
#else
static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
						 int val)
{
	preempt_disable();
	/* no need to spin on UP platforms, let's just disable preemption. */
	if (val != atomic_read(&grp->refcount)) {
		preempt_enable();
		return false;
	}
	return true;
}

static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
					    int orig_val)
{
	preempt_enable();
}

static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
{
	int v = atomic_read(&grp->refcount);

	/* workgroup is never frozen on uniprocessor systems */
	DBG_BUGON(v == EROFS_LOCKED_MAGIC);
	return v;
}
#endif	/* !CONFIG_SMP */
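
/*
 * A minimal usage sketch (illustrative only, not lifted from the reclaim
 * code): a shrinker typically freezes a workgroup before tearing it down,
 * e.g.
 *
 *	int refs = atomic_read(&grp->refcount);
 *
 *	if (erofs_workgroup_try_to_freeze(grp, refs)) {
 *		// dispose of or migrate the workgroup safely here
 *		erofs_workgroup_unfreeze(grp, refs);
 *	}
 *
 * while concurrent lookups spin in erofs_wait_on_workgroup_freezed() until
 * the refcount no longer reads EROFS_LOCKED_MAGIC.
 */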

/* hard limit of pages per compressed cluster */
#define Z_EROFS_CLUSTER_MAX_PAGES	(CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT)
#define EROFS_PCPUBUF_NR_PAGES		Z_EROFS_CLUSTER_MAX_PAGES
#else
#define EROFS_PCPUBUF_NR_PAGES		0
#endif	/* !CONFIG_EROFS_FS_ZIP */

/* we strictly follow PAGE_SIZE and no buffer head yet */
#define LOG_BLOCK_SIZE		PAGE_SHIFT

#undef LOG_SECTORS_PER_BLOCK
#define LOG_SECTORS_PER_BLOCK	(PAGE_SHIFT - 9)

#undef SECTORS_PER_BLOCK
#define SECTORS_PER_BLOCK	(1 << LOG_SECTORS_PER_BLOCK)

#define EROFS_BLKSIZ		(1 << LOG_BLOCK_SIZE)

#if (EROFS_BLKSIZ % 4096 || !EROFS_BLKSIZ)
#error erofs cannot be used on this platform
#endif

#define ROOT_NID(sb)		((sb)->root_nid)

#define erofs_blknr(addr)       ((addr) / EROFS_BLKSIZ)
#define erofs_blkoff(addr)      ((addr) % EROFS_BLKSIZ)
#define blknr_to_addr(nr)       ((erofs_off_t)(nr) * EROFS_BLKSIZ)
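
/*
 * Worked example (assuming 4KiB blocks, i.e. EROFS_BLKSIZ == 4096):
 * for the byte address 0x3040,
 *	erofs_blknr(0x3040)  == 3      (block number)
 *	erofs_blkoff(0x3040) == 0x40   (offset within that block)
 *	blknr_to_addr(3)     == 0x3000 (start address of block 3)
 */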

static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid)
{
	return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
}
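
/*
 * Worked example (illustrative values only): with meta_blkaddr == 2,
 * islotbits == 5 (32-byte inode slots) and 4KiB blocks, nid 37 maps to
 * byte offset 2 * 4096 + (37 << 5) == 9376 on the device.
 */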

/* atomic flag definitions */
#define EROFS_I_EA_INITED_BIT	0
#define EROFS_I_Z_INITED_BIT	1

/* bitlock definitions (arranged in reverse order) */
#define EROFS_I_BL_XATTR_BIT	(BITS_PER_LONG - 1)
#define EROFS_I_BL_Z_BIT	(BITS_PER_LONG - 2)

struct erofs_inode {
	erofs_nid_t nid;

	/* atomic flags (including bitlocks) */
	unsigned long flags;

	unsigned char datalayout;
	unsigned char inode_isize;
	unsigned short xattr_isize;

	unsigned int xattr_shared_count;
	unsigned int *xattr_shared_xattrs;

	union {
		erofs_blk_t raw_blkaddr;
#ifdef CONFIG_EROFS_FS_ZIP
		struct {
			unsigned short z_advise;
			unsigned char  z_algorithmtype[2];
			unsigned char  z_logical_clusterbits;
			unsigned char  z_physical_clusterbits[2];
		};
#endif	/* CONFIG_EROFS_FS_ZIP */
	};
	/* the corresponding vfs inode */
	struct inode vfs_inode;
};

#define EROFS_I(ptr)	\
	container_of(ptr, struct erofs_inode, vfs_inode)

static inline unsigned long erofs_inode_datablocks(struct inode *inode)
{
	/* since i_size cannot be changed */
	return DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
}

static inline unsigned int erofs_bitrange(unsigned int value, unsigned int bit,
					  unsigned int bits)
{
	return (value >> bit) & ((1 << bits) - 1);
}

static inline unsigned int erofs_inode_version(unsigned int value)
{
	return erofs_bitrange(value, EROFS_I_VERSION_BIT,
			      EROFS_I_VERSION_BITS);
}

static inline unsigned int erofs_inode_datalayout(unsigned int value)
{
	return erofs_bitrange(value, EROFS_I_DATALAYOUT_BIT,
			      EROFS_I_DATALAYOUT_BITS);
}

extern const struct super_operations erofs_sops;

extern const struct address_space_operations erofs_raw_access_aops;
extern const struct address_space_operations z_erofs_aops;

/*
 * Logical to physical block mapping, used by erofs_map_blocks()
 *
 * Different from other file systems, it is used for 2 access modes:
 *
 * 1) RAW access mode:
 *
 * Users pass a valid (m_lblk, m_lofs -- usually 0) pair,
 * and get the valid m_pblk, m_pofs and the longest m_len (in bytes).
 *
 * Note that m_lblk in the RAW access mode refers to the number of
 * the compressed ondisk block rather than the uncompressed
 * in-memory block for the compressed file.
 *
 * m_pofs equals m_lofs except for the inline data page.
 *
 * 2) Normal access mode:
 *
 * If the inode is not compressed, there is no difference from
 * the RAW access mode. However, if the inode is compressed,
 * users should pass a valid (m_lblk, m_lofs) pair, and get
 * the needed m_pblk, m_pofs, m_len to get the compressed data
 * and the updated m_lblk, m_lofs which indicate the start
 * of the corresponding uncompressed data in the file.
 */
enum {
	BH_Zipped = BH_PrivateStart,
	BH_FullMapped,
};

/* Has a disk mapping */
#define EROFS_MAP_MAPPED	(1 << BH_Mapped)
/* Located in metadata (could be copied from bd_inode) */
#define EROFS_MAP_META		(1 << BH_Meta)
/* The extent has been compressed */
#define EROFS_MAP_ZIPPED	(1 << BH_Zipped)
/* The length of extent is full */
#define EROFS_MAP_FULL_MAPPED	(1 << BH_FullMapped)

struct erofs_map_blocks {
	erofs_off_t m_pa, m_la;
	u64 m_plen, m_llen;

	unsigned int m_flags;

	struct page *mpage;
};

/* Flags used by erofs_map_blocks() */
#define EROFS_GET_BLOCKS_RAW    0x0001
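
/*
 * A minimal usage sketch (illustrative only, not taken from the driver):
 * resolving the on-disk location of logical byte offset @pos in RAW mode
 * could look roughly like this:
 *
 *	struct erofs_map_blocks map = { .m_la = pos };
 *	int err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
 *
 *	if (!err && (map.m_flags & EROFS_MAP_MAPPED))
 *		blkaddr = erofs_blknr(map.m_pa);
 */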

#ifdef CONFIG_EROFS_FS_ZIP
int z_erofs_fill_inode(struct inode *inode);
int z_erofs_map_blocks_iter(struct inode *inode,
			    struct erofs_map_blocks *map,
			    int flags);
#else
static inline int z_erofs_fill_inode(struct inode *inode) { return -EOPNOTSUPP; }
static inline int z_erofs_map_blocks_iter(struct inode *inode,
					  struct erofs_map_blocks *map,
					  int flags)
{
	return -EOPNOTSUPP;
}
#endif	/* !CONFIG_EROFS_FS_ZIP */

struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr);

int erofs_map_blocks(struct inode *, struct erofs_map_blocks *, int);

static inline unsigned long erofs_inode_hash(erofs_nid_t nid)
{
#if BITS_PER_LONG == 32
	return (nid >> 32) ^ (nid & 0xffffffff);
#else
	return nid;
#endif
}
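
/*
 * Illustrative example (32-bit kernels only): nid 0x100000002 folds to
 * 0x1 ^ 0x2 == 0x3, so both halves of the 64-bit nid contribute to the
 * inode hash; 64-bit kernels simply use the nid itself.
 */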

extern const struct inode_operations erofs_generic_iops;
extern const struct inode_operations erofs_symlink_iops;
extern const struct inode_operations erofs_fast_symlink_iops;

struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid, bool dir);
int erofs_getattr(const struct path *path, struct kstat *stat,
		  u32 request_mask, unsigned int query_flags);

extern const struct inode_operations erofs_dir_iops;

int erofs_namei(struct inode *dir, struct qstr *name,
		erofs_nid_t *nid, unsigned int *d_type);

extern const struct file_operations erofs_dir_fops;

/* utils.c / zdata.c */
struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp);

#if (EROFS_PCPUBUF_NR_PAGES > 0)
void *erofs_get_pcpubuf(unsigned int pagenr);
#define erofs_put_pcpubuf(buf) do { \
	(void)&(buf);	\
	preempt_enable();	\
} while (0)
#else
static inline void *erofs_get_pcpubuf(unsigned int pagenr)
{
	return ERR_PTR(-EOPNOTSUPP);
}

#define erofs_put_pcpubuf(buf) do {} while (0)
#endif

#ifdef CONFIG_EROFS_FS_ZIP
int erofs_workgroup_put(struct erofs_workgroup *grp);
struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
					     pgoff_t index);
struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
					       struct erofs_workgroup *grp);
void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
void erofs_shrinker_register(struct super_block *sb);
void erofs_shrinker_unregister(struct super_block *sb);
int __init erofs_init_shrinker(void);
void erofs_exit_shrinker(void);
int __init z_erofs_init_zip_subsystem(void);
void z_erofs_exit_zip_subsystem(void);
int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
				       struct erofs_workgroup *egrp);
int erofs_try_to_free_cached_page(struct address_space *mapping,
				  struct page *page);
#else
static inline void erofs_shrinker_register(struct super_block *sb) {}
static inline void erofs_shrinker_unregister(struct super_block *sb) {}
static inline int erofs_init_shrinker(void) { return 0; }
static inline void erofs_exit_shrinker(void) {}
static inline int z_erofs_init_zip_subsystem(void) { return 0; }
static inline void z_erofs_exit_zip_subsystem(void) {}
#endif	/* !CONFIG_EROFS_FS_ZIP */

#define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */

#endif	/* __EROFS_INTERNAL_H */