/* SPDX-License-Identifier: GPL-2.0
 *
 * linux/drivers/staging/erofs/internal.h
 *
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
#ifndef __INTERNAL_H
#define __INTERNAL_H

#include <linux/fs.h>
#include <linux/dcache.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/cleancache.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "erofs_fs.h"
/* redefine pr_fmt "erofs: " */
#undef pr_fmt
#define pr_fmt(fmt) "erofs: " fmt

#define errln(x, ...)   pr_err(x "\n", ##__VA_ARGS__)
#define infoln(x, ...)  pr_info(x "\n", ##__VA_ARGS__)
#ifdef CONFIG_EROFS_FS_DEBUG
#define debugln(x, ...) pr_debug(x "\n", ##__VA_ARGS__)

#define dbg_might_sleep         might_sleep
#define DBG_BUGON               BUG_ON
#else
#define debugln(x, ...)         ((void)0)

#define dbg_might_sleep()       ((void)0)
#define DBG_BUGON(x)            ((void)(x))
#endif
#ifdef CONFIG_EROFS_FAULT_INJECTION
enum {
        FAULT_KMALLOC,
        FAULT_MAX,
};

extern char *erofs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))

struct erofs_fault_info {
        atomic_t inject_ops;
        unsigned int inject_rate;
        unsigned int inject_type;
};
#endif
#ifdef CONFIG_EROFS_FS_ZIP_CACHE_BIPOLAR
#define EROFS_FS_ZIP_CACHE_LVL  (2)
#elif defined(CONFIG_EROFS_FS_ZIP_CACHE_UNIPOLAR)
#define EROFS_FS_ZIP_CACHE_LVL  (1)
#else
#define EROFS_FS_ZIP_CACHE_LVL  (0)
#endif

#if (!defined(EROFS_FS_HAS_MANAGED_CACHE) && (EROFS_FS_ZIP_CACHE_LVL > 0))
#define EROFS_FS_HAS_MANAGED_CACHE
#endif
/* EROFS_SUPER_MAGIC_V1 to represent the whole file system */
#define EROFS_SUPER_MAGIC       EROFS_SUPER_MAGIC_V1

typedef u64 erofs_nid_t;
struct erofs_sb_info {
        /* list for all registered superblocks, mainly for shrinker */
        struct list_head list;
        struct mutex umount_mutex;

        u32 blocks;
        u32 meta_blkaddr;
#ifdef CONFIG_EROFS_FS_XATTR
        u32 xattr_blkaddr;
#endif

        /* inode slot unit size in bit shift */
        unsigned char islotbits;
#ifdef CONFIG_EROFS_FS_ZIP
        /* cluster size in bit shift */
        unsigned char clusterbits;

        /* the dedicated workstation for compression */
        struct radix_tree_root workstn_tree;

#ifdef EROFS_FS_HAS_MANAGED_CACHE
        struct inode *managed_cache;
#endif

#endif

        u32 build_time_nsec;
        u64 build_time;

        /* what we really care about is nid, rather than ino.. */
        erofs_nid_t root_nid;
        /* used for statfs, f_files - f_favail */
        u64 inos;

        u8 uuid[16];            /* 128-bit uuid for volume */
        u8 volume_name[16];     /* volume name */
        u32 requirements;

        char *dev_name;

        unsigned int mount_opt;
        unsigned int shrinker_run_no;

#ifdef CONFIG_EROFS_FAULT_INJECTION
        struct erofs_fault_info fault_info;     /* For fault injection */
#endif
};
#ifdef CONFIG_EROFS_FAULT_INJECTION
#define erofs_show_injection_info(type)                                 \
        infoln("inject %s in %s of %pS", erofs_fault_name[type],        \
                __func__, __builtin_return_address(0))

static inline bool time_to_inject(struct erofs_sb_info *sbi, int type)
{
        struct erofs_fault_info *ffi = &sbi->fault_info;

        if (!ffi->inject_rate)
                return false;

        if (!IS_FAULT_SET(ffi, type))
                return false;

        atomic_inc(&ffi->inject_ops);
        if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
                atomic_set(&ffi->inject_ops, 0);
                return true;
        }
        return false;
}
#endif
static inline void *erofs_kmalloc(struct erofs_sb_info *sbi,
                                  size_t size, gfp_t flags)
{
#ifdef CONFIG_EROFS_FAULT_INJECTION
        if (time_to_inject(sbi, FAULT_KMALLOC)) {
                erofs_show_injection_info(FAULT_KMALLOC);
                return NULL;
        }
#endif
        return kmalloc(size, flags);
}
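
/*
 * Illustrative caller pattern (a sketch, not from the original header):
 * allocations routed through erofs_kmalloc() participate in FAULT_KMALLOC
 * injection, so callers' error paths stay exercised in debug builds.
 * `struct foo' is a hypothetical type used only for illustration:
 *
 *      struct foo *p = erofs_kmalloc(sbi, sizeof(*p), GFP_KERNEL);
 *
 *      if (!p)
 *              return -ENOMEM;
 */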
#define EROFS_SB(sb) ((struct erofs_sb_info *)(sb)->s_fs_info)
#define EROFS_I_SB(inode) ((struct erofs_sb_info *)(inode)->i_sb->s_fs_info)

/* Mount flags set via mount options or defaults */
#define EROFS_MOUNT_XATTR_USER          0x00000010
#define EROFS_MOUNT_POSIX_ACL           0x00000020
#define EROFS_MOUNT_FAULT_INJECTION     0x00000040

#define clear_opt(sbi, option)  ((sbi)->mount_opt &= ~EROFS_MOUNT_##option)
#define set_opt(sbi, option)    ((sbi)->mount_opt |= EROFS_MOUNT_##option)
#define test_opt(sbi, option)   ((sbi)->mount_opt & EROFS_MOUNT_##option)
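
/*
 * Illustrative usage (a sketch; the option name is token-pasted onto
 * EROFS_MOUNT_): enabling and checking user xattrs during mount option
 * parsing might look like
 *
 *      set_opt(sbi, XATTR_USER);
 *      if (test_opt(sbi, XATTR_USER))
 *              infoln("user xattr enabled");
 */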
#ifdef CONFIG_EROFS_FS_ZIP
#define erofs_workstn_lock(sbi)         xa_lock(&(sbi)->workstn_tree)
#define erofs_workstn_unlock(sbi)       xa_unlock(&(sbi)->workstn_tree)

/* basic unit of the workstation of a super_block */
struct erofs_workgroup {
        /* the workgroup index in the workstation */
        pgoff_t index;

        /* overall workgroup reference count */
        atomic_t refcount;
};
#define EROFS_LOCKED_MAGIC      (INT_MIN | 0xE0F510CCL)

#if defined(CONFIG_SMP)
static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
                                                 int val)
{
        preempt_disable();
        if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
                preempt_enable();
                return false;
        }
        return true;
}

static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
                                            int orig_val)
{
        /*
         * other observers should notice all modifications
         * in the freezing period.
         */
        smp_mb();
        atomic_set(&grp->refcount, orig_val);
        preempt_enable();
}

static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
{
        return atomic_cond_read_relaxed(&grp->refcount,
                                        VAL != EROFS_LOCKED_MAGIC);
}
#else
static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
                                                 int val)
{
        preempt_disable();
        /* no need to spin on UP platforms, let's just disable preemption. */
        if (val != atomic_read(&grp->refcount)) {
                preempt_enable();
                return false;
        }
        return true;
}

static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
                                            int orig_val)
{
        preempt_enable();
}

static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
{
        int v = atomic_read(&grp->refcount);

        /* a workgroup is never frozen on uniprocessor systems */
        DBG_BUGON(v == EROFS_LOCKED_MAGIC);
        return v;
}
#endif
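
/*
 * Illustrative freeze pattern (a sketch of how a reclaimer might use the
 * helpers above; not text from the original header): freeze with the
 * expected refcount, do the work, then unfreeze with the same value so
 * other observers see a consistent count:
 *
 *      if (erofs_workgroup_try_to_freeze(grp, 1)) {
 *              ... safely tear down or migrate the workgroup ...
 *              erofs_workgroup_unfreeze(grp, 1);
 *      }
 */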
static inline int erofs_workgroup_get(struct erofs_workgroup *grp, int *ocnt)
{
        int o;

repeat:
        o = erofs_wait_on_workgroup_freezed(grp);

        if (unlikely(o <= 0))
                return -1;

        if (unlikely(atomic_cmpxchg(&grp->refcount, o, o + 1) != o))
                goto repeat;

        *ocnt = o;
        return 0;
}
#define __erofs_workgroup_get(grp)      atomic_inc(&(grp)->refcount)
#define __erofs_workgroup_put(grp)      atomic_dec(&(grp)->refcount)

extern int erofs_workgroup_put(struct erofs_workgroup *grp);

extern struct erofs_workgroup *erofs_find_workgroup(
        struct super_block *sb, pgoff_t index, bool *tag);

extern int erofs_register_workgroup(struct super_block *sb,
        struct erofs_workgroup *grp, bool tag);

extern unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
        unsigned long nr_shrink, bool cleanup);

static inline void erofs_workstation_cleanup_all(struct super_block *sb)
{
        erofs_shrink_workstation(EROFS_SB(sb), ~0UL, true);
}
#ifdef EROFS_FS_HAS_MANAGED_CACHE
#define EROFS_UNALLOCATED_CACHED_PAGE   ((void *)0x5F0EF00D)

extern int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
        struct erofs_workgroup *egrp);
extern int erofs_try_to_free_cached_page(struct address_space *mapping,
        struct page *page);
#endif

#endif
/* we strictly follow PAGE_SIZE and no buffer head yet */
#define LOG_BLOCK_SIZE          PAGE_SHIFT

#undef LOG_SECTORS_PER_BLOCK
#define LOG_SECTORS_PER_BLOCK   (PAGE_SHIFT - 9)

#undef SECTORS_PER_BLOCK
#define SECTORS_PER_BLOCK       (1 << LOG_SECTORS_PER_BLOCK)

#define EROFS_BLKSIZ            (1 << LOG_BLOCK_SIZE)

#if (EROFS_BLKSIZ % 4096 || !EROFS_BLKSIZ)
#error erofs cannot be used on this platform
#endif
#define ROOT_NID(sb)            ((sb)->root_nid)

#ifdef CONFIG_EROFS_FS_ZIP
/* hard limit of pages per compressed cluster */
#define Z_EROFS_CLUSTER_MAX_PAGES       (CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT)

/* page count of a compressed cluster */
#define erofs_clusterpages(sbi) ((1 << (sbi)->clusterbits) / PAGE_SIZE)
#endif
typedef u64 erofs_off_t;

/* data type for filesystem-wide blocks number */
typedef u32 erofs_blk_t;

#define erofs_blknr(addr)       ((addr) / EROFS_BLKSIZ)
#define erofs_blkoff(addr)      ((addr) % EROFS_BLKSIZ)
#define blknr_to_addr(nr)       ((erofs_off_t)(nr) * EROFS_BLKSIZ)

static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid)
{
        return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
}
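
/*
 * Worked example (hypothetical numbers): with 4KiB blocks,
 * meta_blkaddr == 2 and islotbits == 5 (32-byte inode slots), nid 16
 * lands at blknr_to_addr(2) + (16 << 5) == 8192 + 512 == 8704 bytes
 * into the block device.
 */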
/* atomic flag definitions */
#define EROFS_V_EA_INITED_BIT   0

/* bitlock definitions (arranged in reverse order) */
#define EROFS_V_BL_XATTR_BIT    (BITS_PER_LONG - 1)

struct erofs_vnode {
        erofs_nid_t nid;

        /* atomic flags (including bitlocks) */
        unsigned long flags;

        unsigned char data_mapping_mode;
        /* inline size in bytes */
        unsigned char inode_isize;
        unsigned short xattr_isize;

        unsigned xattr_shared_count;
        unsigned *xattr_shared_xattrs;

        erofs_blk_t raw_blkaddr;

        /* the corresponding vfs inode */
        struct inode vfs_inode;
};

#define EROFS_V(ptr)    \
        container_of(ptr, struct erofs_vnode, vfs_inode)
#define __inode_advise(x, bit, bits) \
        (((x) >> (bit)) & ((1 << (bits)) - 1))

#define __inode_version(advise) \
        __inode_advise(advise, EROFS_I_VERSION_BIT,     \
                EROFS_I_VERSION_BITS)

#define __inode_data_mapping(advise)    \
        __inode_advise(advise, EROFS_I_DATA_MAPPING_BIT,\
                EROFS_I_DATA_MAPPING_BITS)
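
/*
 * Illustrative decode (a sketch; the bit positions come from erofs_fs.h):
 * given the on-disk i_advise field of an inode, the data mapping mode is
 * extracted as
 *
 *      unsigned int dm = __inode_data_mapping(le16_to_cpu(advise));
 *
 * where `advise' is a hypothetical __le16 read from the on-disk inode.
 */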
static inline unsigned long inode_datablocks(struct inode *inode)
{
        /* since i_size cannot be changed */
        return DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
}

static inline bool is_inode_layout_plain(struct inode *inode)
{
        return EROFS_V(inode)->data_mapping_mode == EROFS_INODE_LAYOUT_PLAIN;
}

static inline bool is_inode_layout_compression(struct inode *inode)
{
        return EROFS_V(inode)->data_mapping_mode ==
                EROFS_INODE_LAYOUT_COMPRESSION;
}

static inline bool is_inode_layout_inline(struct inode *inode)
{
        return EROFS_V(inode)->data_mapping_mode == EROFS_INODE_LAYOUT_INLINE;
}
extern const struct super_operations erofs_sops;
extern const struct inode_operations erofs_dir_iops;
extern const struct file_operations erofs_dir_fops;

extern const struct address_space_operations erofs_raw_access_aops;
#ifdef CONFIG_EROFS_FS_ZIP
extern const struct address_space_operations z_erofs_vle_normalaccess_aops;
#endif
/*
 * Logical to physical block mapping, used by erofs_map_blocks()
 *
 * Different from other file systems, it is used for 2 access modes:
 *
 * 1) RAW access mode:
 *
 * Users pass a valid (m_lblk, m_lofs -- usually 0) pair,
 * and get the valid m_pblk, m_pofs and the longest m_len (in bytes).
 *
 * Note that m_lblk in the RAW access mode refers to the number of
 * the compressed ondisk block rather than the uncompressed
 * in-memory block for the compressed file.
 *
 * m_pofs equals m_lofs except for the inline data page.
 *
 * 2) Normal access mode:
 *
 * If the inode is not compressed, it is no different from
 * the RAW access mode. However, if the inode is compressed,
 * users should pass a valid (m_lblk, m_lofs) pair, and get
 * the needed m_pblk, m_pofs, m_len to get the compressed data
 * and the updated m_lblk, m_lofs which indicates the start
 * of the corresponding uncompressed data in the file.
 */
enum {
        BH_Zipped = BH_PrivateStart,
};

/* Has a disk mapping */
#define EROFS_MAP_MAPPED        (1 << BH_Mapped)
/* Located in metadata (could be copied from bd_inode) */
#define EROFS_MAP_META          (1 << BH_Meta)
/* The extent has been compressed */
#define EROFS_MAP_ZIPPED        (1 << BH_Zipped)
struct erofs_map_blocks {
        erofs_off_t m_pa, m_la;
        u64 m_plen, m_llen;

        unsigned int m_flags;
};

/* Flags used by erofs_map_blocks() */
#define EROFS_GET_BLOCKS_RAW    0x0001
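
/*
 * Illustrative mapping call (a sketch, not from the original header):
 * resolving the physical extent covering logical offset `la' in RAW mode:
 *
 *      struct erofs_map_blocks map = { .m_la = la };
 *      int err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
 *
 *      if (!err && (map.m_flags & EROFS_MAP_MAPPED))
 *              ... read m_plen bytes starting at map.m_pa ...
 */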
/* data.c */
static inline struct bio *prepare_bio(
        struct super_block *sb,
        erofs_blk_t blkaddr, unsigned nr_pages,
        bio_end_io_t endio)
{
        gfp_t gfp = GFP_NOIO;
        struct bio *bio = bio_alloc(gfp, nr_pages);

        if (unlikely(bio == NULL) &&
                (current->flags & PF_MEMALLOC)) {
                /* under memory pressure, retry with fewer pages and
                 * finally fall back to a single-page NOFAIL allocation */
                do {
                        nr_pages /= 2;
                        if (unlikely(!nr_pages)) {
                                bio = bio_alloc(gfp | __GFP_NOFAIL, 1);
                                BUG_ON(bio == NULL);
                                break;
                        }
                        bio = bio_alloc(gfp, nr_pages);
                } while (bio == NULL);
        }

        bio->bi_end_io = endio;
        bio_set_dev(bio, sb->s_bdev);
        bio->bi_iter.bi_sector = blkaddr << LOG_SECTORS_PER_BLOCK;
        return bio;
}
static inline void __submit_bio(struct bio *bio, unsigned op, unsigned op_flags)
{
        bio_set_op_attrs(bio, op, op_flags);
        submit_bio(bio);
}
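
/*
 * Illustrative read submission (a sketch combining the two helpers
 * above): allocate a bio for `nr' pages at block `blkaddr', attach the
 * pages, then submit it as a read. read_endio is a hypothetical
 * caller-supplied bio_end_io_t:
 *
 *      bio = prepare_bio(sb, blkaddr, nr, read_endio);
 *      ... bio_add_page() each page ...
 *      __submit_bio(bio, REQ_OP_READ, 0);
 */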
extern struct page *erofs_get_meta_page(struct super_block *sb,
        erofs_blk_t blkaddr, bool prio);
extern int erofs_map_blocks(struct inode *, struct erofs_map_blocks *, int);
extern int erofs_map_blocks_iter(struct inode *, struct erofs_map_blocks *,
        struct page **, int);

struct erofs_map_blocks_iter {
        struct erofs_map_blocks map;
        struct page *mpage;
};

static inline struct page *
erofs_get_inline_page(struct inode *inode,
                      erofs_blk_t blkaddr)
{
        return erofs_get_meta_page(inode->i_sb,
                blkaddr, S_ISDIR(inode->i_mode));
}
/* inode.c */
extern struct inode *erofs_iget(struct super_block *sb,
        erofs_nid_t nid, bool dir);

/* dir.c */
int erofs_namei(struct inode *dir, struct qstr *name,
        erofs_nid_t *nid, unsigned *d_type);

/* xattr.c */
#ifdef CONFIG_EROFS_FS_XATTR
extern const struct xattr_handler *erofs_xattr_handlers[];
#endif

/* symlink */
#ifdef CONFIG_EROFS_FS_XATTR
extern const struct inode_operations erofs_symlink_xattr_iops;
extern const struct inode_operations erofs_fast_symlink_xattr_iops;
extern const struct inode_operations erofs_special_inode_operations;
#endif
static inline void set_inode_fast_symlink(struct inode *inode)
{
#ifdef CONFIG_EROFS_FS_XATTR
        inode->i_op = &erofs_fast_symlink_xattr_iops;
#else
        inode->i_op = &simple_symlink_inode_operations;
#endif
}

static inline bool is_inode_fast_symlink(struct inode *inode)
{
#ifdef CONFIG_EROFS_FS_XATTR
        return inode->i_op == &erofs_fast_symlink_xattr_iops;
#else
        return inode->i_op == &simple_symlink_inode_operations;
#endif
}
static inline void *erofs_vmap(struct page **pages, unsigned int count)
{
#ifdef CONFIG_EROFS_FS_USE_VM_MAP_RAM
        int i = 0;

        while (1) {
                void *addr = vm_map_ram(pages, count, -1, PAGE_KERNEL);
                /* retry two more times (3 times in total) */
                if (addr != NULL || ++i >= 3)
                        return addr;
                vm_unmap_aliases();
        }
        return NULL;
#else
        return vmap(pages, count, VM_MAP, PAGE_KERNEL);
#endif
}
static inline void erofs_vunmap(const void *mem, unsigned int count)
{
#ifdef CONFIG_EROFS_FS_USE_VM_MAP_RAM
        vm_unmap_ram(mem, count);
#else
        vunmap(mem);
#endif
}
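
/*
 * Illustrative pairing (a sketch): map `nr' pages, use the linear
 * address, then unmap with the same page count so the vm_map_ram()
 * variant can release the right range:
 *
 *      void *vaddr = erofs_vmap(pages, nr);
 *
 *      if (vaddr) {
 *              ... access the pages through vaddr contiguously ...
 *              erofs_vunmap(vaddr, nr);
 *      }
 */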
/* utils.c */
extern struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp);

extern void erofs_register_super(struct super_block *sb);
extern void erofs_unregister_super(struct super_block *sb);

extern unsigned long erofs_shrink_count(struct shrinker *shrink,
        struct shrink_control *sc);
extern unsigned long erofs_shrink_scan(struct shrinker *shrink,
        struct shrink_control *sc);

#ifndef lru_to_page
#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
#endif

#endif