/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(condition)	BUG_ON(condition)
#define f2fs_down_write(x, y)	down_write_nest_lock(x, y)
#else
#define f2fs_bug_on(condition)
#define f2fs_down_write(x, y)	down_write(x)
#endif
#define F2FS_MOUNT_BG_GC		0x00000001
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
#define F2FS_MOUNT_DISCARD		0x00000004
#define F2FS_MOUNT_NOHEAP		0x00000008
#define F2FS_MOUNT_XATTR_USER		0x00000010
#define F2FS_MOUNT_POSIX_ACL		0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040
#define F2FS_MOUNT_INLINE_XATTR		0x00000080
#define F2FS_MOUNT_INLINE_DATA		0x00000100
#define F2FS_MOUNT_FLUSH_MERGE		0x00000200
#define F2FS_MOUNT_NOBARRIER		0x00000400

#define clear_opt(sbi, option)	(sbi->mount_opt.opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	(sbi->mount_opt.opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	(sbi->mount_opt.opt & F2FS_MOUNT_##option)

#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
		typecheck(unsigned long long, b) &&			\
		((long long)((a) - (b)) > 0))
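/*
 * Usage sketch (illustrative only): comparing two checkpoint versions,
 * where cur and old are hypothetical unsigned long long values:
 *
 *	ver_after(cur, old) is true only when cur was generated after old,
 *	even if the counter has wrapped around, thanks to the signed
 *	subtraction above.
 */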
typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk block
			 * address format, __le32.
			 */
typedef u32 nid_t;

struct f2fs_mount_info {
	unsigned int	opt;
};

#define CRCPOLY_LE 0xedb88320

static inline __u32 f2fs_crc32(void *buf, size_t len)
{
	unsigned char *p = (unsigned char *)buf;
	__u32 crc = F2FS_SUPER_MAGIC;
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
	}
	return crc;
}

static inline bool f2fs_crc_valid(__u32 blk_crc, void *buf, size_t buf_size)
{
	return f2fs_crc32(buf, buf_size) == blk_crc;
}
/*
 * For checkpoint manager
 */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

/*
 * For CP/NAT/SIT/SSA readahead
 */

/* for the list of ino */
enum {
	ORPHAN_INO,		/* for orphan ino list */
	APPEND_INO,		/* for append ino list */
	UPDATE_INO,		/* for update ino list */
	MAX_INO_ENTRY,		/* max. list */
};

struct ino_entry {
	struct list_head list;	/* list head */
	nid_t ino;		/* inode number */
};

/* for the list of directory inodes */
struct dir_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

/* for the list of blockaddresses to be discarded */
struct discard_entry {
	struct list_head list;	/* list head */
	block_t blkaddr;	/* block address to be discarded */
	int len;		/* # of consecutive blocks of the discard */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last inode */
};

#define nats_in_cursum(sum)		(le16_to_cpu(sum->n_nats))
#define sits_in_cursum(sum)		(le16_to_cpu(sum->n_sits))

#define nat_in_journal(sum, i)		(sum->nat_j.entries[i].ne)
#define nid_in_journal(sum, i)		(sum->nat_j.entries[i].nid)
#define sit_in_journal(sum, i)		(sum->sit_j.entries[i].se)
#define segno_in_journal(sum, i)	(sum->sit_j.entries[i].segno)
static inline int update_nats_in_cursum(struct f2fs_summary_block *rs, int i)
{
	int before = nats_in_cursum(rs);

	rs->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_summary_block *rs, int i)
{
	int before = sits_in_cursum(rs);

	rs->n_sits = cpu_to_le16(before + i);
	return before;
}
#define F2FS_IOC_GETFLAGS		FS_IOC_GETFLAGS
#define F2FS_IOC_SETFLAGS		FS_IOC_SETFLAGS

#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/*
 * ioctl commands in 32 bit emulation
 */
#define F2FS_IOC32_GETFLAGS		FS_IOC32_GETFLAGS
#define F2FS_IOC32_SETFLAGS		FS_IOC32_SETFLAGS
#endif

/*
 * For INODE and NODE manager
 */
/*
 * XATTR_NODE_OFFSET stores xattrs to one node block per file keeping -1
 * as its node offset to distinguish from index node blocks.
 * But some bits are used to mark the node block.
 */
#define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
				>> OFFSET_BIT_SHIFT)
enum {
	ALLOC_NODE,			/* allocate a new node page if needed */
	LOOKUP_NODE,			/* look up a node without readahead */
	LOOKUP_NODE_RA,			/*
					 * look up a node with readahead called
					 * by get_data_block.
					 */
};

#define F2FS_LINK_MAX		32000	/* maximum link count per file */

#define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */

/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN	16	/* minimum extent length */

struct extent_info {
	rwlock_t ext_lock;	/* rwlock for consistency */
	unsigned int fofs;	/* start offset in a file */
	u32 blk_addr;		/* start block address of the extent */
	unsigned int len;	/* length of the extent */
};
/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02

#define DEF_DIR_LEVEL		0

struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned char i_dir_level;	/* use for dentry level for large dir */
	unsigned int i_current_depth;	/* use only in directory structure */
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* Use below internally in f2fs */
	unsigned long flags;		/* use to pass per-file flags */
	struct rw_semaphore i_sem;	/* protect fi info */
	atomic_t dirty_dents;		/* # of dirty dentry pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	unsigned long long xattr_ver;	/* cp version of xattr modification */
	struct extent_info ext;		/* in-memory extent cache entry */
	struct dir_inode_entry *dirty_dir;	/* the pointer of dirty dir */
};

static inline void get_extent_info(struct extent_info *ext,
					struct f2fs_extent i_ext)
{
	write_lock(&ext->ext_lock);
	ext->fofs = le32_to_cpu(i_ext.fofs);
	ext->blk_addr = le32_to_cpu(i_ext.blk_addr);
	ext->len = le32_to_cpu(i_ext.len);
	write_unlock(&ext->ext_lock);
}
static inline void set_raw_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	read_lock(&ext->ext_lock);
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk_addr = cpu_to_le32(ext->blk_addr);
	i_ext->len = cpu_to_le32(ext->len);
	read_unlock(&ext->ext_lock);
}
struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t available_nids;		/* maximum available node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */
	unsigned int ram_thresh;	/* control the memory footprint */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	rwlock_t nat_tree_lock;		/* protect nat_tree_lock */
	unsigned int nat_cnt;		/* the # of cached nat entries */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	struct list_head dirty_nat_entries; /* cached nat entry list (dirty) */
	struct list_head nat_entry_set;	/* nat entry set list */
	unsigned int dirty_nat_cnt;	/* total num of nat entries in set */

	/* free node ids management */
	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
	struct list_head free_nid_list;	/* a list for free nids */
	spinlock_t free_nid_list_lock;	/* protect free nid list */
	unsigned int fcnt;		/* the number of free node id */
	struct mutex build_lock;	/* lock for build free nids */

	char *nat_bitmap;		/* NAT bitmap pointer */
	int bitmap_size;		/* bitmap size */
};
/*
 * This structure is used as one of the function parameters.
 * All the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	block_t	data_blkaddr;		/* block address of the node block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct page *ipage, struct page *npage, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}
/*
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, the on-disk layout covers a maximum of 16 logs that consist
 * of 8 for data and 8 for node logs.
 */
#define	NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)

enum {
	CURSEG_HOT_DATA	= 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
};
struct flush_cmd {
	struct flush_cmd *next;
	struct completion wait;
};

struct flush_cmd_control {
	struct task_struct *f2fs_issue_flush;	/* flush thread */
	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
	struct flush_cmd *issue_list;		/* list for command issue */
	struct flush_cmd *dispatch_list;	/* list for command dispatch */
	spinlock_t issue_lock;			/* for issue list lock */
	struct flush_cmd *issue_tail;		/* list tail of issue list */
};

struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int ovp_segments;	/* # of overprovision segments */

	/* a threshold to reclaim prefree segments */
	unsigned int rec_prefree_segments;

	/* for small discard management */
	struct list_head discard_list;		/* 4KB discard list */
	int nr_discards;			/* # of discards in the list */
	int max_discards;			/* max. discards to be issued */

	unsigned int ipu_policy;	/* in-place-update policy */
	unsigned int min_ipu_util;	/* in-place-update threshold */

	/* for flush command control */
	struct flush_cmd_control *cmd_control_info;
};
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
enum count_type {
	F2FS_WRITEBACK,
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	NR_COUNT_TYPE,
};

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates as async mode.
 * NODE			Node pages. It operates as async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written
 *			while waiting for the bio's completion.
 *			Only can be used with META.
 */
#define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
enum page_type {
	DATA,
	NODE,
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
};
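/*
 * Example (illustrative only): META_FLUSH is only valid for META pages,
 * and PAGE_TYPE_OF_BIO() folds it back onto the META bio slot:
 *
 *	PAGE_TYPE_OF_BIO(META_FLUSH) == META
 *	PAGE_TYPE_OF_BIO(DATA) == DATA
 */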
struct f2fs_io_info {
	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
	int rw;			/* contains R/RS/W/WS with REQ_META/REQ_PRIO */
};

#define is_read_io(rw)	(((rw) & 1) == READ)
struct f2fs_bio_info {
	struct f2fs_sb_info *sbi;	/* f2fs superblock */
	struct bio *bio;		/* bios to merge */
	sector_t last_block_in_bio;	/* last block number */
	struct f2fs_io_info fio;	/* store buffered io info. */
	struct rw_semaphore io_rwsem;	/* blocking op for bio */
};
struct f2fs_sb_info {
	struct super_block *sb;			/* pointer to VFS super block */
	struct proc_dir_entry *s_proc;		/* proc entry */
	struct buffer_head *raw_super_buf;	/* buffer head of raw sb */
	struct f2fs_super_block *raw_super;	/* raw super block pointer */
	int s_dirty;				/* dirty flag for checkpoint */

	/* for node-related operations */
	struct f2fs_nm_info *nm_info;		/* node manager */
	struct inode *node_inode;		/* cache node blocks */

	/* for segment-related operations */
	struct f2fs_sm_info *sm_info;		/* segment manager */

	/* for bio operations */
	struct f2fs_bio_info read_io;			/* for read bios */
	struct f2fs_bio_info write_io[NR_PAGE_TYPE];	/* for write bios */
	struct completion *wait_io;		/* for completion bios */

	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
	struct inode *meta_inode;		/* cache meta blocks */
	struct mutex cp_mutex;			/* checkpoint procedure lock */
	struct rw_semaphore cp_rwsem;		/* blocking FS operations */
	struct rw_semaphore node_write;		/* locking node writes */
	struct mutex writepages;		/* mutex for writepages() */
	bool por_doing;				/* recovery is doing or not */
	wait_queue_head_t cp_wait;

	/* for inode management */
	struct radix_tree_root ino_root[MAX_INO_ENTRY];	/* ino entry array */
	spinlock_t ino_lock[MAX_INO_ENTRY];	/* for ino entry lock */
	struct list_head ino_list[MAX_INO_ENTRY];	/* inode list head */

	/* for orphan inode, use 0'th array */
	unsigned int n_orphans;			/* # of orphan inodes */
	unsigned int max_orphans;		/* max orphan inodes */

	/* for directory inode management */
	struct list_head dir_inode_list;	/* dir inode list */
	spinlock_t dir_inode_lock;		/* for dir inode list lock */

	/* basic file system units */
	unsigned int log_sectors_per_block;	/* log2 sectors per block */
	unsigned int log_blocksize;		/* log2 block size */
	unsigned int blocksize;			/* block size */
	unsigned int root_ino_num;		/* root inode number */
	unsigned int node_ino_num;		/* node inode number */
	unsigned int meta_ino_num;		/* meta inode number */
	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
	unsigned int blocks_per_seg;		/* blocks per segment */
	unsigned int segs_per_sec;		/* segments per section */
	unsigned int secs_per_zone;		/* sections per zone */
	unsigned int total_sections;		/* total section count */
	unsigned int total_node_count;		/* total node block count */
	unsigned int total_valid_node_count;	/* valid node block count */
	unsigned int total_valid_inode_count;	/* valid inode count */
	int active_logs;			/* # of active logs */
	int dir_level;				/* directory level */

	block_t user_block_count;		/* # of user blocks */
	block_t total_valid_block_count;	/* # of valid blocks */
	block_t alloc_valid_block_count;	/* # of allocated blocks */
	block_t last_valid_block_count;		/* for recovery */
	u32 s_next_generation;			/* for NFS support */
	atomic_t nr_pages[NR_COUNT_TYPE];	/* # of pages, see count_type */

	struct f2fs_mount_info mount_opt;	/* mount options */

	/* for cleaning operations */
	struct mutex gc_mutex;			/* mutex for GC */
	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
	unsigned int cur_victim_sec;		/* current victim section num */

	/* maximum # of trials to find a victim segment for SSR and GC */
	unsigned int max_victim_search;

	/*
	 * for stat information.
	 * one is for the LFS mode, and the other is for the SSR mode.
	 */
#ifdef CONFIG_F2FS_STAT_FS
	struct f2fs_stat_info *stat_info;	/* FS status information */
	unsigned int segment_count[2];		/* # of allocated segments */
	unsigned int block_count[2];		/* # of allocated blocks */
	int total_hit_ext, read_hit_ext;	/* extent cache hit ratio */
	int inline_inode;			/* # of inline_data inodes */
	int bg_gc;				/* background gc calls */
	unsigned int n_dirty_dirs;		/* # of dir inodes */
#endif
	unsigned int last_victim[2];		/* last victim segment # */
	spinlock_t stat_lock;			/* lock for stat operations */

	/* For sysfs support */
	struct kobject s_kobj;
	struct completion s_kobj_unregister;
};
static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
{
	return container_of(inode, struct f2fs_inode_info, vfs_inode);
}

static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_super_block *)(sbi->raw_super);
}

static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_checkpoint *)(sbi->ckpt);
}

static inline struct f2fs_node *F2FS_NODE(struct page *page)
{
	return (struct f2fs_node *)page_address(page);
}

static inline struct f2fs_inode *F2FS_INODE(struct page *page)
{
	return &((struct f2fs_node *)page_address(page))->i;
}

static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_nm_info *)(sbi->nm_info);
}

static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_sm_info *)(sbi->sm_info);
}

static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
{
	return (struct sit_info *)(SM_I(sbi)->sit_info);
}

static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
{
	return (struct free_segmap_info *)(SM_I(sbi)->free_info);
}

static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
{
	return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
}

static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->meta_inode->i_mapping;
}

static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->node_inode->i_mapping;
}
static inline void F2FS_SET_SB_DIRT(struct f2fs_sb_info *sbi)
{
	sbi->s_dirty = 1;
}

static inline void F2FS_RESET_SB_DIRT(struct f2fs_sb_info *sbi)
{
	sbi->s_dirty = 0;
}

static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
{
	return le64_to_cpu(cp->checkpoint_ver);
}

static inline bool is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	return ckpt_flags & f;
}

static inline void set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags |= f;
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}

static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags &= (~f);
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}
static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
{
	down_read(&sbi->cp_rwsem);
}

static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
{
	up_read(&sbi->cp_rwsem);
}

static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
{
	f2fs_down_write(&sbi->cp_rwsem, &sbi->cp_mutex);
}

static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
{
	up_write(&sbi->cp_rwsem);
}
/*
 * Check whether the given nid is within node id range.
 */
static inline int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
	if (unlikely(nid < F2FS_ROOT_INO(sbi)))
		return -EINVAL;
	if (unlikely(nid >= NM_I(sbi)->max_nid))
		return -EINVAL;
	return 0;
}

#define F2FS_DEFAULT_ALLOCATED_BLOCKS	1

/*
 * Check whether the inode has blocks or not
 */
static inline int F2FS_HAS_BLOCKS(struct inode *inode)
{
	if (F2FS_I(inode)->i_xattr_nid)
		return inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS + 1;
	else
		return inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS;
}

static inline bool f2fs_has_xattr_block(unsigned int ofs)
{
	return ofs == XATTR_NODE_OFFSET;
}
static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
				 struct inode *inode, blkcnt_t count)
{
	block_t	valid_block_count;

	spin_lock(&sbi->stat_lock);
	valid_block_count =
		sbi->total_valid_block_count + (block_t)count;
	if (unlikely(valid_block_count > sbi->user_block_count)) {
		spin_unlock(&sbi->stat_lock);
		return false;
	}
	inode->i_blocks += count;
	sbi->total_valid_block_count = valid_block_count;
	sbi->alloc_valid_block_count += (block_t)count;
	spin_unlock(&sbi->stat_lock);
	return true;
}

static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
						struct inode *inode,
						blkcnt_t count)
{
	spin_lock(&sbi->stat_lock);
	f2fs_bug_on(sbi->total_valid_block_count < (block_t) count);
	f2fs_bug_on(inode->i_blocks < count);
	inode->i_blocks -= count;
	sbi->total_valid_block_count -= (block_t)count;
	spin_unlock(&sbi->stat_lock);
}

static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_inc(&sbi->nr_pages[count_type]);
	F2FS_SET_SB_DIRT(sbi);
}

static inline void inode_inc_dirty_dents(struct inode *inode)
{
	inc_page_count(F2FS_SB(inode->i_sb), F2FS_DIRTY_DENTS);
	atomic_inc(&F2FS_I(inode)->dirty_dents);
}

static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_dec(&sbi->nr_pages[count_type]);
}

static inline void inode_dec_dirty_dents(struct inode *inode)
{
	if (!S_ISDIR(inode->i_mode))
		return;

	dec_page_count(F2FS_SB(inode->i_sb), F2FS_DIRTY_DENTS);
	atomic_dec(&F2FS_I(inode)->dirty_dents);
}

static inline int get_pages(struct f2fs_sb_info *sbi, int count_type)
{
	return atomic_read(&sbi->nr_pages[count_type]);
}

static inline int get_dirty_dents(struct inode *inode)
{
	return atomic_read(&F2FS_I(inode)->dirty_dents);
}
static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
{
	unsigned int pages_per_sec = sbi->segs_per_sec *
			(1 << sbi->log_blocks_per_seg);
	return ((get_pages(sbi, block_type) + pages_per_sec - 1)
			>> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
}

static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
{
	return sbi->total_valid_block_count;
}

static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

	/* return NAT or SIT bitmap */
	if (flag == NAT_BITMAP)
		return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
	else if (flag == SIT_BITMAP)
		return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);

	return 0;
}

static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	int offset;

	if (le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload) > 0) {
		if (flag == NAT_BITMAP)
			return &ckpt->sit_nat_version_bitmap;
		else
			return (unsigned char *)ckpt + F2FS_BLKSIZE;
	} else {
		offset = (flag == NAT_BITMAP) ?
			le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
		return &ckpt->sit_nat_version_bitmap + offset;
	}
}
static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
{
	block_t start_addr;
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned long long ckpt_version = cur_cp_version(ckpt);

	start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);

	/*
	 * An odd-numbered checkpoint should be at cp segment 0,
	 * and an even-numbered one at cp segment 1.
	 */
	if (!(ckpt_version & 1))
		start_addr += sbi->blocks_per_seg;

	return start_addr;
}
static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
{
	return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}
static inline bool inc_valid_node_count(struct f2fs_sb_info *sbi,
						struct inode *inode)
{
	block_t	valid_block_count;
	unsigned int valid_node_count;

	spin_lock(&sbi->stat_lock);

	valid_block_count = sbi->total_valid_block_count + 1;
	if (unlikely(valid_block_count > sbi->user_block_count)) {
		spin_unlock(&sbi->stat_lock);
		return false;
	}

	valid_node_count = sbi->total_valid_node_count + 1;
	if (unlikely(valid_node_count > sbi->total_node_count)) {
		spin_unlock(&sbi->stat_lock);
		return false;
	}

	if (inode)
		inode->i_blocks++;

	sbi->alloc_valid_block_count++;
	sbi->total_valid_node_count++;
	sbi->total_valid_block_count++;
	spin_unlock(&sbi->stat_lock);

	return true;
}

static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
						struct inode *inode)
{
	spin_lock(&sbi->stat_lock);

	f2fs_bug_on(!sbi->total_valid_block_count);
	f2fs_bug_on(!sbi->total_valid_node_count);
	f2fs_bug_on(!inode->i_blocks);

	inode->i_blocks--;
	sbi->total_valid_node_count--;
	sbi->total_valid_block_count--;

	spin_unlock(&sbi->stat_lock);
}

static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
{
	return sbi->total_valid_node_count;
}

static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
{
	spin_lock(&sbi->stat_lock);
	f2fs_bug_on(sbi->total_valid_inode_count == sbi->total_node_count);
	sbi->total_valid_inode_count++;
	spin_unlock(&sbi->stat_lock);
}

static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
{
	spin_lock(&sbi->stat_lock);
	f2fs_bug_on(!sbi->total_valid_inode_count);
	sbi->total_valid_inode_count--;
	spin_unlock(&sbi->stat_lock);
}

static inline unsigned int valid_inode_count(struct f2fs_sb_info *sbi)
{
	return sbi->total_valid_inode_count;
}
static inline void f2fs_put_page(struct page *page, int unlock)
{
	if (!page)
		return;

	if (unlock) {
		f2fs_bug_on(!PageLocked(page));
		unlock_page(page);
	}
	page_cache_release(page);
}

static inline void f2fs_put_dnode(struct dnode_of_data *dn)
{
	if (dn->node_page)
		f2fs_put_page(dn->node_page, 1);
	if (dn->inode_page && dn->node_page != dn->inode_page)
		f2fs_put_page(dn->inode_page, 0);
	dn->node_page = NULL;
	dn->inode_page = NULL;
}

static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
					size_t size)
{
	return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL);
}

static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
						gfp_t flags)
{
	void *entry;
retry:
	entry = kmem_cache_alloc(cachep, flags);
	if (!entry) {
		cond_resched();
		goto retry;
	}

	return entry;
}
#define RAW_IS_INODE(p)	((p)->footer.nid == (p)->footer.ino)

static inline bool IS_INODE(struct page *page)
{
	struct f2fs_node *p = F2FS_NODE(page);
	return RAW_IS_INODE(p);
}

static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
{
	return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
}

static inline block_t datablock_addr(struct page *node_page,
					unsigned int offset)
{
	struct f2fs_node *raw_node;
	__le32 *addr_array;

	raw_node = F2FS_NODE(node_page);
	addr_array = blkaddr_in_node(raw_node);
	return le32_to_cpu(addr_array[offset]);
}
static inline int f2fs_test_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	return mask & *addr;
}

static inline int f2fs_set_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr |= mask;
	return ret;
}

static inline int f2fs_clear_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr &= ~mask;
	return ret;
}
/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
	FI_DIRTY_DIR,		/* indicate directory has dirty pages */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_UPDATE_DIR,		/* should update inode block for consistency */
	FI_DELAY_IPUT,		/* used for the recovery */
	FI_NO_EXTENT,		/* not to use the extent cache */
	FI_INLINE_XATTR,	/* used for inline xattr */
	FI_INLINE_DATA,		/* used for inline data */
	FI_APPEND_WRITE,	/* inode has appended data */
	FI_UPDATE_WRITE,	/* inode has in-place-update data */
	FI_NEED_IPU,		/* used for ipu for fdatasync */
};
static inline void set_inode_flag(struct f2fs_inode_info *fi, int flag)
{
	if (!test_bit(flag, &fi->flags))
		set_bit(flag, &fi->flags);
}

static inline int is_inode_flag_set(struct f2fs_inode_info *fi, int flag)
{
	return test_bit(flag, &fi->flags);
}

static inline void clear_inode_flag(struct f2fs_inode_info *fi, int flag)
{
	if (test_bit(flag, &fi->flags))
		clear_bit(flag, &fi->flags);
}
static inline void set_acl_inode(struct f2fs_inode_info *fi, umode_t mode)
{
	fi->i_acl_mode = mode;
	set_inode_flag(fi, FI_ACL_MODE);
}

static inline int cond_clear_inode_flag(struct f2fs_inode_info *fi, int flag)
{
	if (is_inode_flag_set(fi, FI_ACL_MODE)) {
		clear_inode_flag(fi, FI_ACL_MODE);
		return 1;
	}
	return 0;
}

static inline void get_inline_info(struct f2fs_inode_info *fi,
					struct f2fs_inode *ri)
{
	if (ri->i_inline & F2FS_INLINE_XATTR)
		set_inode_flag(fi, FI_INLINE_XATTR);
	if (ri->i_inline & F2FS_INLINE_DATA)
		set_inode_flag(fi, FI_INLINE_DATA);
}

static inline void set_raw_inline(struct f2fs_inode_info *fi,
					struct f2fs_inode *ri)
{
	ri->i_inline = 0;

	if (is_inode_flag_set(fi, FI_INLINE_XATTR))
		ri->i_inline |= F2FS_INLINE_XATTR;
	if (is_inode_flag_set(fi, FI_INLINE_DATA))
		ri->i_inline |= F2FS_INLINE_DATA;
}

static inline int f2fs_has_inline_xattr(struct inode *inode)
{
	return is_inode_flag_set(F2FS_I(inode), FI_INLINE_XATTR);
}

static inline unsigned int addrs_per_inode(struct f2fs_inode_info *fi)
{
	if (f2fs_has_inline_xattr(&fi->vfs_inode))
		return DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS;
	return DEF_ADDRS_PER_INODE;
}

static inline void *inline_xattr_addr(struct page *page)
{
	struct f2fs_inode *ri = F2FS_INODE(page);
	return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
					F2FS_INLINE_XATTR_ADDRS]);
}

static inline int inline_xattr_size(struct inode *inode)
{
	if (f2fs_has_inline_xattr(inode))
		return F2FS_INLINE_XATTR_ADDRS << 2;
	else
		return 0;
}
static inline int f2fs_has_inline_data(struct inode *inode)
{
	return is_inode_flag_set(F2FS_I(inode), FI_INLINE_DATA);
}

static inline void *inline_data_addr(struct page *page)
{
	struct f2fs_inode *ri = F2FS_INODE(page);
	return (void *)&(ri->i_addr[1]);
}

static inline int f2fs_readonly(struct super_block *sb)
{
	return sb->s_flags & MS_RDONLY;
}

static inline void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi)
{
	set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
	sbi->sb->s_flags |= MS_RDONLY;
}
#define get_inode_mode(i) \
	((is_inode_flag_set(F2FS_I(i), FI_ACL_MODE)) ? \
	 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))

/* get offset of first page in next direct node */
#define PGOFS_OF_NEXT_DNODE(pgofs, fi)				\
	((pgofs < ADDRS_PER_INODE(fi)) ? ADDRS_PER_INODE(fi) :	\
	(pgofs - ADDRS_PER_INODE(fi) + ADDRS_PER_BLOCK) /	\
	ADDRS_PER_BLOCK * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi))
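/*
 * Worked example (illustrative only): assuming ADDRS_PER_INODE(fi) == 923
 * and ADDRS_PER_BLOCK == 1018, PGOFS_OF_NEXT_DNODE(100, fi) == 923 (the
 * first offset served by a direct node) and PGOFS_OF_NEXT_DNODE(1000, fi)
 * == 923 + 1018 == 1941.
 */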
int f2fs_sync_file(struct file *, loff_t, loff_t, int);
void truncate_data_blocks(struct dnode_of_data *);
int truncate_blocks(struct inode *, u64);
void f2fs_truncate(struct inode *);
int f2fs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
int f2fs_setattr(struct dentry *, struct iattr *);
int truncate_hole(struct inode *, pgoff_t, pgoff_t);
int truncate_data_blocks_range(struct dnode_of_data *, int);
long f2fs_ioctl(struct file *, unsigned int, unsigned long);
long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long);

void f2fs_set_inode_flags(struct inode *);
struct inode *f2fs_iget(struct super_block *, unsigned long);
int try_to_free_nats(struct f2fs_sb_info *, int);
void update_inode(struct inode *, struct page *);
void update_inode_page(struct inode *);
int f2fs_write_inode(struct inode *, struct writeback_control *);
void f2fs_evict_inode(struct inode *);

struct dentry *f2fs_get_parent(struct dentry *child);
struct f2fs_dir_entry *f2fs_find_entry(struct inode *, struct qstr *,
							struct page **);
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
ino_t f2fs_inode_by_name(struct inode *, struct qstr *);
void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
				struct page *, struct inode *);
int update_dent_inode(struct inode *, const struct qstr *);
int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *);
void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *);
int f2fs_do_tmpfile(struct inode *, struct inode *);
int f2fs_make_empty(struct inode *, struct inode *);
bool f2fs_empty_dir(struct inode *);

static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
{
	return __f2fs_add_link(dentry->d_parent->d_inode, &dentry->d_name,
				inode);
}

int f2fs_sync_fs(struct super_block *, int);
extern __printf(3, 4)
void f2fs_msg(struct super_block *, const char *, const char *, ...);

f2fs_hash_t f2fs_dentry_hash(const struct qstr *);
struct dnode_of_data;

bool available_free_memory(struct f2fs_sb_info *, int);
int is_checkpointed_node(struct f2fs_sb_info *, nid_t);
bool fsync_mark_done(struct f2fs_sb_info *, nid_t);
void fsync_mark_clear(struct f2fs_sb_info *, nid_t);
void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *);
int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int);
int truncate_inode_blocks(struct inode *, pgoff_t);
int truncate_xattr_node(struct inode *, struct page *);
int wait_on_node_pages_writeback(struct f2fs_sb_info *, nid_t);
void remove_inode_page(struct inode *);
struct page *new_inode_page(struct inode *);
struct page *new_node_page(struct dnode_of_data *, unsigned int, struct page *);
void ra_node_page(struct f2fs_sb_info *, nid_t);
struct page *get_node_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_node_page_ra(struct page *, int);
void sync_inode_page(struct dnode_of_data *);
int sync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *);
bool alloc_nid(struct f2fs_sb_info *, nid_t *);
void alloc_nid_done(struct f2fs_sb_info *, nid_t);
void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
void recover_node_page(struct f2fs_sb_info *, struct page *,
		struct f2fs_summary *, struct node_info *, block_t);
void recover_inline_xattr(struct inode *, struct page *);
bool recover_xattr_data(struct inode *, struct page *, block_t);
int recover_inode_page(struct f2fs_sb_info *, struct page *);
int restore_node_summary(struct f2fs_sb_info *, unsigned int,
			struct f2fs_summary_block *);
void flush_nat_entries(struct f2fs_sb_info *);
int build_node_manager(struct f2fs_sb_info *);
void destroy_node_manager(struct f2fs_sb_info *);
int __init create_node_manager_caches(void);
void destroy_node_manager_caches(void);

void f2fs_balance_fs(struct f2fs_sb_info *);
void f2fs_balance_fs_bg(struct f2fs_sb_info *);
int f2fs_issue_flush(struct f2fs_sb_info *);
int create_flush_cmd_control(struct f2fs_sb_info *);
void destroy_flush_cmd_control(struct f2fs_sb_info *);
void invalidate_blocks(struct f2fs_sb_info *, block_t);
void refresh_sit_entry(struct f2fs_sb_info *, block_t, block_t);
void clear_prefree_segments(struct f2fs_sb_info *);
void discard_next_dnode(struct f2fs_sb_info *, block_t);
int npages_for_summary_flush(struct f2fs_sb_info *);
void allocate_new_segments(struct f2fs_sb_info *);
struct page *get_sum_page(struct f2fs_sb_info *, unsigned int);
void write_meta_page(struct f2fs_sb_info *, struct page *);
void write_node_page(struct f2fs_sb_info *, struct page *,
		struct f2fs_io_info *, unsigned int, block_t, block_t *);
void write_data_page(struct page *, struct dnode_of_data *, block_t *,
		struct f2fs_io_info *);
void rewrite_data_page(struct page *, block_t, struct f2fs_io_info *);
void recover_data_page(struct f2fs_sb_info *, struct page *,
		struct f2fs_summary *, block_t, block_t);
void rewrite_node_page(struct f2fs_sb_info *, struct page *,
		struct f2fs_summary *, block_t, block_t);
void allocate_data_block(struct f2fs_sb_info *, struct page *,
		block_t, block_t *, struct f2fs_summary *, int);
void f2fs_wait_on_page_writeback(struct page *, enum page_type);
void write_data_summaries(struct f2fs_sb_info *, block_t);
void write_node_summaries(struct f2fs_sb_info *, block_t);
int lookup_journal_in_cursum(struct f2fs_summary_block *,
			int, unsigned int, int);
void flush_sit_entries(struct f2fs_sb_info *);
int build_segment_manager(struct f2fs_sb_info *);
void destroy_segment_manager(struct f2fs_sb_info *);
int __init create_segment_manager_caches(void);
void destroy_segment_manager_caches(void);

struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
int ra_meta_pages(struct f2fs_sb_info *, int, int, int);
long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
void add_dirty_inode(struct f2fs_sb_info *, nid_t, int type);
void remove_dirty_inode(struct f2fs_sb_info *, nid_t, int type);
bool exist_written_data(struct f2fs_sb_info *, nid_t, int);
int acquire_orphan_inode(struct f2fs_sb_info *);
void release_orphan_inode(struct f2fs_sb_info *);
void add_orphan_inode(struct f2fs_sb_info *, nid_t);
void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
void recover_orphan_inodes(struct f2fs_sb_info *);
int get_valid_checkpoint(struct f2fs_sb_info *);
void set_dirty_dir_page(struct inode *, struct page *);
void add_dirty_dir_inode(struct inode *);
void remove_dirty_dir_inode(struct inode *);
void sync_dirty_dir_inodes(struct f2fs_sb_info *);
void write_checkpoint(struct f2fs_sb_info *, bool);
void init_ino_entry_info(struct f2fs_sb_info *);
int __init create_checkpoint_caches(void);
void destroy_checkpoint_caches(void);

void f2fs_submit_merged_bio(struct f2fs_sb_info *, enum page_type, int);
int f2fs_submit_page_bio(struct f2fs_sb_info *, struct page *, block_t, int);
void f2fs_submit_page_mbio(struct f2fs_sb_info *, struct page *, block_t,
						struct f2fs_io_info *);
int reserve_new_block(struct dnode_of_data *);
int f2fs_reserve_block(struct dnode_of_data *, pgoff_t);
void update_extent_cache(block_t, struct dnode_of_data *);
struct page *find_data_page(struct inode *, pgoff_t, bool);
struct page *get_lock_data_page(struct inode *, pgoff_t);
struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool);
int do_write_data_page(struct page *, struct f2fs_io_info *);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *, u64, u64);

int start_gc_thread(struct f2fs_sb_info *);
void stop_gc_thread(struct f2fs_sb_info *);
block_t start_bidx_of_node(unsigned int, struct f2fs_inode_info *);
int f2fs_gc(struct f2fs_sb_info *);
void build_gc_manager(struct f2fs_sb_info *);
int __init create_gc_caches(void);
void destroy_gc_caches(void);

int recover_fsync_data(struct f2fs_sb_info *);
bool space_for_roll_forward(struct f2fs_sb_info *);
#ifdef CONFIG_F2FS_STAT_FS
struct f2fs_stat_info {
	struct list_head stat_list;
	struct f2fs_sb_info *sbi;
	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
	int main_area_segs, main_area_sections, main_area_zones;
	int hit_ext, total_ext;
	int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta;
	int nats, sits, fnids;
	int total_count, utilization;
	int bg_gc, inline_inode;
	unsigned int valid_count, valid_node_count, valid_inode_count;
	unsigned int bimodal, avg_vblocks;
	int util_free, util_valid, util_invalid;
	int rsvd_segs, overp_segs;
	int dirty_count, node_pages, meta_pages;
	int prefree_count, call_count, cp_count;
	int tot_segs, node_segs, data_segs, free_segs, free_secs;
	int tot_blks, data_blks, node_blks;
	int curseg[NR_CURSEG_TYPE];
	int cursec[NR_CURSEG_TYPE];
	int curzone[NR_CURSEG_TYPE];

	unsigned int segment_count[2];
	unsigned int block_count[2];
	unsigned base_mem, cache_mem;
};

static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_stat_info *)sbi->stat_info;
}
#define stat_inc_cp_count(si)		((si)->cp_count++)
#define stat_inc_call_count(si)		((si)->call_count++)
#define stat_inc_bggc_count(sbi)	((sbi)->bg_gc++)
#define stat_inc_dirty_dir(sbi)		((sbi)->n_dirty_dirs++)
#define stat_dec_dirty_dir(sbi)		((sbi)->n_dirty_dirs--)
#define stat_inc_total_hit(sb)		((F2FS_SB(sb))->total_hit_ext++)
#define stat_inc_read_hit(sb)		((F2FS_SB(sb))->read_hit_ext++)
#define stat_inc_inline_inode(inode)					\
	do {								\
		if (f2fs_has_inline_data(inode))			\
			((F2FS_SB(inode->i_sb))->inline_inode++);	\
	} while (0)
#define stat_dec_inline_inode(inode)					\
	do {								\
		if (f2fs_has_inline_data(inode))			\
			((F2FS_SB(inode->i_sb))->inline_inode--);	\
	} while (0)

#define stat_inc_seg_type(sbi, curseg)					\
		((sbi)->segment_count[(curseg)->alloc_type]++)
#define stat_inc_block_count(sbi, curseg)				\
		((sbi)->block_count[(curseg)->alloc_type]++)

#define stat_inc_seg_count(sbi, type)					\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		(si)->tot_segs++;					\
		if (type == SUM_TYPE_DATA)				\
			si->data_segs++;				\
		else							\
			si->node_segs++;				\
	} while (0)

#define stat_inc_tot_blk_count(si, blks)				\
	(si->tot_blks += (blks))

#define stat_inc_data_blk_count(sbi, blks)				\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		stat_inc_tot_blk_count(si, blks);			\
		si->data_blks += (blks);				\
	} while (0)

#define stat_inc_node_blk_count(sbi, blks)				\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		stat_inc_tot_blk_count(si, blks);			\
		si->node_blks += (blks);				\
	} while (0)

int f2fs_build_stats(struct f2fs_sb_info *);
void f2fs_destroy_stats(struct f2fs_sb_info *);
void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
#else
#define stat_inc_cp_count(si)
#define stat_inc_call_count(si)
#define stat_inc_bggc_count(si)
#define stat_inc_dirty_dir(sbi)
#define stat_dec_dirty_dir(sbi)
#define stat_inc_total_hit(sb)
#define stat_inc_read_hit(sb)
#define stat_inc_inline_inode(inode)
#define stat_dec_inline_inode(inode)
#define stat_inc_seg_type(sbi, curseg)
#define stat_inc_block_count(sbi, curseg)
#define stat_inc_seg_count(si, type)
#define stat_inc_tot_blk_count(si, blks)
#define stat_inc_data_blk_count(si, blks)
#define stat_inc_node_blk_count(sbi, blks)

static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
static inline void __init f2fs_create_root_stats(void) { }
static inline void f2fs_destroy_root_stats(void) { }
#endif
extern const struct file_operations f2fs_dir_operations;
extern const struct file_operations f2fs_file_operations;
extern const struct inode_operations f2fs_file_inode_operations;
extern const struct address_space_operations f2fs_dblock_aops;
extern const struct address_space_operations f2fs_node_aops;
extern const struct address_space_operations f2fs_meta_aops;
extern const struct inode_operations f2fs_dir_inode_operations;
extern const struct inode_operations f2fs_symlink_inode_operations;
extern const struct inode_operations f2fs_special_inode_operations;

bool f2fs_may_inline(struct inode *);
int f2fs_read_inline_data(struct inode *, struct page *);
int f2fs_convert_inline_data(struct inode *, pgoff_t);
int f2fs_write_inline_data(struct inode *, struct page *, unsigned int);
void truncate_inline_data(struct inode *, u64);
int recover_inline_data(struct inode *, struct page *);