/*
 * fs/f2fs/segment.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/blkdev.h>
#include <linux/backing-dev.h>

/* constant macro */
#define NULL_SEGNO			((unsigned int)(~0))
#define NULL_SECNO			((unsigned int)(~0))

#define DEF_RECLAIM_PREFREE_SEGMENTS	5	/* 5% over total segments */

#define F2FS_MIN_SEGMENTS	9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */

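/*
 * Worked example of the minimum: 1 segment for the superblock area, 2 copies
 * each of the checkpoint, SIT, and NAT areas (2 * 3 = 6), 1 segment for SSA,
 * and 1 segment for the main area: 1 + 6 + 1 + 1 = 9 segments in total.
 */
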
/* L: Logical segment # in volume, R: Relative segment # in main area */
#define GET_L2R_SEGNO(free_i, segno)	(segno - free_i->start_segno)
#define GET_R2L_SEGNO(free_i, segno)	(segno + free_i->start_segno)

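/*
 * Hypothetical example: if the main area starts at segment #100
 * (free_i->start_segno == 100), logical segment #103 is relative segment #3:
 * GET_L2R_SEGNO(free_i, 103) == 3 and GET_R2L_SEGNO(free_i, 3) == 103.
 */
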
#define IS_DATASEG(t)	(t <= CURSEG_COLD_DATA)
#define IS_NODESEG(t)	(t >= CURSEG_HOT_NODE)

#define IS_CURSEG(sbi, seg)						\
	((seg == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) ||	\
	 (seg == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) ||	\
	 (seg == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) ||	\
	 (seg == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) ||	\
	 (seg == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) ||	\
	 (seg == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno))

#define IS_CURSEC(sbi, secno)						\
	((secno == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno /		\
	  sbi->segs_per_sec) ||						\
	 (secno == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno /		\
	  sbi->segs_per_sec) ||						\
	 (secno == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno /		\
	  sbi->segs_per_sec) ||						\
	 (secno == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno /		\
	  sbi->segs_per_sec) ||						\
	 (secno == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno /		\
	  sbi->segs_per_sec) ||						\
	 (secno == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno /		\
	  sbi->segs_per_sec))

#define MAIN_BLKADDR(sbi)						\
	(SM_I(sbi) ? SM_I(sbi)->main_blkaddr :				\
		le32_to_cpu(F2FS_RAW_SUPER(sbi)->main_blkaddr))
#define SEG0_BLKADDR(sbi)						\
	(SM_I(sbi) ? SM_I(sbi)->seg0_blkaddr :				\
		le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment0_blkaddr))

#define MAIN_SEGS(sbi)	(SM_I(sbi)->main_segments)
#define MAIN_SECS(sbi)	(sbi->total_sections)

#define TOTAL_SEGS(sbi)							\
	(SM_I(sbi) ? SM_I(sbi)->segment_count :				\
		le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count))
#define TOTAL_BLKS(sbi)	(TOTAL_SEGS(sbi) << sbi->log_blocks_per_seg)

#define MAX_BLKADDR(sbi)	(SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))

#define SEGMENT_SIZE(sbi)	(1ULL << (sbi->log_blocksize +		\
					sbi->log_blocks_per_seg))

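/*
 * Worked example, assuming the common f2fs geometry of 4KB blocks
 * (log_blocksize == 12) and 512 blocks per segment (log_blocks_per_seg == 9):
 * SEGMENT_SIZE(sbi) == 1ULL << (12 + 9) == 2MB.
 */
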
#define START_BLOCK(sbi, segno)	(SEG0_BLKADDR(sbi) +			\
	(GET_R2L_SEGNO(FREE_I(sbi), segno) << sbi->log_blocks_per_seg))

#define NEXT_FREE_BLKADDR(sbi, curseg)					\
	(START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff)

#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr)	((blk_addr) - SEG0_BLKADDR(sbi))
#define GET_SEGNO_FROM_SEG0(sbi, blk_addr)				\
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> sbi->log_blocks_per_seg)
#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr)				\
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & (sbi->blocks_per_seg - 1))

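/*
 * Hypothetical decomposition, assuming SEG0_BLKADDR(sbi) == 512 and
 * 512 blocks per segment: blk_addr 1500 is offset 988 from segment #0,
 * i.e. segment #1 (988 >> 9) at block offset 476 (988 & 511).
 */
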
#define GET_SEGNO(sbi, blk_addr)					\
	((!is_valid_data_blkaddr(sbi, blk_addr)) ?			\
	NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi),				\
		GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
#define GET_SECNO(sbi, segno)						\
	((segno) / sbi->segs_per_sec)
#define GET_ZONENO_FROM_SEGNO(sbi, segno)				\
	((segno / sbi->segs_per_sec) / sbi->secs_per_zone)

#define GET_SUM_BLOCK(sbi, segno)					\
	((sbi->sm_info->ssa_blkaddr) + segno)

#define GET_SUM_TYPE(footer) ((footer)->entry_type)
#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = type)

#define SIT_ENTRY_OFFSET(sit_i, segno)					\
	(segno % sit_i->sents_per_block)
#define SIT_BLOCK_OFFSET(segno)						\
	(segno / SIT_ENTRY_PER_BLOCK)
#define	START_SEGNO(segno)						\
	(SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK)
#define SIT_BLK_CNT(sbi)						\
	((MAIN_SEGS(sbi) + SIT_ENTRY_PER_BLOCK - 1) / SIT_ENTRY_PER_BLOCK)
#define f2fs_bitmap_size(nr)						\
	(BITS_TO_LONGS(nr) * sizeof(unsigned long))

#define SECTOR_FROM_BLOCK(blk_addr)					\
	(((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
#define SECTOR_TO_BLOCK(sectors)					\
	(sectors >> F2FS_LOG_SECTORS_PER_BLOCK)

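/*
 * Worked example, assuming 4KB blocks and 512-byte sectors
 * (F2FS_LOG_SECTORS_PER_BLOCK == 3): block 100 starts at sector
 * SECTOR_FROM_BLOCK(100) == 800, and SECTOR_TO_BLOCK(800) == 100.
 */
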
#define MAX_BIO_BLOCKS(sbi)						\
	((int)min((int)max_hw_blocks(sbi), BIO_MAX_PAGES))

/*
 * indicate a block allocation direction: RIGHT and LEFT.
 * RIGHT means allocating new sections towards the end of the volume.
 * LEFT means the opposite direction.
 */
enum {
	ALLOC_RIGHT = 0,
	ALLOC_LEFT
};

/*
 * In the victim_sel_policy->alloc_mode, there are two block allocation modes.
 * LFS writes data sequentially with cleaning operations.
 * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations.
 */
enum {
	LFS = 0,
	SSR
};

/*
 * In the victim_sel_policy->gc_mode, there are two gc, aka cleaning, modes.
 * GC_CB is based on cost-benefit algorithm.
 * GC_GREEDY is based on greedy algorithm.
 */
enum {
	GC_CB = 0,
	GC_GREEDY
};

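/*
 * Roughly speaking, GC_GREEDY picks the dirty segment with the fewest valid
 * blocks (cheapest to migrate), while GC_CB also weighs segment age against
 * utilization so that old, mostly-invalid segments are preferred; the exact
 * cost functions live in the get_victim path in gc.c.
 */
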
/*
 * BG_GC means the background cleaning job.
 * FG_GC means the on-demand cleaning job.
 * FORCE_FG_GC means an on-demand cleaning job forced from the background.
 */
enum {
	BG_GC = 0,
	FG_GC,
	FORCE_FG_GC,
};

/* for a function parameter to select a victim segment */
struct victim_sel_policy {
	int alloc_mode;			/* LFS or SSR */
	int gc_mode;			/* GC_CB or GC_GREEDY */
	unsigned long *dirty_segmap;	/* dirty segment bitmap */
	unsigned int max_search;	/* maximum # of segments to search */
	unsigned int offset;		/* last scanned bitmap offset */
	unsigned int ofs_unit;		/* bitmap search unit */
	unsigned int min_cost;		/* minimum cost */
	unsigned int min_segno;		/* segment # having min. cost */
};

struct seg_entry {
	unsigned short valid_blocks;	/* # of valid blocks */
	unsigned char *cur_valid_map;	/* validity bitmap of blocks */
	/*
	 * # of valid blocks and the validity bitmap stored in the last
	 * checkpoint pack. This information is used by the SSR mode.
	 */
	unsigned short ckpt_valid_blocks;
	unsigned char *ckpt_valid_map;
	unsigned char *discard_map;
	unsigned char type;		/* segment type like CURSEG_XXX_TYPE */
	unsigned long long mtime;	/* modification time of the segment */
};

struct sec_entry {
	unsigned int valid_blocks;	/* # of valid blocks in a section */
};

struct segment_allocation {
	void (*allocate_segment)(struct f2fs_sb_info *, int, bool);
};

/*
 * this value is set in page as private data, which indicates that the page
 * is atomically written and sits on the inmem_pages list.
 */
#define ATOMIC_WRITTEN_PAGE		0x0000ffff

#define IS_ATOMIC_WRITTEN_PAGE(page)					\
		(page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE)

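/*
 * A minimal sketch of how a page gets tagged (mirroring the inmem-page
 * registration path in segment.c): the marker is stashed in page->private,
 * which is exactly what IS_ATOMIC_WRITTEN_PAGE() tests.
 *
 *	set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
 *	SetPagePrivate(page);
 */
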
struct inmem_pages {
	struct list_head list;
	struct page *page;
};

struct sit_info {
	const struct segment_allocation *s_ops;

	block_t sit_base_addr;		/* start block address of SIT area */
	block_t sit_blocks;		/* # of blocks used by SIT area */
	block_t written_valid_blocks;	/* # of valid blocks in main area */
	char *sit_bitmap;		/* SIT bitmap pointer */
	unsigned int bitmap_size;	/* SIT bitmap size */

	unsigned long *tmp_map;			/* bitmap for temporary use */
	unsigned long *dirty_sentries_bitmap;	/* bitmap for dirty sentries */
	unsigned int dirty_sentries;		/* # of dirty sentries */
	unsigned int sents_per_block;		/* # of SIT entries per block */
	struct mutex sentry_lock;		/* to protect SIT cache */
	struct seg_entry *sentries;		/* SIT segment-level cache */
	struct sec_entry *sec_entries;		/* SIT section-level cache */

	/* for cost-benefit algorithm in cleaning procedure */
	unsigned long long elapsed_time;	/* elapsed time after mount */
	unsigned long long mounted_time;	/* mount time */
	unsigned long long min_mtime;		/* min. modification time */
	unsigned long long max_mtime;		/* max. modification time */
};

struct free_segmap_info {
	unsigned int start_segno;	/* start segment number logically */
	unsigned int free_segments;	/* # of free segments */
	unsigned int free_sections;	/* # of free sections */
	spinlock_t segmap_lock;		/* free segmap lock */
	unsigned long *free_segmap;	/* free segment bitmap */
	unsigned long *free_secmap;	/* free section bitmap */
};

/* Notice: The order of dirty type is the same as CURSEG_XXX in f2fs.h */
enum dirty_type {
	DIRTY_HOT_DATA,		/* dirty segments assigned as hot data logs */
	DIRTY_WARM_DATA,	/* dirty segments assigned as warm data logs */
	DIRTY_COLD_DATA,	/* dirty segments assigned as cold data logs */
	DIRTY_HOT_NODE,		/* dirty segments assigned as hot node logs */
	DIRTY_WARM_NODE,	/* dirty segments assigned as warm node logs */
	DIRTY_COLD_NODE,	/* dirty segments assigned as cold node logs */
	DIRTY,			/* to count # of dirty segments */
	PRE,			/* to count # of entirely obsolete segments */
	NR_DIRTY_TYPE
};

struct dirty_seglist_info {
	const struct victim_selection *v_ops;	/* victim selection operation */
	unsigned long *dirty_segmap[NR_DIRTY_TYPE];
	struct mutex seglist_lock;		/* lock for segment bitmaps */
	int nr_dirty[NR_DIRTY_TYPE];		/* # of dirty segments */
	unsigned long *victim_secmap;		/* background GC victims */
};

/* victim selection function for cleaning and SSR */
struct victim_selection {
	int (*get_victim)(struct f2fs_sb_info *, unsigned int *,
							int, int, char);
};

/* for active log information */
struct curseg_info {
	struct mutex curseg_mutex;		/* lock for consistency */
	struct f2fs_summary_block *sum_blk;	/* cached summary block */
	unsigned char alloc_type;		/* current allocation type */
	unsigned int segno;			/* current segment number */
	unsigned short next_blkoff;		/* next block offset to write */
	unsigned int zone;			/* current zone number */
	unsigned int next_segno;		/* preallocated segment */
};

struct sit_entry_set {
	struct list_head set_list;	/* link with all sit sets */
	unsigned int start_segno;	/* start segno of sits in set */
	unsigned int entry_cnt;		/* the # of sit entries in set */
};

/*
 * inline functions
 */
static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
{
	return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
}

static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sentries[segno];
}

static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sec_entries[GET_SECNO(sbi, segno)];
}

static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
				unsigned int segno, int section)
{
	/*
	 * In order to get # of valid blocks in a section instantly from many
	 * segments, f2fs manages two counting structures separately.
	 */
	if (section > 1)
		return get_sec_entry(sbi, segno)->valid_blocks;
	else
		return get_seg_entry(sbi, segno)->valid_blocks;
}

static inline void seg_info_from_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	se->valid_blocks = GET_SIT_VBLOCKS(rs);
	se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs);
	memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	se->type = GET_SIT_TYPE(rs);
	se->mtime = le64_to_cpu(rs->mtime);
}

static inline void seg_info_to_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) |
					se->valid_blocks;

	rs->vblocks = cpu_to_le16(raw_vblocks);
	memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	se->ckpt_valid_blocks = se->valid_blocks;
	rs->mtime = cpu_to_le64(se->mtime);
}

static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
		unsigned int max, unsigned int segno)
{
	unsigned int ret;

	spin_lock(&free_i->segmap_lock);
	ret = find_next_bit(free_i->free_segmap, max, segno);
	spin_unlock(&free_i->segmap_lock);
	return ret;
}

static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;
	unsigned int start_segno = secno * sbi->segs_per_sec;
	unsigned int next;

	spin_lock(&free_i->segmap_lock);
	clear_bit(segno, free_i->free_segmap);
	free_i->free_segments++;

	/* a section becomes free only if every segment in it is free */
	next = find_next_bit(free_i->free_segmap,
			start_segno + sbi->segs_per_sec, start_segno);
	if (next >= start_segno + sbi->segs_per_sec) {
		clear_bit(secno, free_i->free_secmap);
		free_i->free_sections++;
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void __set_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;

	set_bit(segno, free_i->free_segmap);
	free_i->free_segments--;
	if (!test_and_set_bit(secno, free_i->free_secmap))
		free_i->free_sections--;
}

static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;
	unsigned int start_segno = secno * sbi->segs_per_sec;
	unsigned int next;

	spin_lock(&free_i->segmap_lock);
	if (test_and_clear_bit(segno, free_i->free_segmap)) {
		free_i->free_segments++;

		/* never mark a section free while a current segment uses it */
		if (IS_CURSEC(sbi, secno))
			goto skip_free;
		next = find_next_bit(free_i->free_segmap,
				start_segno + sbi->segs_per_sec, start_segno);
		if (next >= start_segno + sbi->segs_per_sec) {
			if (test_and_clear_bit(secno, free_i->free_secmap))
				free_i->free_sections++;
		}
	}
skip_free:
	spin_unlock(&free_i->segmap_lock);
}

static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;

	spin_lock(&free_i->segmap_lock);
	if (!test_and_set_bit(segno, free_i->free_segmap)) {
		free_i->free_segments--;
		if (!test_and_set_bit(secno, free_i->free_secmap))
			free_i->free_sections--;
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
		void *dst_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);

	memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size);
}

static inline block_t written_block_count(struct f2fs_sb_info *sbi)
{
	return SIT_I(sbi)->written_valid_blocks;
}

static inline unsigned int free_segments(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_segments;
}

static inline int reserved_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->reserved_segments;
}

static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_sections;
}

static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[PRE];
}

static inline unsigned int dirty_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_NODE];
}

static inline int overprovision_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->ovp_segments;
}

static inline int overprovision_sections(struct f2fs_sb_info *sbi)
{
	return ((unsigned int) overprovision_segments(sbi)) / sbi->segs_per_sec;
}

static inline int reserved_sections(struct f2fs_sb_info *sbi)
{
	return ((unsigned int) reserved_segments(sbi)) / sbi->segs_per_sec;
}

static inline bool need_SSR(struct f2fs_sb_info *sbi)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);

	return free_sections(sbi) <= (node_secs + 2 * dent_secs +
						reserved_sections(sbi) + 1);
}

static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);

	/* skip the check while power-on recovery is in flight */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return false;

	return (free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs +
						reserved_sections(sbi));
}

static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
{
	return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments;
}

static inline int utilization(struct f2fs_sb_info *sbi)
{
	return div_u64((u64)valid_user_blocks(sbi) * 100,
					sbi->user_block_count);
}

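/*
 * Worked example: with 750,000 valid user blocks out of a user_block_count
 * of 1,000,000, utilization() returns 75 (percent).
 */
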
/*
 * In some cases, f2fs performs better if it drops the out-of-place update
 * policy and updates blocks in place. Users can control the policy through
 * sysfs entries. There are five policies with triggering conditions as
 * follows.
 * F2FS_IPU_FORCE - all the time,
 * F2FS_IPU_SSR - if SSR mode is activated,
 * F2FS_IPU_UTIL - if FS utilization is over threshold,
 * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over
 *                     threshold,
 * F2FS_IPU_FSYNC - activated in fsync path only for high performance flash
 *                     storages. IPU will be triggered only if the # of dirty
 *                     pages exceeds min_fsync_blocks.
 * F2FS_IPU_DISABLE - disable IPU. (=default option)
 */
#define DEF_MIN_IPU_UTIL	70
#define DEF_MIN_FSYNC_BLOCKS	8

enum {
	F2FS_IPU_FORCE,
	F2FS_IPU_SSR,
	F2FS_IPU_UTIL,
	F2FS_IPU_SSR_UTIL,
	F2FS_IPU_FSYNC,
};

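/*
 * The in-memory ipu_policy is a bitmask indexed by the F2FS_IPU_* values
 * above and is exposed through sysfs. For example (hypothetical device
 * name), enabling the SSR-triggered policy, i.e. bit F2FS_IPU_SSR, from
 * userspace:
 *
 *	echo 2 > /sys/fs/f2fs/sda1/ipu_policy
 */
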
static inline bool need_inplace_update(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int policy = SM_I(sbi)->ipu_policy;

	/* IPU can be done only for the user data */
	if (S_ISDIR(inode->i_mode) || f2fs_is_atomic_file(inode))
		return false;

	if (policy & (0x1 << F2FS_IPU_FORCE))
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR) && need_SSR(sbi))
		return true;
	if (policy & (0x1 << F2FS_IPU_UTIL) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && need_SSR(sbi) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;

	/* this is only set during fdatasync */
	if (policy & (0x1 << F2FS_IPU_FSYNC) &&
			is_inode_flag_set(F2FS_I(inode), FI_NEED_IPU))
		return true;

	return false;
}

static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->segno;
}

static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->alloc_type;
}

static inline unsigned short curseg_blkoff(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->next_blkoff;
}

static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
{
	f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1);
}

static inline void verify_block_addr(struct f2fs_io_info *fio, block_t blk_addr)
{
	struct f2fs_sb_info *sbi = fio->sbi;

	if (__is_meta_io(fio))
		verify_blkaddr(sbi, blk_addr, META_GENERIC);
	else
		verify_blkaddr(sbi, blk_addr, DATA_GENERIC);
}

/*
 * Summary block is always treated as an invalid block
 */
static inline int check_block_count(struct f2fs_sb_info *sbi,
		int segno, struct f2fs_sit_entry *raw_sit)
{
	bool is_valid  = test_bit_le(0, raw_sit->valid_map) ? true : false;
	int valid_blocks = 0;
	int cur_pos = 0, next_pos;

	/* check bitmap with valid block count */
	do {
		if (is_valid) {
			next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
					sbi->blocks_per_seg,
					cur_pos);
			valid_blocks += next_pos - cur_pos;
		} else
			next_pos = find_next_bit_le(&raw_sit->valid_map,
					sbi->blocks_per_seg,
					cur_pos);
		cur_pos = next_pos;
		is_valid = !is_valid;
	} while (cur_pos < sbi->blocks_per_seg);

	if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) {
		f2fs_msg(sbi->sb, KERN_ERR,
				"Mismatch valid blocks %d vs. %d",
					GET_SIT_VBLOCKS(raw_sit), valid_blocks);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		return -EINVAL;
	}

	/* check segment usage, and check boundary of a given segment number */
	if (unlikely(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
					|| segno > TOTAL_SEGS(sbi) - 1)) {
		f2fs_msg(sbi->sb, KERN_ERR,
				"Wrong valid blocks %d or segno %u",
					GET_SIT_VBLOCKS(raw_sit), segno);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		return -EINVAL;
	}
	return 0;
}

static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
						unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(start);
	block_t blk_addr = sit_i->sit_base_addr + offset;

	check_seg_range(sbi, start);

	/* calculate sit block address */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	return blk_addr;
}

static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi,
						pgoff_t block_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);

	block_addr -= sit_i->sit_base_addr;
	if (block_addr < sit_i->sit_blocks)
		block_addr += sit_i->sit_blocks;
	else
		block_addr -= sit_i->sit_blocks;

	return block_addr + sit_i->sit_base_addr;
}

static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
{
	unsigned int block_off = SIT_BLOCK_OFFSET(start);

	f2fs_change_bit(block_off, sit_i->sit_bitmap);
}

static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);

	return sit_i->elapsed_time + CURRENT_TIME_SEC.tv_sec -
						sit_i->mounted_time;
}

static inline void set_summary(struct f2fs_summary *sum, nid_t nid,
			unsigned int ofs_in_node, unsigned char version)
{
	sum->nid = cpu_to_le32(nid);
	sum->ofs_in_node = cpu_to_le16(ofs_in_node);
	sum->version = version;
}

static inline block_t start_sum_block(struct f2fs_sb_info *sbi)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count)
				- (base + 1) + type;
}

static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
{
	if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
		return true;
	return false;
}

static inline unsigned int max_hw_blocks(struct f2fs_sb_info *sbi)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct request_queue *q = bdev_get_queue(bdev);

	return SECTOR_TO_BLOCK(queue_max_sectors(q));
}

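/*
 * Worked example, assuming 4KB blocks: if the device queue advertises
 * queue_max_sectors(q) == 1024 (512-byte sectors), max_hw_blocks() returns
 * 1024 >> 3 == 128 blocks, i.e. a 512KB upper bound per bio.
 */
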
/*
 * It is very important to gather dirty pages and write them at once, so that
 * we can submit a big bio without interfering with other data writes.
 * By default, 512 pages for directory data,
 * 512 pages (2MB) * 3 for three types of nodes, and
 * max_bio_blocks for meta are set.
 */
static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
{
	if (sbi->sb->s_bdi->wb.dirty_exceeded)
		return 0;

	if (type == DATA)
		return sbi->blocks_per_seg;
	else if (type == NODE)
		return 3 * sbi->blocks_per_seg;
	else if (type == META)
		return MAX_BIO_BLOCKS(sbi);
	else
		return 0;
}

/*
 * When writing pages, it is better to align nr_to_write with the segment size.
 */
static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
					struct writeback_control *wbc)
{
	long nr_to_write, desired;

	if (wbc->sync_mode != WB_SYNC_NONE)
		return 0;

	nr_to_write = wbc->nr_to_write;

	if (type == DATA)
		desired = 4096;
	else if (type == NODE)
		desired = 3 * max_hw_blocks(sbi);
	else
		desired = MAX_BIO_BLOCKS(sbi);

	wbc->nr_to_write = desired;
	return desired - nr_to_write;
}