/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/blkdev.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *winode_slab;

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = GC_THREAD_MIN_SLEEP_TIME;

	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
					kthread_should_stop(),
					msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			wait_ms = GC_THREAD_MAX_SLEEP_TIME;
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs too frequently,
		 * because some segments can be invalidated soon after by a
		 * user update or deletion, so wait a while here to let
		 * dirty segments accumulate.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			wait_ms = increase_sleep_time(wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			wait_ms = decrease_sleep_time(wait_ms);
		else
			wait_ms = increase_sleep_time(wait_ms);

#ifdef CONFIG_F2FS_STAT_FS
		sbi->bg_gc++;
#endif

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi))
			wait_ms = GC_THREAD_NOGC_SLEEP_TIME;
	} while (!kthread_should_stop());
	return 0;
}

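/*
 * Sleep-time helpers: increase_sleep_time() and decrease_sleep_time()
 * are defined in gc.h; the sketch below shows the additive backoff they
 * are assumed to implement (values in milliseconds, clamped between
 * GC_THREAD_MIN_SLEEP_TIME and GC_THREAD_MAX_SLEEP_TIME):
 *
 *	static inline long increase_sleep_time(long wait)
 *	{
 *		wait += GC_THREAD_MIN_SLEEP_TIME;
 *		if (wait > GC_THREAD_MAX_SLEEP_TIME)
 *			wait = GC_THREAD_MAX_SLEEP_TIME;
 *		return wait;
 *	}
 *
 * With decrease_sleep_time() mirroring this via subtraction, the thread
 * polls quickly while invalid blocks accumulate and backs off when the
 * filesystem is busy or already clean.
 */
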
int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	if (!test_opt(sbi, BG_GC))
		goto out;
	gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

static int select_gc_type(int gc_type)
{
	return (gc_type == BG_GC) ? GC_CB : GC_GREEDY;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}
	p->offset = sbi->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return 1 << sbi->log_blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

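/*
 * Worked example, assuming 512 blocks per segment (log_blocks_per_seg
 * == 9) and one segment per section: SSR caps the cost at 512, greedy
 * at 512 * 1 = 512 valid blocks, and cost-benefit at UINT_MAX because
 * get_cb_cost() maps its benefit into the full unsigned range. Since a
 * candidate whose cost equals this maximum can never improve
 * p->min_cost, the search loop skips it without charging the search
 * budget.
 */
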
static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int hint = 0;
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can select the victim sections
	 * already chosen by background GC.
	 * Those sections are guaranteed to have a small number of valid
	 * blocks.
	 */
next:
	secno = find_next_bit(dirty_i->victim_secmap, TOTAL_SECS(sbi), hint++);
	if (secno < TOTAL_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			goto next;
		clear_bit(secno, dirty_i->victim_secmap);
		return secno * sbi->segs_per_sec;
	}
	return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SECNO(sbi, segno);
	unsigned int start = secno * sbi->segs_per_sec;
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle if the system time is changed by user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

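/*
 * Worked example of the cost-benefit formula above (illustrative
 * numbers): with utilization u = 40 and age = 50, the benefit term is
 * 100 * (100 - 40) * 50 / (100 + 40) = 2142, i.e. a cost of
 * UINT_MAX - 2142. An older, emptier section with u = 10 and age = 90
 * yields 100 * 90 * 90 / 110 = 7363, a strictly smaller cost, so it
 * wins: reclaimable space is weighted by how long its data has been
 * left stable, the classic cost-benefit victim policy.
 */
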
static unsigned int get_gc_cost(struct f2fs_sb_info *sbi, unsigned int segno,
					struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
	else
		return get_cb_cost(sbi, segno);
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment and does
 * not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds the segment
 * with the minimum number of valid blocks and removes it from the
 * dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, max_cost;
	int nsearched = 0;

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = max_cost = get_max_cost(sbi, &p);

	mutex_lock(&dirty_i->seglist_lock);

	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap,
						TOTAL_SEGS(sbi), p.offset);
		if (segno >= TOTAL_SEGS(sbi)) {
			if (sbi->last_victim[p.gc_mode]) {
				sbi->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}
		p.offset = ((segno / p.ofs_unit) * p.ofs_unit) + p.ofs_unit;
		secno = GET_SECNO(sbi, segno);

		if (sec_usage_check(sbi, secno))
			continue;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			continue;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}

		if (cost == max_cost)
			continue;

		if (nsearched++ >= MAX_VICTIM_SEARCH) {
			sbi->last_victim[p.gc_mode] = segno;
			break;
		}
	}
got_it:
	if (p.min_segno != NULL_SEGNO) {
		if (p.alloc_mode == LFS) {
			secno = GET_SECNO(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
	}
	trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
			sbi->cur_victim_sec,
			prefree_segments(sbi), free_segments(sbi));
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

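/*
 * Whatever the mode, a lower cost means a better victim: under
 * GC_GREEDY the cost is simply the number of valid blocks that must be
 * migrated, while GC_CB inverts its benefit against UINT_MAX so the
 * same "minimize cost" comparison applies. The return value is 1 when
 * a victim was found and 0 otherwise, which f2fs_gc() treats as
 * "nothing to collect".
 */
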
static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(nid_t ino, struct list_head *ilist)
{
	struct inode_entry *ie;

	list_for_each_entry(ie, ilist, list)
		if (ie->inode->i_ino == ino)
			return ie->inode;
	return NULL;
}

static void add_gc_inode(struct inode *inode, struct list_head *ilist)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(inode->i_ino, ilist)) {
		iput(inode);
		return;
	}
repeat:
	new_ie = kmem_cache_alloc(winode_slab, GFP_NOFS);
	if (!new_ie) {
		cond_resched();
		goto repeat;
	}
	new_ie->inode = inode;
	list_add_tail(&new_ie->list, ilist);
}

static void put_gc_inode(struct list_head *ilist)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, ilist, list) {
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(winode_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with
 * the one in the NAT. If they match, the node is valid and is copied
 * with cold status; otherwise the stale node is ignored.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	bool initial = true;
	struct f2fs_summary *entry;
	int off;

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;

		/* stop BG_GC if there is not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (initial) {
			/* first pass: readahead only */
			ra_node_page(sbi, nid);
			continue;
		}
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* set page dirty and write it */
		if (gc_type == FG_GC) {
			f2fs_submit_bio(sbi, NODE, true);
			wait_on_page_writeback(node_page);
			set_page_dirty(node_page);
		} else {
			if (!PageWriteback(node_page))
				set_page_dirty(node_page);
		}
		f2fs_put_page(node_page, 1);
		stat_inc_node_blk_count(sbi, 1);
	}

	if (initial) {
		initial = false;
		goto next_step;
	}

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = LONG_MAX,
			.for_reclaim = 0,
		};
		sync_node_pages(sbi, 0, &wbc);

		/*
		 * In the case of FG_GC, it'd be better to reclaim this
		 * victim completely.
		 */
		if (get_valid_blocks(sbi, segno, 1) != 0)
			goto next_step;
	}
}

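/*
 * Note that gc_node_segment() walks the summary twice: the first pass
 * (initial == true) only issues ra_node_page() readahead for every
 * valid nid, and the second pass fetches and dirties the node pages.
 * Issuing all reads first lets the victim's node blocks stream in as
 * batched IO instead of alternating synchronous reads with writes.
 */
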
/*
 * Calculate the start block index that the given node offset indicates.
 * Be careful: the caller must pass node offsets that indicate direct
 * node blocks only. Passing an offset that points to any other node
 * block type, such as an indirect or double indirect node block, is a
 * caller's mistake that needs to be fixed.
 */
block_t start_bidx_of_node(unsigned int node_ofs)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE;
}

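/*
 * Worked example, assuming the usual 4KB-block geometry
 * (NIDS_PER_BLOCK == 1018, ADDRS_PER_BLOCK == 1018,
 * ADDRS_PER_INODE == 923): the first direct node has node_ofs == 1,
 * so bidx = 0 and the function returns 923, the file offset right
 * after the inode's own address slots. The second direct node
 * (node_ofs == 2) gives bidx = 1, i.e. 1018 + 923 = 1941, and the
 * first direct node behind an indirect node (node_ofs == 4) gives
 * dec = 0, bidx = 2, i.e. 2 * 1018 + 923 = 2959.
 */
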
static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return 0;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_put_page(node_page, 1);
		return 0;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return 0;
	return 1;
}

static void move_data_page(struct inode *inode, struct page *page, int gc_type)
{
	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

		if (PageWriteback(page)) {
			f2fs_submit_bio(sbi, DATA, true);
			wait_on_page_writeback(page);
		}

		if (clear_page_dirty_for_io(page) &&
			S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		set_cold_data(page);
		do_write_data_page(page);
		clear_cold_data(page);
	}
out:
	f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node of the victim data block
 * and checks the block's validity. If the block is valid, it is copied
 * with cold status and the parent node is updated.
 * If the parent node is not valid or the data block address differs,
 * the victim data block is ignored.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct list_head *ilist, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;

		/* stop BG_GC if there is not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_node_page(sbi, le32_to_cpu(entry->nid));
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (check_dnode(sbi, entry, &dni, start_addr + off, &nofs) == 0)
			continue;

		if (phase == 1) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		start_bidx = start_bidx_of_node(nofs);
		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 2) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode))
				continue;

			data_page = find_data_page(inode,
					start_bidx + ofs_in_node, false);
			if (IS_ERR(data_page))
				goto next_iput;

			f2fs_put_page(data_page, 0);
			add_gc_inode(inode, ilist);
		} else {
			inode = find_gc_inode(dni.ino, ilist);
			if (inode) {
				data_page = get_lock_data_page(inode,
						start_bidx + ofs_in_node);
				if (IS_ERR(data_page))
					continue;
				move_data_page(inode, data_page, gc_type);
				stat_inc_data_blk_count(sbi, 1);
			}
		}
		continue;
next_iput:
		iput(inode);
	}

	if (++phase < 4)
		goto next_step;

	if (gc_type == FG_GC) {
		f2fs_submit_bio(sbi, DATA, true);

		/*
		 * In the case of FG_GC, it'd be better to reclaim this
		 * victim completely.
		 */
		if (get_valid_blocks(sbi, segno, 1) != 0) {
			phase = 2;
			goto next_step;
		}
	}
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
					int gc_type, int type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type, type, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
				struct list_head *ilist, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;

	/* read segment summary of victim */
	sum_page = get_sum_page(sbi, segno);
	if (IS_ERR(sum_page))
		return;

	blk_start_plug(&plug);

	sum = page_address(sum_page);

	switch (GET_SUM_TYPE((&sum->footer))) {
	case SUM_TYPE_NODE:
		gc_node_segment(sbi, sum->entries, segno, gc_type);
		break;
	case SUM_TYPE_DATA:
		gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
		break;
	}
	blk_finish_plug(&plug);

	stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)));
	stat_inc_call_count(sbi->stat_info);

	f2fs_put_page(sum_page, 1);
}

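/*
 * The blk_start_plug()/blk_finish_plug() pair brackets the whole
 * segment move so that the many small bios issued by gc_node_segment()
 * or gc_data_segment() are queued per-task and dispatched to the
 * device in larger merged batches rather than one request at a time.
 */
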
int f2fs_gc(struct f2fs_sb_info *sbi)
{
	struct list_head ilist;
	unsigned int segno, i;
	int gc_type = BG_GC;
	int nfree = 0;
	int ret = -1;

	INIT_LIST_HEAD(&ilist);
gc_more:
	if (!(sbi->sb->s_flags & MS_ACTIVE))
		goto stop;

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) {
		gc_type = FG_GC;
		write_checkpoint(sbi, false);
	}

	if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
		goto stop;
	ret = 0;

	for (i = 0; i < sbi->segs_per_sec; i++)
		do_garbage_collect(sbi, segno + i, &ilist, gc_type);

	if (gc_type == FG_GC) {
		sbi->cur_victim_sec = NULL_SEGNO;
		nfree++;
		WARN_ON(get_valid_blocks(sbi, segno, sbi->segs_per_sec));
	}

	if (has_not_enough_free_secs(sbi, nfree))
		goto gc_more;

	if (gc_type == FG_GC)
		write_checkpoint(sbi, false);
stop:
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&ilist);
	return ret;
}

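/*
 * Locking note: f2fs_gc() is entered with sbi->gc_mutex already held
 * by the caller (the background thread takes it via mutex_trylock() in
 * gc_thread_func(); foreground callers are expected to take it before
 * invoking GC) and drops it at the "stop:" label above.
 */
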
void build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;
}

int __init create_gc_caches(void)
{
	winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes",
			sizeof(struct inode_entry), NULL);
	if (!winode_slab)
		return -ENOMEM;
	return 0;
}

void destroy_gc_caches(void)
{
	kmem_cache_destroy(winode_slab);
}