/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/blkdev.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
static struct kmem_cache *winode_slab;
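
/*
 * Background GC kernel thread: sleeps for an adaptive interval and, when
 * background GC is enabled and the gc_mutex can be taken, runs one round of
 * cleaning.  The interval shrinks while many invalid blocks are reclaimable
 * and grows again when a round finds nothing to do.
 */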
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = GC_THREAD_MIN_SLEEP_TIME;

	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
						kthread_should_stop(),
						msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		if (!test_opt(sbi, BG_GC))
			continue;

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. The IO subsystem is idle, judged by the number of
		 *    writeback pages.
		 * 3. The IO subsystem is idle, judged by the number of
		 *    requests in the bdev's request list.
		 *
		 * Note: we have to avoid triggering GC too frequently,
		 * because some segments may be invalidated soon afterwards
		 * by user updates or deletions.  So wait a while to collect
		 * more dirty segments before cleaning.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			wait_ms = increase_sleep_time(wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			wait_ms = decrease_sleep_time(wait_ms);
		else
			wait_ms = increase_sleep_time(wait_ms);

		/* if no victim was selected, back off to the long no-GC interval */
		if (f2fs_gc(sbi, 1) == GC_NONE)
			wait_ms = GC_THREAD_NOGC_SLEEP_TIME;
		else if (wait_ms == GC_THREAD_NOGC_SLEEP_TIME)
			wait_ms = GC_THREAD_MAX_SLEEP_TIME;

	} while (!kthread_should_stop());
	return 0;
}
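
/* Allocate the GC thread context and start the background GC kernel thread. */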
int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;

	gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th)
		return -ENOMEM;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
						GC_THREAD_NAME);
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		kfree(gc_th);
		sbi->gc_thread = NULL;
		return -ENOMEM;
	}
	return 0;
}
void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}
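
/* Background GC uses the cost-benefit policy; foreground GC cleans greedily. */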
static int select_gc_type(int gc_type)
{
	return (gc_type == BG_GC) ? GC_CB : GC_GREEDY;
}
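
/*
 * Fill the victim selection policy: SSR allocation greedily scans the dirty
 * segments of the requested type one segment at a time, while LFS cleaning
 * scans all dirty segments in section-sized units with the mode chosen by
 * select_gc_type().
 */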
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}
	p->offset = sbi->last_victim[p->gc_mode];
}
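
/* Worst possible cost for the chosen policy, used to seed the minimum search. */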
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	if (p->gc_mode == GC_GREEDY)
		return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}
static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim segments that were
	 * already selected by background GC, since those segments are
	 * guaranteed to have only a few valid blocks.
	 */
	segno = find_next_bit(dirty_i->victim_segmap[BG_GC],
						TOTAL_SEGS(sbi), 0);
	if (segno < TOTAL_SEGS(sbi)) {
		clear_bit(segno, dirty_i->victim_segmap[BG_GC]);
		return segno;
	}
	return NULL_SEGNO;
}
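
/*
 * Cost-benefit victim cost (as in the LFS cleaning policy): u is the section
 * utilization in percent and age reflects how long the section has stayed
 * unmodified relative to all sections.  Sections that are mostly invalid and
 * cold (low u, high age) yield the highest benefit; the value is inverted
 * (UINT_MAX - benefit) so callers can simply pick the minimum cost.
 */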
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SECNO(sbi, segno);
	unsigned int start = secno * sbi->segs_per_sec;
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}
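
/*
 * Per-segment cost: checkpointed valid blocks for SSR, raw valid blocks for
 * the greedy policy, or the cost-benefit value otherwise.
 */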
static unsigned int get_gc_cost(struct f2fs_sb_info *sbi, unsigned int segno,
			struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
	else
		return get_cb_cost(sbi, segno);
}
/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and does not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds the segment
 * with the minimum number of valid blocks and removes it from the dirty
 * seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct victim_sel_policy p;
	unsigned int segno;
	int nsearched = 0;

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = get_max_cost(sbi, &p);

	mutex_lock(&dirty_i->seglist_lock);

	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;

		segno = find_next_bit(p.dirty_segmap,
						TOTAL_SEGS(sbi), p.offset);
		if (segno >= TOTAL_SEGS(sbi)) {
			if (sbi->last_victim[p.gc_mode]) {
				sbi->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}
		p.offset = ((segno / p.ofs_unit) * p.ofs_unit) + p.ofs_unit;

		if (test_bit(segno, dirty_i->victim_segmap[FG_GC]))
			continue;
		if (gc_type == BG_GC &&
			test_bit(segno, dirty_i->victim_segmap[BG_GC]))
			continue;
		if (IS_CURSEC(sbi, GET_SECNO(sbi, segno)))
			continue;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}

		if (cost == get_max_cost(sbi, &p))
			continue;

		if (nsearched++ >= MAX_VICTIM_SEARCH) {
			sbi->last_victim[p.gc_mode] = segno;
			break;
		}
	}
got_it:
	if (p.min_segno != NULL_SEGNO) {
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
		if (p.alloc_mode == LFS) {
			int i;
			for (i = 0; i < p.ofs_unit; i++)
				set_bit(*result + i,
					dirty_i->victim_segmap[gc_type]);
		}
	}
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}
static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};
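
/*
 * Data GC keeps every inode it has referenced on a per-GC list (ilist), so
 * that each inode is taken once and released together in put_gc_inode().
 */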
static struct inode *find_gc_inode(nid_t ino, struct list_head *ilist)
{
	struct list_head *this;
	struct inode_entry *ie;

	list_for_each(this, ilist) {
		ie = list_entry(this, struct inode_entry, list);
		if (ie->inode->i_ino == ino)
			return ie->inode;
	}
	return NULL;
}
static void add_gc_inode(struct inode *inode, struct list_head *ilist)
{
	struct list_head *this;
	struct inode_entry *new_ie, *ie;

	list_for_each(this, ilist) {
		ie = list_entry(this, struct inode_entry, list);
		if (ie->inode == inode) {
			iput(inode);
			return;
		}
	}
repeat:
	new_ie = kmem_cache_alloc(winode_slab, GFP_NOFS);
	if (!new_ie) {
		cond_resched();
		goto repeat;
	}
	new_ie->inode = inode;
	list_add_tail(&new_ie->list, ilist);
}
static void put_gc_inode(struct list_head *ilist)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, ilist, list) {
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(winode_slab, ie);
	}
}
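
/* Test the SIT valid bitmap to see whether this block is still in use. */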
static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret ? GC_OK : GC_NEXT;
}
/*
 * This function compares the node address recorded in the summary with the
 * one in the NAT.  If they match, the node page is written out with cold
 * status; otherwise the invalid node block is ignored.
 */
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	bool initial = true;
	struct f2fs_summary *entry;
	int off;

next_step:
	entry = sum;
	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		int err;

		/*
		 * Make sure that enough free segments remain to write all the
		 * dirty node pages before the next checkpoint, so check the
		 * space consumed by dirty node pages.
		 */
		if (should_do_checkpoint(sbi)) {
			mutex_lock(&sbi->cp_mutex);
			block_operations(sbi);
			return GC_BLOCKED;
		}

		err = check_valid_map(sbi, segno, off);
		if (err == GC_NEXT)
			continue;

		/* first pass: only issue readahead for the node pages */
		if (initial) {
			ra_node_page(sbi, nid);
			continue;
		}
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* set page dirty and write it */
		if (!PageWriteback(node_page))
			set_page_dirty(node_page);
		f2fs_put_page(node_page, 1);
		stat_inc_node_blk_count(sbi, 1);
	}
	if (initial) {
		initial = false;
		goto next_step;
	}

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = LONG_MAX,
			.for_reclaim = 0,
		};
		sync_node_pages(sbi, 0, &wbc);
	}
	return GC_DONE;
}
/*
 * Calculate the start block index of the data area covered by this node page.
 */
block_t start_bidx_of_node(unsigned int node_ofs)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE;
}
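
/*
 * Check whether the block at blkaddr is still referenced by the dnode named
 * in the summary: the summary's node version must match the NAT entry, and
 * the dnode's block address at ofs_in_node must still equal blkaddr.
 */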
static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return GC_NEXT;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_put_page(node_page, 1);
		return GC_NEXT;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return GC_NEXT;
	return GC_OK;
}
static void move_data_page(struct inode *inode, struct page *page, int gc_type)
{
	if (page->mapping != inode->i_mapping)
		goto out;

	if (inode != page->mapping->host)
		goto out;

	if (PageWriteback(page))
		goto out;

	if (gc_type == BG_GC) {
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
		mutex_lock_op(sbi, DATA_WRITE);
		if (clear_page_dirty_for_io(page) &&
					S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		set_cold_data(page);
		do_write_data_page(page);
		mutex_unlock_op(sbi, DATA_WRITE);
		clear_cold_data(page);
	}
out:
	f2fs_put_page(page, 1);
}
/*
 * This function tries to get the parent node of a victim data block and
 * checks the block's validity.  If the block is valid, it is copied out with
 * cold status and the parent node is updated.
 * If the parent node is not valid or the data block address differs, the
 * victim data block is ignored.
 */
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct list_head *ilist, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int err, off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;
	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;

		/*
		 * Make sure that enough free segments remain to write all the
		 * dirty node pages before the next checkpoint, so check the
		 * space consumed by dirty node pages.
		 */
		if (should_do_checkpoint(sbi)) {
			mutex_lock(&sbi->cp_mutex);
			block_operations(sbi);
			err = GC_BLOCKED;
			goto stop;
		}

		err = check_valid_map(sbi, segno, off);
		if (err == GC_NEXT)
			continue;

		if (phase == 0) {
			ra_node_page(sbi, le32_to_cpu(entry->nid));
			continue;
		}

		/* Get an inode by ino while checking validity */
		err = check_dnode(sbi, entry, &dni, start_addr + off, &nofs);
		if (err == GC_NEXT)
			continue;

		if (phase == 1) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		start_bidx = start_bidx_of_node(nofs);
		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 2) {
			inode = f2fs_iget_nowait(sb, dni.ino);
			if (IS_ERR(inode))
				continue;

			data_page = find_data_page(inode,
					start_bidx + ofs_in_node);
			if (IS_ERR(data_page))
				goto next_iput;

			f2fs_put_page(data_page, 0);
			add_gc_inode(inode, ilist);
		} else {
			inode = find_gc_inode(dni.ino, ilist);
			if (inode) {
				data_page = get_lock_data_page(inode,
						start_bidx + ofs_in_node);
				if (IS_ERR(data_page))
					continue;
				move_data_page(inode, data_page, gc_type);
				stat_inc_data_blk_count(sbi, 1);
			}
		}
		continue;
next_iput:
		iput(inode);
	}
	if (++phase < 4)
		goto next_step;
	err = GC_DONE;
stop:
	if (gc_type == FG_GC)
		f2fs_submit_bio(sbi, DATA, true);
	return err;
}
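
/* Pick an LFS cleaning victim while holding the SIT sentry lock. */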
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
						int gc_type, int type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type, type, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}
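
/*
 * Collect one segment: read its summary block and dispatch to the node or
 * data GC routine according to the summary footer type.
 */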
static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
				struct list_head *ilist, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	int ret = GC_DONE;

	/* read the segment summary of the victim */
	sum_page = get_sum_page(sbi, segno);
	if (IS_ERR(sum_page))
		return GC_ERROR;

	/*
	 * CP needs to lock sum_page.  At this point we do not need to lock
	 * the page ourselves, because the summary page is not going anywhere
	 * and will not be updated before GC is done.
	 */
	unlock_page(sum_page);
	sum = page_address(sum_page);

	switch (GET_SUM_TYPE((&sum->footer))) {
	case SUM_TYPE_NODE:
		ret = gc_node_segment(sbi, sum->entries, segno, gc_type);
		break;
	case SUM_TYPE_DATA:
		ret = gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
		break;
	}
	stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)));
	stat_inc_call_count(sbi->stat_info);

	f2fs_put_page(sum_page, 0);
	return ret;
}
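
/*
 * Entry point for garbage collection, called with sbi->gc_mutex held.  Cleans
 * sections until nGC new free sections have been produced or no victim can be
 * found, then writes a checkpoint if free space is still tight or GC was
 * blocked by a pending checkpoint.
 */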
int f2fs_gc(struct f2fs_sb_info *sbi, int nGC)
{
	unsigned int segno;
	int old_free_secs, cur_free_secs;
	int gc_status, nfree;
	struct list_head ilist;
	int gc_type = BG_GC;

	INIT_LIST_HEAD(&ilist);
gc_more:
	nfree = 0;
	gc_status = GC_NONE;

	if (has_not_enough_free_secs(sbi))
		old_free_secs = reserved_sections(sbi);
	else
		old_free_secs = free_sections(sbi);

	while (sbi->sb->s_flags & MS_ACTIVE) {
		int i;
		if (has_not_enough_free_secs(sbi))
			gc_type = FG_GC;

		cur_free_secs = free_sections(sbi) + nfree;

		/* We got free space successfully. */
		if (nGC < cur_free_secs - old_free_secs)
			break;

		if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
			break;

		for (i = 0; i < sbi->segs_per_sec; i++) {
			/*
			 * do_garbage_collect returns one of three gc_status
			 * values: GC_ERROR, GC_DONE, or GC_BLOCKED.
			 * If GC finished uncleanly, we have to return the
			 * victim to the dirty segment list.
			 */
			gc_status = do_garbage_collect(sbi, segno + i,
							&ilist, gc_type);
			if (gc_status != GC_DONE)
				goto stop;
			nfree++;
		}
	}
stop:
	if (has_not_enough_free_secs(sbi) || gc_status == GC_BLOCKED) {
		write_checkpoint(sbi, (gc_status == GC_BLOCKED), false);
		if (nfree)
			goto gc_more;
	}
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&ilist);
	BUG_ON(!list_empty(&ilist));
	return gc_status;
}
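
/* Install the default victim selection operations for this superblock. */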
void build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;
}
int create_gc_caches(void)
{
	winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes",
			sizeof(struct inode_entry), NULL);
	if (!winode_slab)
		return -ENOMEM;
	return 0;
}
void destroy_gc_caches(void)
{
	kmem_cache_destroy(winode_slab);
}