/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/blkdev.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static int gc_thread_func(void *data)
{
        struct f2fs_sb_info *sbi = data;
        struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
        wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
        long wait_ms;

        wait_ms = gc_th->min_sleep_time;

        do {
                if (try_to_freeze())
                        continue;
                else
                        wait_event_interruptible_timeout(*wq,
                                                kthread_should_stop(),
                                                msecs_to_jiffies(wait_ms));
                if (kthread_should_stop())
                        break;

                if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
                        increase_sleep_time(gc_th, &wait_ms);
                        continue;
                }

                /*
                 * [GC triggering condition]
                 * 0. GC is not conducted currently.
                 * 1. There are enough dirty segments.
                 * 2. The IO subsystem is idle, judged by the # of writeback pages.
                 * 3. The IO subsystem is idle, judged by the # of requests in
                 *    the bdev's request list.
                 *
                 * Note: we have to avoid triggering GCs too frequently, because
                 * some segments may be invalidated soon afterwards by user
                 * updates or deletions, so it is better to wait a while and let
                 * dirty segments accumulate.
                 */
                if (!mutex_trylock(&sbi->gc_mutex))
                        continue;

                if (!is_idle(sbi)) {
                        increase_sleep_time(gc_th, &wait_ms);
                        mutex_unlock(&sbi->gc_mutex);
                        continue;
                }
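
                /*
                 * Pacing sketch: increase_sleep_time()/decrease_sleep_time()
                 * (defined in gc.h) step wait_ms between gc_th->min_sleep_time
                 * and gc_th->max_sleep_time, so the thread backs off while
                 * victims are scarce and wakes more often once enough invalid
                 * blocks have accumulated.
                 */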
                if (has_enough_invalid_blocks(sbi))
                        decrease_sleep_time(gc_th, &wait_ms);
                else
                        increase_sleep_time(gc_th, &wait_ms);

                stat_inc_bggc_count(sbi);

                /* if the return value is not zero, no victim was selected */
                if (f2fs_gc(sbi))
                        wait_ms = gc_th->no_gc_sleep_time;

                /* balance f2fs's metadata periodically */
                f2fs_balance_fs_bg(sbi);

        } while (!kthread_should_stop());
        return 0;
}

int start_gc_thread(struct f2fs_sb_info *sbi)
{
        struct f2fs_gc_kthread *gc_th;
        dev_t dev = sbi->sb->s_bdev->bd_dev;
        int err = 0;

        gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
        if (!gc_th) {
                err = -ENOMEM;
                goto out;
        }

        gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
        gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
        gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

        gc_th->gc_idle = 0;

        sbi->gc_thread = gc_th;
        init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
        sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
                        "f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
        if (IS_ERR(gc_th->f2fs_gc_task)) {
                err = PTR_ERR(gc_th->f2fs_gc_task);
                kfree(gc_th);
                sbi->gc_thread = NULL;
        }
out:
        return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
        struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
        if (!gc_th)
                return;
        kthread_stop(gc_th->f2fs_gc_task);
        kfree(gc_th);
        sbi->gc_thread = NULL;
}
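
/*
 * gc_th->gc_idle is a tunable (exposed through sysfs as "gc_idle"): 0 keeps
 * the default policy below, 1 forces cost-benefit and 2 forces greedy
 * selection, matching the GC_CB/GC_GREEDY branches in select_gc_type().
 */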
static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
        int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

        if (gc_th && gc_th->gc_idle) {
                if (gc_th->gc_idle == 1)
                        gc_mode = GC_CB;
                else if (gc_th->gc_idle == 2)
                        gc_mode = GC_GREEDY;
        }
        return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
                        int type, struct victim_sel_policy *p)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

        if (p->alloc_mode == SSR) {
                p->gc_mode = GC_GREEDY;
                p->dirty_segmap = dirty_i->dirty_segmap[type];
                p->max_search = dirty_i->nr_dirty[type];
                p->ofs_unit = 1;
        } else {
                p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
                p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
                p->max_search = dirty_i->nr_dirty[DIRTY];
                p->ofs_unit = sbi->segs_per_sec;
        }

        if (p->max_search > sbi->max_victim_search)
                p->max_search = sbi->max_victim_search;

        p->offset = sbi->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
                                struct victim_sel_policy *p)
{
        /* SSR allocates in a segment unit */
        if (p->alloc_mode == SSR)
                return 1 << sbi->log_blocks_per_seg;
        if (p->gc_mode == GC_GREEDY)
                return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
        else if (p->gc_mode == GC_CB)
                return UINT_MAX;
        else /* no other gc_mode */
                return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        unsigned int secno;

        /*
         * If the gc_type is FG_GC, we can reuse victim sections that were
         * already selected by background GC; those sections are guaranteed
         * to have few valid blocks.
         */
        for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
                if (sec_usage_check(sbi, secno))
                        continue;
                clear_bit(secno, dirty_i->victim_secmap);
                return secno * sbi->segs_per_sec;
        }
        return NULL_SEGNO;
}
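
/*
 * Cost-benefit victim selection, in the spirit of the classic LFS cleaning
 * policy: with u the section utilization in percent and age its normalized
 * age (0..100), the benefit-to-cost ratio is age * (100 - u) / (100 + u)
 * (scaled by 100 below). get_cb_cost() returns UINT_MAX minus that value,
 * so the min-cost search in get_victim_by_default() ends up picking the
 * section with the greatest benefit.
 */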
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
        struct sit_info *sit_i = SIT_I(sbi);
        unsigned int secno = GET_SECNO(sbi, segno);
        unsigned int start = secno * sbi->segs_per_sec;
        unsigned long long mtime = 0;
        unsigned int vblocks;
        unsigned char age = 0;
        unsigned char u;
        unsigned int i;

        for (i = 0; i < sbi->segs_per_sec; i++)
                mtime += get_seg_entry(sbi, start + i)->mtime;
        vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

        mtime = div_u64(mtime, sbi->segs_per_sec);
        vblocks = div_u64(vblocks, sbi->segs_per_sec);

        u = (vblocks * 100) >> sbi->log_blocks_per_seg;

        /* handle the case where the user has changed the system time */
        if (mtime < sit_i->min_mtime)
                sit_i->min_mtime = mtime;
        if (mtime > sit_i->max_mtime)
                sit_i->max_mtime = mtime;
        if (sit_i->max_mtime != sit_i->min_mtime)
                age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
                                sit_i->max_mtime - sit_i->min_mtime);

        return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
                        unsigned int segno, struct victim_sel_policy *p)
{
        if (p->alloc_mode == SSR)
                return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

        /* alloc_mode == LFS */
        if (p->gc_mode == GC_GREEDY)
                return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
        else
                return get_cb_cost(sbi, segno);
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When called for GC, it just picks a victim segment and does not remove
 * it from the dirty seglist.
 * When called for SSR segment selection, it finds the segment with the
 * minimum number of valid blocks and removes it from the dirty seglist.
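 *
 * In either case the scan is bounded by p.max_search and resumes from
 * sbi->last_victim[] on a later call, so one invocation scans a bounded
 * window of the dirty segmap rather than necessarily all of it.
 */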
static int get_victim_by_default(struct f2fs_sb_info *sbi,
                unsigned int *result, int gc_type, int type, char alloc_mode)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        struct victim_sel_policy p;
        unsigned int secno, max_cost;
        int nsearched = 0;

        mutex_lock(&dirty_i->seglist_lock);

        p.alloc_mode = alloc_mode;
        select_policy(sbi, gc_type, type, &p);

        p.min_segno = NULL_SEGNO;
        p.min_cost = max_cost = get_max_cost(sbi, &p);

        if (p.alloc_mode == LFS && gc_type == FG_GC) {
                p.min_segno = check_bg_victims(sbi);
                if (p.min_segno != NULL_SEGNO)
                        goto got_it;
        }

        while (1) {
                unsigned long cost;
                unsigned int segno;

                segno = find_next_bit(p.dirty_segmap, MAIN_SEGS(sbi), p.offset);
                if (segno >= MAIN_SEGS(sbi)) {
                        if (sbi->last_victim[p.gc_mode]) {
                                sbi->last_victim[p.gc_mode] = 0;
                                p.offset = 0;
                                continue;
                        }
                        break;
                }

                p.offset = segno + p.ofs_unit;
                if (p.ofs_unit > 1)
                        p.offset -= segno % p.ofs_unit;

                secno = GET_SECNO(sbi, segno);

                if (sec_usage_check(sbi, secno))
                        continue;
                if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
                        continue;

                cost = get_gc_cost(sbi, segno, &p);

                if (p.min_cost > cost) {
                        p.min_segno = segno;
                        p.min_cost = cost;
                } else if (unlikely(cost == max_cost)) {
                        continue;
                }

                if (nsearched++ >= p.max_search) {
                        sbi->last_victim[p.gc_mode] = segno;
                        break;
                }
        }
        if (p.min_segno != NULL_SEGNO) {
got_it:
                if (p.alloc_mode == LFS) {
                        secno = GET_SECNO(sbi, p.min_segno);
                        if (gc_type == FG_GC)
                                sbi->cur_victim_sec = secno;
                        else
                                set_bit(secno, dirty_i->victim_secmap);
                }
                *result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

                trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
                                sbi->cur_victim_sec,
                                prefree_segments(sbi), free_segments(sbi));
        }
        mutex_unlock(&dirty_i->seglist_lock);

        return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
        .get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
        struct inode_entry *ie;

        ie = radix_tree_lookup(&gc_list->iroot, ino);
        if (ie)
                return ie->inode;
        return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
        struct inode_entry *new_ie;

        if (inode == find_gc_inode(gc_list, inode->i_ino)) {
                iput(inode);
                return;
        }
        new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
        new_ie->inode = inode;

        f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
        list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
        struct inode_entry *ie, *next_ie;
        list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
                radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
                iput(ie->inode);
                list_del(&ie->list);
                kmem_cache_free(inode_entry_slab, ie);
        }
}

static int check_valid_map(struct f2fs_sb_info *sbi,
                                unsigned int segno, int offset)
{
        struct sit_info *sit_i = SIT_I(sbi);
        struct seg_entry *sentry;
        int ret;

        mutex_lock(&sit_i->sentry_lock);
        sentry = get_seg_entry(sbi, segno);
        ret = f2fs_test_bit(offset, sentry->cur_valid_map);
        mutex_unlock(&sit_i->sentry_lock);
        return ret;
}

/*
 * This function compares the node address recorded in each summary entry
 * with the address in the NAT. If the node block is still valid, it is
 * moved (written back as a cold node block); otherwise the stale entry
 * is skipped.
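 *
 * The segment is walked twice: an initial pass that only issues readahead
 * via ra_node_page(), and a second pass that dirties and writes out the
 * still-valid node pages.
 */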
static void gc_node_segment(struct f2fs_sb_info *sbi,
                struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
        bool initial = true;
        struct f2fs_summary *entry;
        int off;

next_step:
        entry = sum;

        for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
                nid_t nid = le32_to_cpu(entry->nid);
                struct page *node_page;

                /* stop BG_GC if there are not enough free sections */
                if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
                        return;

                if (check_valid_map(sbi, segno, off) == 0)
                        continue;

                if (initial) {
                        ra_node_page(sbi, nid);
                        continue;
                }
                node_page = get_node_page(sbi, nid);
                if (IS_ERR(node_page))
                        continue;

                /* the block may become invalid during get_node_page() */
                if (check_valid_map(sbi, segno, off) == 0) {
                        f2fs_put_page(node_page, 1);
                        continue;
                }

                /* set the page dirty and write it */
                if (gc_type == FG_GC) {
                        f2fs_wait_on_page_writeback(node_page, NODE);
                        set_page_dirty(node_page);
                } else {
                        if (!PageWriteback(node_page))
                                set_page_dirty(node_page);
                }
                f2fs_put_page(node_page, 1);
                stat_inc_node_blk_count(sbi, 1, gc_type);
        }

        if (initial) {
                initial = false;
                goto next_step;
        }

        if (gc_type == FG_GC) {
                struct writeback_control wbc = {
                        .sync_mode = WB_SYNC_ALL,
                        .nr_to_write = LONG_MAX,
                        .for_reclaim = 0,
                };
                sync_node_pages(sbi, 0, &wbc);

                /*
                 * In the case of FG_GC, it'd be better to reclaim this
                 * victim completely.
                 */
                if (get_valid_blocks(sbi, segno, 1) != 0)
                        goto next_step;
        }
}

/*
 * Calculate the start block index that the given node offset maps to.
 * Be careful: the caller should pass only offsets of direct node blocks.
 * If a node offset pointing to another node block type, such as an
 * indirect or double indirect node block, is passed in, it is a caller's
 * bug.
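 *
 * For example, node_ofs 1 and 2 (the two direct node blocks) yield
 * bidx 0 and 1; node_ofs 3 is the first indirect node block, so its first
 * child at node_ofs 4 yields bidx 2, and so on.
 */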
block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
{
        unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
        unsigned int bidx;

        if (node_ofs == 0)
                return 0;

        if (node_ofs <= 2) {
                bidx = node_ofs - 1;
        } else if (node_ofs <= indirect_blks) {
                int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
                bidx = node_ofs - 2 - dec;
        } else {
                int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
                bidx = node_ofs - 5 - dec;
        }
        return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi);
}

static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
                struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
        struct page *node_page;
        nid_t nid;
        unsigned int ofs_in_node;
        block_t source_blkaddr;

        nid = le32_to_cpu(sum->nid);
        ofs_in_node = le16_to_cpu(sum->ofs_in_node);

        node_page = get_node_page(sbi, nid);
        if (IS_ERR(node_page))
                return 0;

        get_node_info(sbi, nid, dni);

        if (sum->version != dni->version) {
                f2fs_put_page(node_page, 1);
                return 0;
        }

        *nofs = ofs_of_node(node_page);
        source_blkaddr = datablock_addr(node_page, ofs_in_node);
        f2fs_put_page(node_page, 1);

        if (source_blkaddr != blkaddr)
                return 0;
        return 1;
}
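
/*
 * Move a data block of an encrypted inode without decrypting it: the
 * ciphertext is read into a page of the meta mapping, a new cold-data
 * block address is allocated, and the page is written back unchanged.
 */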
static void move_encrypted_block(struct inode *inode, block_t bidx)
{
        struct f2fs_io_info fio = {
                .sbi = F2FS_I_SB(inode),
                .type = DATA,
                .rw = READ_SYNC,
                .encrypted_page = NULL,
        };
        struct dnode_of_data dn;
        struct f2fs_summary sum;
        struct node_info ni;
        struct page *page;
        int err;

        /* do not read out */
        page = grab_cache_page(inode->i_mapping, bidx);
        if (!page)
                return;

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
        if (err)
                goto out;

        if (unlikely(dn.data_blkaddr == NULL_ADDR))
                goto put_out;

        get_node_info(fio.sbi, dn.nid, &ni);
        set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

        /* read page */
        fio.page = page;
        fio.blk_addr = dn.data_blkaddr;

        fio.encrypted_page = grab_cache_page(META_MAPPING(fio.sbi), fio.blk_addr);
        if (!fio.encrypted_page)
                goto put_out;

        err = f2fs_submit_page_bio(&fio);
        if (err)
                goto put_page_out;

        /* write page */
        lock_page(fio.encrypted_page);

        if (unlikely(!PageUptodate(fio.encrypted_page)))
                goto put_page_out;
        if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi)))
                goto put_page_out;

        set_page_dirty(fio.encrypted_page);
        f2fs_wait_on_page_writeback(fio.encrypted_page, META);
        if (clear_page_dirty_for_io(fio.encrypted_page))
                dec_page_count(fio.sbi, F2FS_DIRTY_META);

        set_page_writeback(fio.encrypted_page);

        /* allocate a new block address */
        f2fs_wait_on_page_writeback(dn.node_page, NODE);
        allocate_data_block(fio.sbi, NULL, fio.blk_addr,
                                        &fio.blk_addr, &sum, CURSEG_COLD_DATA);
        fio.rw = WRITE_SYNC;
        f2fs_submit_page_mbio(&fio);

        dn.data_blkaddr = fio.blk_addr;
        set_data_blkaddr(&dn);
        f2fs_update_extent_cache(&dn);
        set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
        if (page->index == 0)
                set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
put_page_out:
        f2fs_put_page(fio.encrypted_page, 1);
put_out:
        f2fs_put_dnode(&dn);
out:
        f2fs_put_page(page, 1);
}
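
/*
 * For BG_GC the page is only dirtied and tagged cold, leaving the actual
 * write to the flusher; for FG_GC it is written out synchronously here.
 */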
static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
{
        struct page *page;

        page = get_lock_data_page(inode, bidx);
        if (IS_ERR(page))
                return;

        if (gc_type == BG_GC) {
                if (PageWriteback(page))
                        goto out;
                set_page_dirty(page);
                set_cold_data(page);
        } else {
                struct f2fs_io_info fio = {
                        .sbi = F2FS_I_SB(inode),
                        .type = DATA,
                        .rw = WRITE_SYNC,
                        .page = page,
                        .encrypted_page = NULL,
                };
                set_page_dirty(page);
                f2fs_wait_on_page_writeback(page, DATA);
                if (clear_page_dirty_for_io(page))
                        inode_dec_dirty_pages(inode);
                set_cold_data(page);
                do_write_data_page(&fio);
                clear_cold_data(page);
        }
out:
        f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node of a victim data block and
 * checks the block's validity. If the block is valid, it is copied with
 * cold status and the parent node is updated.
 * If the parent node is not valid or the data block address does not
 * match, the victim data block is ignored.
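 *
 * The segment is walked in up to four phases: phase 0 preloads the dnode
 * pages, phase 1 preloads the inode pages, phase 2 grabs the inodes and
 * preloads their data pages, and phase 3 actually moves the blocks.
 */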
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
                struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
        struct super_block *sb = sbi->sb;
        struct f2fs_summary *entry;
        block_t start_addr;
        int off;
        int phase = 0;

        start_addr = START_BLOCK(sbi, segno);

next_step:
        entry = sum;

        for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
                struct page *data_page;
                struct inode *inode;
                struct node_info dni; /* dnode info for the data */
                unsigned int ofs_in_node, nofs;
                block_t start_bidx;

                /* stop BG_GC if there are not enough free sections */
                if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
                        return;

                if (check_valid_map(sbi, segno, off) == 0)
                        continue;

                if (phase == 0) {
                        ra_node_page(sbi, le32_to_cpu(entry->nid));
                        continue;
                }

                /* Get an inode by ino while checking validity */
                if (check_dnode(sbi, entry, &dni, start_addr + off, &nofs) == 0)
                        continue;

                if (phase == 1) {
                        ra_node_page(sbi, dni.ino);
                        continue;
                }

                ofs_in_node = le16_to_cpu(entry->ofs_in_node);

                if (phase == 2) {
                        inode = f2fs_iget(sb, dni.ino);
                        if (IS_ERR(inode) || is_bad_inode(inode))
                                continue;

                        /* if this is an encrypted inode, let's go to phase 3 */
                        if (f2fs_encrypted_inode(inode) &&
                                                S_ISREG(inode->i_mode)) {
                                add_gc_inode(gc_list, inode);
                                continue;
                        }

                        start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));
                        data_page = get_read_data_page(inode,
                                        start_bidx + ofs_in_node, READA);
                        if (IS_ERR(data_page)) {
                                iput(inode);
                                continue;
                        }

                        f2fs_put_page(data_page, 0);
                        add_gc_inode(gc_list, inode);
                        continue;
                }

                /* phase 3 */
                inode = find_gc_inode(gc_list, dni.ino);
                if (inode) {
                        start_bidx = start_bidx_of_node(nofs, F2FS_I(inode))
                                                                + ofs_in_node;
                        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
                                move_encrypted_block(inode, start_bidx);
                        else
                                move_data_page(inode, start_bidx, gc_type);
                        stat_inc_data_blk_count(sbi, 1, gc_type);
                }
        }

        if (++phase < 4)
                goto next_step;

        if (gc_type == FG_GC) {
                f2fs_submit_merged_bio(sbi, DATA, WRITE);

                /*
                 * In the case of FG_GC, it'd be better to reclaim this
                 * victim completely.
                 */
                if (get_valid_blocks(sbi, segno, 1) != 0) {
                        phase = 2;
                        goto next_step;
                }
        }
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
                                                int gc_type)
{
        struct sit_info *sit_i = SIT_I(sbi);
        int ret;

        mutex_lock(&sit_i->sentry_lock);
        ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
                                                        NO_CHECK_TYPE, LFS);
        mutex_unlock(&sit_i->sentry_lock);
        return ret;
}

static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
                                struct gc_inode_list *gc_list, int gc_type)
{
        struct page *sum_page;
        struct f2fs_summary_block *sum;
        struct blk_plug plug;

        /* read segment summary of victim */
        sum_page = get_sum_page(sbi, segno);

        blk_start_plug(&plug);

        sum = page_address(sum_page);

        /*
         * this is to avoid deadlock:
         * - lock_page(sum_page)         - f2fs_replace_block
         *  - check_valid_map()            - mutex_lock(sentry_lock)
         *   - mutex_lock(sentry_lock)     - change_curseg()
         *                                  - lock_page(sum_page)
         */
        unlock_page(sum_page);

        switch (GET_SUM_TYPE((&sum->footer))) {
        case SUM_TYPE_NODE:
                gc_node_segment(sbi, sum->entries, segno, gc_type);
                break;
        case SUM_TYPE_DATA:
                gc_data_segment(sbi, sum->entries, gc_list, segno, gc_type);
                break;
        }
        blk_finish_plug(&plug);

        stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)), gc_type);
        stat_inc_call_count(sbi->stat_info);

        f2fs_put_page(sum_page, 0);
}
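
/*
 * Note: f2fs_gc() is entered with sbi->gc_mutex held by the caller and
 * releases it before returning; once FG_GC starts, the gc_more loop keeps
 * collecting victims until enough free sections are available.
 */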
int f2fs_gc(struct f2fs_sb_info *sbi)
{
        unsigned int segno, i;
        int gc_type = BG_GC;
        int nfree = 0;
        int ret = -1;
        struct cp_control cpc;
        struct gc_inode_list gc_list = {
                .ilist = LIST_HEAD_INIT(gc_list.ilist),
                .iroot = RADIX_TREE_INIT(GFP_NOFS),
        };

        cpc.reason = __get_cp_reason(sbi);
gc_more:
        if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
                goto stop;
        if (unlikely(f2fs_cp_error(sbi)))
                goto stop;

        if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) {
                gc_type = FG_GC;
                write_checkpoint(sbi, &cpc);
        }

        if (!__get_victim(sbi, &segno, gc_type))
                goto stop;
        ret = 0;

        /* readahead multiple SSA blocks with contiguous addresses */
        if (sbi->segs_per_sec > 1)
                ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno), sbi->segs_per_sec,
                                                                META_SSA);

        for (i = 0; i < sbi->segs_per_sec; i++)
                do_garbage_collect(sbi, segno + i, &gc_list, gc_type);

        if (gc_type == FG_GC) {
                sbi->cur_victim_sec = NULL_SEGNO;
                nfree++;
                WARN_ON(get_valid_blocks(sbi, segno, sbi->segs_per_sec));
        }

        if (has_not_enough_free_secs(sbi, nfree))
                goto gc_more;

        if (gc_type == FG_GC)
                write_checkpoint(sbi, &cpc);
stop:
        mutex_unlock(&sbi->gc_mutex);

        put_gc_inode(&gc_list);
        return ret;
}

void build_gc_manager(struct f2fs_sb_info *sbi)
{
        DIRTY_I(sbi)->v_ops = &default_v_ops;
}