Linux 4.18.10
fs/f2fs/gc.c

/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	unsigned int wait_ms;

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = 0;

		if (try_to_freeze())
			continue;
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			continue;
		}

#ifdef CONFIG_F2FS_FAULT_INJECTION
		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false);
		}
#endif

		if (!sb_start_write_trylock(sbi->sb))
			continue;

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs frequently,
		 * because it is possible that some segments will be
		 * invalidated soon after by user update or deletion.
		 * So, we'd like to wait some time to collect dirty segments.
		 */
		if (sbi->gc_mode == GC_URGENT) {
			wait_ms = gc_th->urgent_sleep_time;
			mutex_lock(&sbi->gc_mutex);
			goto do_gc;
		}

		if (!mutex_trylock(&sbi->gc_mutex))
			goto next;

		if (!is_idle(sbi)) {
			increase_sleep_time(gc_th, &wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			goto next;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
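
		/*
		 * Note: increase/decrease_sleep_time() move wait_ms between
		 * gc_th->min_sleep_time and gc_th->max_sleep_time, so a
		 * mostly-clean filesystem backs off while one that has
		 * accumulated many invalid blocks is scanned more often.
		 */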
do_gc:
		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true, NULL_SEGNO))
			wait_ms = gc_th->no_gc_sleep_time;

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);
next:
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_wake = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

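/*
 * Background GC defaults to the cost-benefit policy and foreground GC
 * to greedy (fewest valid blocks); a gc_mode configured through sysfs,
 * such as GC_IDLE_CB, GC_IDLE_GREEDY or GC_URGENT, overrides that
 * default below.
 */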
static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT:
		gc_mode = GC_GREEDY;
		break;
	}
	return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	/* we need to check every dirty segment in the FG_GC case */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select the beginning hot/small space first in no_heap mode */
	if (test_opt(sbi, NOHEAP) &&
			(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can select the victim segments
	 * already selected by background GC before.
	 * Those segments are guaranteed to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* handle the case where the user has changed the system time */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);
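
	/*
	 * Cost-benefit victim selection, as in the classic LFS cleaner:
	 * u is the section's utilization in percent and age its
	 * normalized age, so the benefit is age * (100 - u) / (100 + u),
	 * i.e. free space gained, weighted by age, over the cost of
	 * reading and rewriting the still-valid blocks.  Subtracting from
	 * UINT_MAX inverts it because get_victim_by_default() keeps the
	 * minimum cost: e.g. u = 20, age = 100 yields a smaller cost
	 * (UINT_MAX - 6666) than u = 80, age = 10 (UINT_MAX - 111).
	 */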
	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else
		return get_cb_cost(sbi, segno);
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and does not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from the dirty seglist.
 */
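/*
 * The scan below is circular: it starts at last_victim[gc_mode], wraps
 * around at the end of the main area, and gives up after p.max_search
 * dirty segments so a single call stays bounded.  The stop position is
 * written back to last_victim so the next call resumes where this one
 * left off.
 */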
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment = MAIN_SEGS(sbi);
	unsigned int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = get_max_cost(sbi, &p);

	if (*result != NULL_SEGNO) {
		if (IS_DATASEG(get_seg_entry(sbi, *result)->type) &&
			get_valid_blocks(sbi, *result, false) &&
			!sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			p.min_segno = *result;
		goto out;
	}

	if (p.max_search == 0)
		goto out;

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1) {
			p.offset -= segno % p.ofs_unit;
			nsearched += count_bits(p.dirty_segmap,
						p.offset - p.ofs_unit,
						p.ofs_unit);
		} else {
			nsearched++;
		}

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] = last_victim + 1;
			else
				sm->last_victim[p.gc_mode] = segno + 1;
			sm->last_victim[p.gc_mode] %= MAIN_SEGS(sbi);
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
out:
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(f2fs_inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with
 * the one in the NAT. If the node is valid, it is copied with cold
 * status; otherwise (an invalid node) it is ignored.
 */
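/*
 * gc_node_segment() makes three passes over the victim segment's
 * summary entries: phase 0 reads ahead the NAT blocks covering the
 * nids, phase 1 reads ahead the node pages themselves, and phase 2
 * migrates every node block that is still valid via
 * f2fs_move_node_page().
 */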
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = f2fs_get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during f2fs_get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		f2fs_get_node_info(sbi, nid, &ni);
		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		f2fs_move_node_page(node_page, gc_type);
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
}

/*
 * Calculate the start block index of the data addressed by the given
 * node offset. Be careful: the caller must pass a node offset that
 * refers only to direct node blocks. If a node offset pointing to
 * another type of node block, such as an indirect or double indirect
 * node block, is given, it is a bug in the caller.
 */
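/*
 * Roughly (following f2fs's default inode layout): node offsets 1 and 2
 * are the two direct node blocks, then each group of NIDS_PER_BLOCK + 1
 * offsets holds one indirect node block plus its direct children; the
 * "dec" terms subtract those indirect node blocks, which carry no data
 * addresses, before scaling by ADDRS_PER_BLOCK.
 */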
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
}

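/*
 * A summary entry is considered alive only if the (nid, ofs_in_node) it
 * records still maps to this block address in the owning node page;
 * stale entries left behind by overwrites or truncation fail the check.
 * A node version mismatch against the NAT copy marks the fs for fsck.
 */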
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	f2fs_get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: valid data with mismatched node version.",
				__func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return false;
	return true;
}

/*
 * Move a data block via META_MAPPING while keeping its data page locked.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
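/*
 * This path serves inodes that need post-read processing (e.g.
 * encryption): the on-disk block is read into a META_MAPPING page keyed
 * by the new block address and written back out with REQ_SYNC, so the
 * ciphertext is relocated without ever being decrypted in the page
 * cache.
 */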
static void move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page;
	block_t newaddr;
	int err;
	bool lfs_mode = test_opt(fio.sbi, LFS);

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		goto out;
	}

	if (f2fs_is_pinned_file(inode)) {
		f2fs_pin_file_control(inode, true);
		goto out;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		goto put_out;
	}

	/*
	 * don't cache encrypted data into the meta inode until the
	 * previous dirty data has been written back, to avoid racing
	 * between GC and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true);

	f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		down_write(&fio.sbi->io_order_lock);

	f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, CURSEG_COLD_DATA, NULL, false);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto recover_block;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_page_out;

	/* write page */
	lock_page(fio.encrypted_page);

	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
		err = -EIO;
		goto put_page_out;
	}
	if (unlikely(!PageUptodate(fio.encrypted_page))) {
		err = -EIO;
		goto put_page_out;
	}

	set_page_dirty(fio.encrypted_page);
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);
	ClearPageError(page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);
	if (fio.retry) {
		if (PageWriteback(fio.encrypted_page))
			end_page_writeback(fio.encrypted_page);
		goto put_page_out;
	}

	f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (lfs_mode)
		up_write(&fio.sbi->io_order_lock);
	if (err)
		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
								true, true);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
}

static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
						unsigned int segno, int off)
{
	struct page *page;

	page = f2fs_get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		goto out;
	}
	if (f2fs_is_pinned_file(inode)) {
		if (gc_type == FG_GC)
			f2fs_pin_file_control(inode, true);
		goto out;
	}

	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);
		int err;

retry:
		set_page_dirty(page);
		f2fs_wait_on_page_writeback(page, DATA, true);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}

		set_cold_data(page);

		err = f2fs_do_write_data_page(&fio);
		if (err) {
			clear_cold_data(page);
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto retry;
			}
			if (is_dirty)
				set_page_dirty(page);
		}
	}
out:
	f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node page of a victim data
 * block and checks the block's validity. If the block is valid, it is
 * copied with cold status and the parent node is updated.
 * If the parent node is not valid or the data block address is
 * different, the victim data block is ignored.
 */
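/*
 * gc_data_segment() works in five phases over the summary entries:
 * phases 0 and 1 read ahead the NAT blocks and node pages, phase 2
 * reads ahead the owning inodes' node pages, phase 3 igets each inode
 * and pre-reads the victim data page (inodes needing post-read
 * processing are only queued on gc_list), and phase 4 moves each block
 * via move_data_block() or move_data_page().
 */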
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			f2fs_ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			/* if inode uses a special I/O path, defer it to phase 4 */
			if (f2fs_post_read_required(inode)) {
				add_gc_inode(gc_list, inode);
				continue;
			}

			if (!down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				iput(inode);
				continue;
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode);
			data_page = f2fs_get_read_data_page(inode,
					start_bidx + ofs_in_node, REQ_RAHEAD,
					true);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->i_gc_rwsem[READ]))
					continue;
				if (!down_write_trylock(
						&fi->i_gc_rwsem[WRITE])) {
					up_write(&fi->i_gc_rwsem[READ]);
					continue;
				}
				locked = true;

				/* wait for all inflight aio data */
				inode_dio_wait(inode);
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_post_read_required(inode))
				move_data_block(inode, start_bidx, gc_type,
								segno, off);
			else
				move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (locked) {
				up_write(&fi->i_gc_rwsem[WRITE]);
				up_write(&fi->i_gc_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS);
	up_write(&sit_i->sentry_lock);
	return ret;
}

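/*
 * do_garbage_collect() always works on a whole section: it prefetches
 * and references the SSA summary pages for every segment in the victim
 * section, then GCs the segments one by one.  The return value counts
 * the segments that ended up with zero valid blocks (only tallied for
 * FG_GC), which f2fs_gc() compares against segs_per_sec.
 */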
static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int seg_freed = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;

	/* readahead multiple SSA blocks that have contiguous addresses */
	if (sbi->segs_per_sec > 1)
		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					sbi->segs_per_sec, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = f2fs_get_sum_page(sbi, segno++);
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0 ||
				!PageUptodate(sum_page) ||
				unlikely(f2fs_cp_error(sbi)))
			goto next;

		sum = page_address(sum_page);
		if (type != GET_SUM_TYPE((&sum->footer))) {
			f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent segment (%u) "
				"type [%d, %d] in SSA and SIT",
				segno, type, GET_SUM_TYPE((&sum->footer)));
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			goto next;
		}

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			gc_node_segment(sbi, sum->entries, segno, gc_type);
		else
			gc_data_segment(sbi, sum->entries, gc_list, segno,
								gc_type);

		stat_inc_seg_count(sbi, type, gc_type);

		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;
next:
		f2fs_put_page(sum_page, 0);
	}

	if (gc_type == FG_GC)
		f2fs_submit_merged_write(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA);

	blk_finish_plug(&plug);

	stat_inc_call_count(sbi->stat_info);

	return seg_freed;
}

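/*
 * f2fs_gc() is entered with sbi->gc_mutex held and drops it before
 * returning.  Foreground GC keeps looping via gc_more until enough free
 * sections exist, then writes a checkpoint to turn the freed prefree
 * segments into really free ones; "sync" callers get -EAGAIN if no
 * section could be freed.
 */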
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
			bool background, unsigned int segno)
{
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0, seg_freed = 0, total_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	unsigned int init_segno = segno;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
	};
	unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
	unsigned int skipped_round = 0, round = 0;

	trace_f2fs_gc_begin(sbi->sb, sync, background,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
		/*
		 * For example, if there are many prefree_segments below the
		 * given threshold, we can make them free by checkpoint. Then,
		 * we secure free segments which don't need fggc any more.
		 */
		if (prefree_segments(sbi)) {
			ret = f2fs_write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
		if (has_not_enough_free_secs(sbi, 0, 0))
			gc_type = FG_GC;
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
	if (gc_type == BG_GC && !background) {
		ret = -EINVAL;
		goto stop;
	}
	if (!__get_victim(sbi, &segno, gc_type)) {
		ret = -ENODATA;
		goto stop;
	}

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
	if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec)
		sec_freed++;
	total_freed += seg_freed;

	if (gc_type == FG_GC) {
		if (sbi->skipped_atomic_files[FG_GC] > last_skipped)
			skipped_round++;
		last_skipped = sbi->skipped_atomic_files[FG_GC];
		round++;
	}

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (!sync) {
		if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
			if (skipped_round > MAX_SKIP_ATOMIC_COUNT &&
				skipped_round * 2 >= round)
				f2fs_drop_inmem_pages_all(sbi, true);
			segno = NULL_SEGNO;
			goto gc_more;
		}

		if (gc_type == FG_GC)
			ret = f2fs_write_checkpoint(sbi, &cpc);
	}
stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&gc_list);

	if (sync)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}

void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;

	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* give warm/cold data area from slower device */
	if (sbi->s_ndevs && sbi->segs_per_sec == 1)
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
}