// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static int gc_thread_func(void *data)
{
        struct f2fs_sb_info *sbi = data;
        struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
        wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
        unsigned int wait_ms;

        wait_ms = gc_th->min_sleep_time;

        set_freezable();
        do {
                wait_event_interruptible_timeout(*wq,
                                kthread_should_stop() || freezing(current) ||
                                gc_th->gc_wake,
                                msecs_to_jiffies(wait_ms));

                /* give it a try one time */
                if (gc_th->gc_wake)
                        gc_th->gc_wake = 0;

                if (try_to_freeze()) {
                        stat_other_skip_bggc_count(sbi);
                        continue;
                }
                if (kthread_should_stop())
                        break;

                if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
                        increase_sleep_time(gc_th, &wait_ms);
                        stat_other_skip_bggc_count(sbi);
                        continue;
                }

                if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
                        f2fs_show_injection_info(FAULT_CHECKPOINT);
                        f2fs_stop_checkpoint(sbi, false);
                }

                if (!sb_start_write_trylock(sbi->sb)) {
                        stat_other_skip_bggc_count(sbi);
                        continue;
                }

                /*
                 * [GC triggering condition]
                 * 0. GC is not conducted currently.
                 * 1. There are enough dirty segments.
                 * 2. IO subsystem is idle by checking the # of writeback pages.
                 * 3. IO subsystem is idle by checking the # of requests in
                 *    bdev's request list.
                 *
                 * Note) We have to avoid triggering GCs frequently.
                 * Because it is possible that some segments can be
                 * invalidated soon after by user update or deletion.
                 * So, I'd like to wait some time to collect dirty segments.
                 */
                if (sbi->gc_mode == GC_URGENT) {
                        wait_ms = gc_th->urgent_sleep_time;
                        mutex_lock(&sbi->gc_mutex);
                        goto do_gc;
                }

                if (!mutex_trylock(&sbi->gc_mutex)) {
                        stat_other_skip_bggc_count(sbi);
                        goto next;
                }

                if (!is_idle(sbi, GC_TIME)) {
                        increase_sleep_time(gc_th, &wait_ms);
                        mutex_unlock(&sbi->gc_mutex);
                        stat_io_skip_bggc_count(sbi);
                        goto next;
                }

                if (has_enough_invalid_blocks(sbi))
                        decrease_sleep_time(gc_th, &wait_ms);
                else
                        increase_sleep_time(gc_th, &wait_ms);
do_gc:
                stat_inc_bggc_count(sbi);

                /* if return value is not zero, no victim was selected */
                if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true, NULL_SEGNO))
                        wait_ms = gc_th->no_gc_sleep_time;

                trace_f2fs_background_gc(sbi->sb, wait_ms,
                                prefree_segments(sbi), free_segments(sbi));

                /* balancing f2fs's metadata periodically */
                f2fs_balance_fs_bg(sbi);
next:
                sb_end_write(sbi->sb);

        } while (!kthread_should_stop());
        return 0;
}

int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
        struct f2fs_gc_kthread *gc_th;
        dev_t dev = sbi->sb->s_bdev->bd_dev;
        int err = 0;

        gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
        if (!gc_th) {
                err = -ENOMEM;
                goto out;
        }

        gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
        gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
        gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
        gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

        gc_th->gc_wake = 0;

        sbi->gc_thread = gc_th;
        init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
        sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
                        "f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
        if (IS_ERR(gc_th->f2fs_gc_task)) {
                err = PTR_ERR(gc_th->f2fs_gc_task);
                kfree(gc_th);
                sbi->gc_thread = NULL;
        }
out:
        return err;
}

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
        struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

        if (!gc_th)
                return;
        kthread_stop(gc_th->f2fs_gc_task);
        kfree(gc_th);
        sbi->gc_thread = NULL;
}

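/*
 * Map the requested GC type onto a victim cost model: GC_GREEDY picks the
 * segment with the fewest valid blocks (cheapest to migrate right now),
 * while GC_CB is a cost-benefit policy that also weighs segment age.
 * Background GC defaults to cost-benefit and foreground GC to greedy,
 * unless sbi->gc_mode (e.g. set through the gc_idle/gc_urgent sysfs knobs)
 * forces one of them.
 */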
static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
        int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

        switch (sbi->gc_mode) {
        case GC_IDLE_CB:
                gc_mode = GC_CB;
                break;
        case GC_IDLE_GREEDY:
        case GC_URGENT:
                gc_mode = GC_GREEDY;
                break;
        }
        return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
                        int type, struct victim_sel_policy *p)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

        if (p->alloc_mode == SSR) {
                p->gc_mode = GC_GREEDY;
                p->dirty_segmap = dirty_i->dirty_segmap[type];
                p->max_search = dirty_i->nr_dirty[type];
                p->ofs_unit = 1;
        } else {
                p->gc_mode = select_gc_type(sbi, gc_type);
                p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
                p->max_search = dirty_i->nr_dirty[DIRTY];
                p->ofs_unit = sbi->segs_per_sec;
        }

        /* we need to check every dirty segment in the FG_GC case */
        if (gc_type != FG_GC &&
                        (sbi->gc_mode != GC_URGENT) &&
                        p->max_search > sbi->max_victim_search)
                p->max_search = sbi->max_victim_search;

        /* let's select beginning hot/small space first in no_heap mode */
        if (test_opt(sbi, NOHEAP) &&
                (type == CURSEG_HOT_DATA || IS_NODESEG(type)))
                p->offset = 0;
        else
                p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
                                struct victim_sel_policy *p)
{
        /* SSR allocates in a segment unit */
        if (p->alloc_mode == SSR)
                return sbi->blocks_per_seg;
        if (p->gc_mode == GC_GREEDY)
                return 2 * sbi->blocks_per_seg * p->ofs_unit;
        else if (p->gc_mode == GC_CB)
                return UINT_MAX;
        else /* No other gc_mode */
                return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        unsigned int secno;

        /*
         * If the gc_type is FG_GC, we can select victim segments
         * selected by background GC before.
         * Those segments guarantee they have small valid blocks.
         */
        for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
                if (sec_usage_check(sbi, secno))
                        continue;
                clear_bit(secno, dirty_i->victim_secmap);
                return GET_SEG_FROM_SEC(sbi, secno);
        }
        return NULL_SEGNO;
}

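/*
 * Cost-benefit value for one section, in the spirit of the classic LFS
 * cleaning heuristic: with utilization u and normalized age both scaled
 * to 0..100, the benefit/cost ratio is proportional to
 * age * (100 - u) / (100 + u). The result is subtracted from UINT_MAX so
 * that callers can uniformly minimize cost under either policy.
 */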
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
        struct sit_info *sit_i = SIT_I(sbi);
        unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
        unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
        unsigned long long mtime = 0;
        unsigned int vblocks;
        unsigned char age = 0;
        unsigned char u;
        unsigned int i;

        for (i = 0; i < sbi->segs_per_sec; i++)
                mtime += get_seg_entry(sbi, start + i)->mtime;
        vblocks = get_valid_blocks(sbi, segno, true);

        mtime = div_u64(mtime, sbi->segs_per_sec);
        vblocks = div_u64(vblocks, sbi->segs_per_sec);

        u = (vblocks * 100) >> sbi->log_blocks_per_seg;

        /* Handle if the system time has changed by the user */
        if (mtime < sit_i->min_mtime)
                sit_i->min_mtime = mtime;
        if (mtime > sit_i->max_mtime)
                sit_i->max_mtime = mtime;
        if (sit_i->max_mtime != sit_i->min_mtime)
                age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
                                sit_i->max_mtime - sit_i->min_mtime);

        return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
                        unsigned int segno, struct victim_sel_policy *p)
{
        if (p->alloc_mode == SSR)
                return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

        /* alloc_mode == LFS */
        if (p->gc_mode == GC_GREEDY)
                return get_valid_blocks(sbi, segno, true);
        else
                return get_cb_cost(sbi, segno);
}

static unsigned int count_bits(const unsigned long *addr,
                                unsigned int offset, unsigned int len)
{
        unsigned int end = offset + len, sum = 0;

        while (offset < end) {
                if (test_bit(offset++, addr))
                        ++sum;
        }
        return sum;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
                        unsigned int *result, int gc_type, int type,
                        char alloc_mode)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        struct sit_info *sm = SIT_I(sbi);
        struct victim_sel_policy p;
        unsigned int secno, last_victim;
        unsigned int last_segment = MAIN_SEGS(sbi);
        unsigned int nsearched = 0;

        mutex_lock(&dirty_i->seglist_lock);

        p.alloc_mode = alloc_mode;
        select_policy(sbi, gc_type, type, &p);

        p.min_segno = NULL_SEGNO;
        p.min_cost = get_max_cost(sbi, &p);

        if (*result != NULL_SEGNO) {
                if (IS_DATASEG(get_seg_entry(sbi, *result)->type) &&
                        get_valid_blocks(sbi, *result, false) &&
                        !sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
                        p.min_segno = *result;
                goto out;
        }

        if (p.max_search == 0)
                goto out;

        last_victim = sm->last_victim[p.gc_mode];
        if (p.alloc_mode == LFS && gc_type == FG_GC) {
                p.min_segno = check_bg_victims(sbi);
                if (p.min_segno != NULL_SEGNO)
                        goto got_it;
        }

        while (1) {
                unsigned long cost;
                unsigned int segno;

                segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
                if (segno >= last_segment) {
                        if (sm->last_victim[p.gc_mode]) {
                                last_segment =
                                        sm->last_victim[p.gc_mode];
                                sm->last_victim[p.gc_mode] = 0;
                                p.offset = 0;
                                continue;
                        }
                        break;
                }

                p.offset = segno + p.ofs_unit;
                if (p.ofs_unit > 1) {
                        p.offset -= segno % p.ofs_unit;
                        nsearched += count_bits(p.dirty_segmap,
                                                p.offset - p.ofs_unit,
                                                p.ofs_unit);
                } else {
                        nsearched++;
                }

                secno = GET_SEC_FROM_SEG(sbi, segno);

                if (sec_usage_check(sbi, secno))
                        goto next;
                /* Don't touch checkpointed data */
                if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
                                        get_ckpt_valid_blocks(sbi, segno)))
                        goto next;
                if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
                        goto next;

                cost = get_gc_cost(sbi, segno, &p);

                if (p.min_cost > cost) {
                        p.min_segno = segno;
                        p.min_cost = cost;
                }
next:
                if (nsearched >= p.max_search) {
                        if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
                                sm->last_victim[p.gc_mode] = last_victim + 1;
                        else
                                sm->last_victim[p.gc_mode] = segno + 1;
                        sm->last_victim[p.gc_mode] %= MAIN_SEGS(sbi);
                        break;
                }
        }
        if (p.min_segno != NULL_SEGNO) {
got_it:
                if (p.alloc_mode == LFS) {
                        secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
                        if (gc_type == FG_GC)
                                sbi->cur_victim_sec = secno;
                        else
                                set_bit(secno, dirty_i->victim_secmap);
                }
                *result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

                trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
                                sbi->cur_victim_sec,
                                prefree_segments(sbi), free_segments(sbi));
        }
out:
        mutex_unlock(&dirty_i->seglist_lock);

        return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
        .get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
        struct inode_entry *ie;

        ie = radix_tree_lookup(&gc_list->iroot, ino);
        if (ie)
                return ie->inode;
        return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
        struct inode_entry *new_ie;

        if (inode == find_gc_inode(gc_list, inode->i_ino)) {
                iput(inode);
                return;
        }
        new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab, GFP_NOFS);
        new_ie->inode = inode;

        f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
        list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
        struct inode_entry *ie, *next_ie;

        list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
                radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
                iput(ie->inode);
                list_del(&ie->list);
                kmem_cache_free(f2fs_inode_entry_slab, ie);
        }
}

static int check_valid_map(struct f2fs_sb_info *sbi,
                                unsigned int segno, int offset)
{
        struct sit_info *sit_i = SIT_I(sbi);
        struct seg_entry *sentry;
        int ret;

        down_read(&sit_i->sentry_lock);
        sentry = get_seg_entry(sbi, segno);
        ret = f2fs_test_bit(offset, sentry->cur_valid_map);
        up_read(&sit_i->sentry_lock);
        return ret;
}

/*
 * This function compares node address got in summary with that in NAT.
 * On validity, copy that node with cold status, otherwise (invalid node)
 * ignore that.
 */
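/*
 * The scan below makes three passes over the segment's summary entries:
 * phase 0 readaheads the NAT pages covering the nids, phase 1 readaheads
 * the node pages themselves, and phase 2 re-validates each block and
 * migrates it via f2fs_move_node_page().
 */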
static int gc_node_segment(struct f2fs_sb_info *sbi,
                struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
        struct f2fs_summary *entry;
        block_t start_addr;
        int off;
        int phase = 0;
        bool fggc = (gc_type == FG_GC);
        int submitted = 0;

        start_addr = START_BLOCK(sbi, segno);

next_step:
        entry = sum;

        if (fggc && phase == 2)
                atomic_inc(&sbi->wb_sync_req[NODE]);

        for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
                nid_t nid = le32_to_cpu(entry->nid);
                struct page *node_page;
                struct node_info ni;
                int err;

                /* stop BG_GC if there are not enough free sections. */
                if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
                        return submitted;

                if (check_valid_map(sbi, segno, off) == 0)
                        continue;

                if (phase == 0) {
                        f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
                                                        META_NAT, true);
                        continue;
                }

                if (phase == 1) {
                        f2fs_ra_node_page(sbi, nid);
                        continue;
                }

                /* phase == 2 */
                node_page = f2fs_get_node_page(sbi, nid);
                if (IS_ERR(node_page))
                        continue;

                /* block may become invalid during f2fs_get_node_page */
                if (check_valid_map(sbi, segno, off) == 0) {
                        f2fs_put_page(node_page, 1);
                        continue;
                }

                if (f2fs_get_node_info(sbi, nid, &ni)) {
                        f2fs_put_page(node_page, 1);
                        continue;
                }

                if (ni.blk_addr != start_addr + off) {
                        f2fs_put_page(node_page, 1);
                        continue;
                }

                err = f2fs_move_node_page(node_page, gc_type);
                if (!err && gc_type == FG_GC)
                        submitted++;
                stat_inc_node_blk_count(sbi, 1, gc_type);
        }

        if (++phase < 3)
                goto next_step;

        if (fggc)
                atomic_dec(&sbi->wb_sync_req[NODE]);
        return submitted;
}

/*
 * Calculate start block index indicating the given node offset.
 * Be careful, caller should give this node offset only indicating direct node
 * blocks. If any node offsets, which point the other types of node blocks such
 * as indirect or double indirect node blocks, are given, it must be a caller's
 * bug.
 */
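/*
 * For example, node_ofs == 4 is the first direct node under the first
 * indirect node (the indirect node itself sits at offset 3 and holds no
 * data addresses), so dec == 0 and bidx == 2: the inode block and the two
 * leading direct nodes cover ADDRS_PER_INODE(inode) + 2 * ADDRS_PER_BLOCK
 * data blocks before it in file order.
 */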
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
        unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
        unsigned int bidx;

        if (node_ofs == 0)
                return 0;

        if (node_ofs <= 2) {
                bidx = node_ofs - 1;
        } else if (node_ofs <= indirect_blks) {
                int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
                bidx = node_ofs - 2 - dec;
        } else {
                int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
                bidx = node_ofs - 5 - dec;
        }
        return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
}

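/*
 * Check whether the data block at @blkaddr is still referenced by its
 * owning node: read the node page and compare the address recorded at
 * @sum->ofs_in_node against @blkaddr. Fills @dni and @nofs for the caller
 * as a side effect.
 */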
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
                struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
        struct page *node_page;
        nid_t nid;
        unsigned int ofs_in_node;
        block_t source_blkaddr;

        nid = le32_to_cpu(sum->nid);
        ofs_in_node = le16_to_cpu(sum->ofs_in_node);

        node_page = f2fs_get_node_page(sbi, nid);
        if (IS_ERR(node_page))
                return false;

        if (f2fs_get_node_info(sbi, nid, dni)) {
                f2fs_put_page(node_page, 1);
                return false;
        }

        if (sum->version != dni->version) {
                f2fs_msg(sbi->sb, KERN_WARNING,
                                "%s: valid data with mismatched node version.",
                                __func__);
                set_sbi_flag(sbi, SBI_NEED_FSCK);
        }

        *nofs = ofs_of_node(node_page);
        source_blkaddr = datablock_addr(NULL, node_page, ofs_in_node);
        f2fs_put_page(node_page, 1);

        if (source_blkaddr != blkaddr)
                return false;
        return true;
}

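/*
 * Readahead one data block into META_MAPPING so that a later
 * move_data_block() finds it cached. This path is only used for inodes
 * that need post-read processing (e.g. encrypted files), whose raw blocks
 * are staged through the meta inode's address space.
 */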
static int ra_data_block(struct inode *inode, pgoff_t index)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct address_space *mapping = inode->i_mapping;
        struct dnode_of_data dn;
        struct page *page;
        struct extent_info ei = {0, 0, 0};
        struct f2fs_io_info fio = {
                .sbi = sbi,
                .ino = inode->i_ino,
                .type = DATA,
                .temp = COLD,
                .op = REQ_OP_READ,
                .op_flags = 0,
                .encrypted_page = NULL,
                .in_list = false,
                .retry = false,
        };
        int err;

        page = f2fs_grab_cache_page(mapping, index, true);
        if (!page)
                return -ENOMEM;

        if (f2fs_lookup_extent_cache(inode, index, &ei)) {
                dn.data_blkaddr = ei.blk + index - ei.fofs;
                goto got_it;
        }

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
        if (err)
                goto put_page;
        f2fs_put_dnode(&dn);

        if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
                                                DATA_GENERIC))) {
                err = -EFAULT;
                goto put_page;
        }
got_it:
        /* read page */
        fio.page = page;
        fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

        fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
                                        dn.data_blkaddr,
                                        FGP_LOCK | FGP_CREAT, GFP_NOFS);
        if (!fio.encrypted_page) {
                err = -ENOMEM;
                goto put_page;
        }

        err = f2fs_submit_page_bio(&fio);
        if (err)
                goto put_encrypted_page;
        f2fs_put_page(fio.encrypted_page, 0);
        f2fs_put_page(page, 1);
        return 0;
put_encrypted_page:
        f2fs_put_page(fio.encrypted_page, 1);
put_page:
        f2fs_put_page(page, 1);
        return err;
}

/*
 * Move data block via META_MAPPING while keeping locked data page.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
static int move_data_block(struct inode *inode, block_t bidx,
                                int gc_type, unsigned int segno, int off)
{
        struct f2fs_io_info fio = {
                .sbi = F2FS_I_SB(inode),
                .ino = inode->i_ino,
                .type = DATA,
                .temp = COLD,
                .op = REQ_OP_READ,
                .op_flags = 0,
                .encrypted_page = NULL,
                .in_list = false,
                .retry = false,
        };
        struct dnode_of_data dn;
        struct f2fs_summary sum;
        struct node_info ni;
        struct page *page, *mpage;
        block_t newaddr;
        int err = 0;
        bool lfs_mode = test_opt(fio.sbi, LFS);

        /* do not read out */
        page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
        if (!page)
                return -ENOMEM;

        if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
                err = -ENOENT;
                goto out;
        }

        if (f2fs_is_atomic_file(inode)) {
                F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
                F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
                err = -EAGAIN;
                goto out;
        }

        if (f2fs_is_pinned_file(inode)) {
                f2fs_pin_file_control(inode, true);
                err = -EAGAIN;
                goto out;
        }

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
        if (err)
                goto out;

        if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
                ClearPageUptodate(page);
                err = -ENOENT;
                goto put_out;
        }

        /*
         * don't cache encrypted data into meta inode until previous dirty
         * data were writebacked to avoid racing between GC and flush.
         */
        f2fs_wait_on_page_writeback(page, DATA, true);

        err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
        if (err)
                goto put_out;

        set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

        /* read page */
        fio.page = page;
        fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

        if (lfs_mode)
                down_write(&fio.sbi->io_order_lock);

        f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
                                        &sum, CURSEG_COLD_DATA, NULL, false);

        fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
                                newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
        if (!fio.encrypted_page) {
                err = -ENOMEM;
                goto recover_block;
        }
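
        /*
         * Fast path: if the old block is still cached and uptodate in
         * META_MAPPING, copy it into the newly allocated page directly
         * instead of issuing a read bio for it.
         */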
        mpage = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
                                        fio.old_blkaddr, FGP_LOCK, GFP_NOFS);
        if (mpage) {
                bool updated = false;

                if (PageUptodate(mpage)) {
                        memcpy(page_address(fio.encrypted_page),
                                        page_address(mpage), PAGE_SIZE);
                        updated = true;
                }
                f2fs_put_page(mpage, 1);
                invalidate_mapping_pages(META_MAPPING(fio.sbi),
                                        fio.old_blkaddr, fio.old_blkaddr);
                if (updated)
                        goto write_page;
        }

        err = f2fs_submit_page_bio(&fio);
        if (err)
                goto put_page_out;

        /* write page */
        lock_page(fio.encrypted_page);

        if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
                err = -EIO;
                goto put_page_out;
        }
        if (unlikely(!PageUptodate(fio.encrypted_page))) {
                err = -EIO;
                goto put_page_out;
        }

write_page:
        set_page_dirty(fio.encrypted_page);
        f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
        if (clear_page_dirty_for_io(fio.encrypted_page))
                dec_page_count(fio.sbi, F2FS_DIRTY_META);

        set_page_writeback(fio.encrypted_page);
        ClearPageError(page);

        /* allocate block address */
        f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

        fio.op = REQ_OP_WRITE;
        fio.op_flags = REQ_SYNC;
        fio.new_blkaddr = newaddr;
        f2fs_submit_page_write(&fio);
        if (fio.retry) {
                err = -EAGAIN;
                if (PageWriteback(fio.encrypted_page))
                        end_page_writeback(fio.encrypted_page);
                goto put_page_out;
        }

        f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);

        f2fs_update_data_blkaddr(&dn, newaddr);
        set_inode_flag(inode, FI_APPEND_WRITE);
        if (page->index == 0)
                set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
        f2fs_put_page(fio.encrypted_page, 1);
recover_block:
        if (lfs_mode)
                up_write(&fio.sbi->io_order_lock);
        if (err)
                f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
                                                                true, true);
put_out:
        f2fs_put_dnode(&dn);
out:
        f2fs_put_page(page, 1);
        return err;
}

static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
                                        unsigned int segno, int off)
{
        struct page *page;
        int err = 0;

        page = f2fs_get_lock_data_page(inode, bidx, true);
        if (IS_ERR(page))
                return PTR_ERR(page);

        if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
                err = -ENOENT;
                goto out;
        }

        if (f2fs_is_atomic_file(inode)) {
                F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
                F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
                err = -EAGAIN;
                goto out;
        }
        if (f2fs_is_pinned_file(inode)) {
                if (gc_type == FG_GC)
                        f2fs_pin_file_control(inode, true);
                err = -EAGAIN;
                goto out;
        }
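
        /*
         * Background GC only redirties the page (tagged cold) and lets
         * regular writeback move it; foreground GC writes it out
         * synchronously right here.
         */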
        if (gc_type == BG_GC) {
                if (PageWriteback(page)) {
                        err = -EAGAIN;
                        goto out;
                }
                set_page_dirty(page);
                set_cold_data(page);
        } else {
                struct f2fs_io_info fio = {
                        .sbi = F2FS_I_SB(inode),
                        .ino = inode->i_ino,
                        .type = DATA,
                        .temp = COLD,
                        .op = REQ_OP_WRITE,
                        .op_flags = REQ_SYNC,
                        .old_blkaddr = NULL_ADDR,
                        .page = page,
                        .encrypted_page = NULL,
                        .need_lock = LOCK_REQ,
                        .io_type = FS_GC_DATA_IO,
                };
                bool is_dirty = PageDirty(page);

retry:
                set_page_dirty(page);
                f2fs_wait_on_page_writeback(page, DATA, true);
                if (clear_page_dirty_for_io(page)) {
                        inode_dec_dirty_pages(inode);
                        f2fs_remove_dirty_inode(inode);
                }

                set_cold_data(page);

                err = f2fs_do_write_data_page(&fio);
                if (err) {
                        clear_cold_data(page);
                        if (err == -ENOMEM) {
                                congestion_wait(BLK_RW_ASYNC, HZ/50);
                                goto retry;
                        }
                        if (is_dirty)
                                set_page_dirty(page);
                }
        }
out:
        f2fs_put_page(page, 1);
        return err;
}

/*
 * This function tries to get parent node of victim data block, and identifies
 * data block validity. If the block is valid, copy that with cold status and
 * modify parent node.
 * If the parent node is not valid or the data block address is different,
 * the victim data block is ignored.
 */
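/*
 * Like gc_node_segment(), this makes several passes over the summary
 * entries: phase 0 readaheads NAT pages, phase 1 readaheads the dnode
 * pages, phase 2 readaheads the owning inodes' node pages, phase 3 grabs
 * each inode and pre-reads the victim data page, and phase 4 performs the
 * actual move while holding i_gc_rwsem.
 */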
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
                struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
        struct super_block *sb = sbi->sb;
        struct f2fs_summary *entry;
        block_t start_addr;
        int off;
        int phase = 0;
        int submitted = 0;

        start_addr = START_BLOCK(sbi, segno);

next_step:
        entry = sum;

        for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
                struct page *data_page;
                struct inode *inode;
                struct node_info dni; /* dnode info for the data */
                unsigned int ofs_in_node, nofs;
                block_t start_bidx;
                nid_t nid = le32_to_cpu(entry->nid);

                /* stop BG_GC if there are not enough free sections. */
                if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
                        return submitted;

                if (check_valid_map(sbi, segno, off) == 0)
                        continue;

                if (phase == 0) {
                        f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
                                                        META_NAT, true);
                        continue;
                }

                if (phase == 1) {
                        f2fs_ra_node_page(sbi, nid);
                        continue;
                }

                /* Get an inode by ino with checking validity */
                if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
                        continue;

                if (phase == 2) {
                        f2fs_ra_node_page(sbi, dni.ino);
                        continue;
                }

                ofs_in_node = le16_to_cpu(entry->ofs_in_node);

                if (phase == 3) {
                        inode = f2fs_iget(sb, dni.ino);
                        if (IS_ERR(inode) || is_bad_inode(inode))
                                continue;

                        if (!down_write_trylock(
                                &F2FS_I(inode)->i_gc_rwsem[WRITE])) {
                                iput(inode);
                                sbi->skipped_gc_rwsem++;
                                continue;
                        }

                        start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
                                                                ofs_in_node;

                        if (f2fs_post_read_required(inode)) {
                                int err = ra_data_block(inode, start_bidx);

                                up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                                if (err) {
                                        iput(inode);
                                        continue;
                                }
                                add_gc_inode(gc_list, inode);
                                continue;
                        }

                        data_page = f2fs_get_read_data_page(inode,
                                                start_bidx, REQ_RAHEAD, true);
                        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                        if (IS_ERR(data_page)) {
                                iput(inode);
                                continue;
                        }

                        f2fs_put_page(data_page, 0);
                        add_gc_inode(gc_list, inode);
                        continue;
                }

                /* phase 4 */
                inode = find_gc_inode(gc_list, dni.ino);
                if (inode) {
                        struct f2fs_inode_info *fi = F2FS_I(inode);
                        bool locked = false;
                        int err;

                        if (S_ISREG(inode->i_mode)) {
                                if (!down_write_trylock(&fi->i_gc_rwsem[READ]))
                                        continue;
                                if (!down_write_trylock(
                                                &fi->i_gc_rwsem[WRITE])) {
                                        sbi->skipped_gc_rwsem++;
                                        up_write(&fi->i_gc_rwsem[READ]);
                                        continue;
                                }
                                locked = true;

                                /* wait for all inflight aio data */
                                inode_dio_wait(inode);
                        }

                        start_bidx = f2fs_start_bidx_of_node(nofs, inode)
                                                                + ofs_in_node;
                        if (f2fs_post_read_required(inode))
                                err = move_data_block(inode, start_bidx,
                                                        gc_type, segno, off);
                        else
                                err = move_data_page(inode, start_bidx, gc_type,
                                                                segno, off);

                        if (!err && (gc_type == FG_GC ||
                                        f2fs_post_read_required(inode)))
                                submitted++;

                        if (locked) {
                                up_write(&fi->i_gc_rwsem[WRITE]);
                                up_write(&fi->i_gc_rwsem[READ]);
                        }

                        stat_inc_data_blk_count(sbi, 1, gc_type);
                }
        }

        if (++phase < 5)
                goto next_step;

        return submitted;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
                        int gc_type)
{
        struct sit_info *sit_i = SIT_I(sbi);
        int ret;

        down_write(&sit_i->sentry_lock);
        ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
                                                        NO_CHECK_TYPE, LFS);
        up_write(&sit_i->sentry_lock);
        return ret;
}

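/*
 * GC is done section by section: @start_segno is the first segment of the
 * victim section, and all segs_per_sec segments in it are collected under
 * one block plug so their IO can be merged.
 */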
static int do_garbage_collect(struct f2fs_sb_info *sbi,
                                unsigned int start_segno,
                                struct gc_inode_list *gc_list, int gc_type)
{
        struct page *sum_page;
        struct f2fs_summary_block *sum;
        struct blk_plug plug;
        unsigned int segno = start_segno;
        unsigned int end_segno = start_segno + sbi->segs_per_sec;
        int seg_freed = 0;
        unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
                                                SUM_TYPE_DATA : SUM_TYPE_NODE;
        int submitted = 0;

        /* readahead multiple SSA blocks that have contiguous addresses */
        if (sbi->segs_per_sec > 1)
                f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
                                        sbi->segs_per_sec, META_SSA, true);

        /* reference all summary pages */
        while (segno < end_segno) {
                sum_page = f2fs_get_sum_page(sbi, segno++);
                if (IS_ERR(sum_page)) {
                        int err = PTR_ERR(sum_page);

                        end_segno = segno - 1;
                        for (segno = start_segno; segno < end_segno; segno++) {
                                sum_page = find_get_page(META_MAPPING(sbi),
                                                GET_SUM_BLOCK(sbi, segno));
                                f2fs_put_page(sum_page, 0);
                                f2fs_put_page(sum_page, 0);
                        }
                        return err;
                }
                unlock_page(sum_page);
        }

        blk_start_plug(&plug);

        for (segno = start_segno; segno < end_segno; segno++) {

                /* find segment summary of victim */
                sum_page = find_get_page(META_MAPPING(sbi),
                                        GET_SUM_BLOCK(sbi, segno));
                f2fs_put_page(sum_page, 0);

                if (get_valid_blocks(sbi, segno, false) == 0 ||
                                !PageUptodate(sum_page) ||
                                unlikely(f2fs_cp_error(sbi)))
                        goto next;

                sum = page_address(sum_page);
                if (type != GET_SUM_TYPE((&sum->footer))) {
                        f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent segment (%u) "
                                "type [%d, %d] in SSA and SIT",
                                segno, type, GET_SUM_TYPE((&sum->footer)));
                        set_sbi_flag(sbi, SBI_NEED_FSCK);
                        goto next;
                }

                /*
                 * this is to avoid deadlock:
                 * - lock_page(sum_page)         - f2fs_replace_block
                 *  - check_valid_map()            - down_write(sentry_lock)
                 *   - down_read(sentry_lock)     - change_curseg()
                 *                                  - lock_page(sum_page)
                 */
                if (type == SUM_TYPE_NODE)
                        submitted += gc_node_segment(sbi, sum->entries, segno,
                                                                gc_type);
                else
                        submitted += gc_data_segment(sbi, sum->entries, gc_list,
                                                        segno, gc_type);

                stat_inc_seg_count(sbi, type, gc_type);

                if (gc_type == FG_GC &&
                                get_valid_blocks(sbi, segno, false) == 0)
                        seg_freed++;
next:
                f2fs_put_page(sum_page, 0);
        }

        if (submitted)
                f2fs_submit_merged_write(sbi,
                                (type == SUM_TYPE_NODE) ? NODE : DATA);

        blk_finish_plug(&plug);

        stat_inc_call_count(sbi->stat_info);

        return seg_freed;
}

int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
                        bool background, unsigned int segno)
{
        int gc_type = sync ? FG_GC : BG_GC;
        int sec_freed = 0, seg_freed = 0, total_freed = 0;
        int ret = 0;
        struct cp_control cpc;
        unsigned int init_segno = segno;
        struct gc_inode_list gc_list = {
                .ilist = LIST_HEAD_INIT(gc_list.ilist),
                .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
        };
        unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
        unsigned long long first_skipped;
        unsigned int skipped_round = 0, round = 0;

        trace_f2fs_gc_begin(sbi->sb, sync, background,
                                get_pages(sbi, F2FS_DIRTY_NODES),
                                get_pages(sbi, F2FS_DIRTY_DENTS),
                                get_pages(sbi, F2FS_DIRTY_IMETA),
                                free_sections(sbi),
                                free_segments(sbi),
                                reserved_segments(sbi),
                                prefree_segments(sbi));

        cpc.reason = __get_cp_reason(sbi);
        sbi->skipped_gc_rwsem = 0;
        first_skipped = last_skipped;
gc_more:
        if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
                ret = -EINVAL;
                goto stop;
        }
        if (unlikely(f2fs_cp_error(sbi))) {
                ret = -EIO;
                goto stop;
        }

        if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
                /*
                 * For example, if there are many prefree_segments below given
                 * threshold, we can make them free by checkpoint. Then, we
                 * secure free segments which don't need fggc any more.
                 */
                if (prefree_segments(sbi) &&
                                !is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
                        ret = f2fs_write_checkpoint(sbi, &cpc);
                        if (ret)
                                goto stop;
                }
                if (has_not_enough_free_secs(sbi, 0, 0))
                        gc_type = FG_GC;
        }

        /* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
        if (gc_type == BG_GC && !background) {
                ret = -EINVAL;
                goto stop;
        }
        if (!__get_victim(sbi, &segno, gc_type)) {
                ret = -ENODATA;
                goto stop;
        }

        seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
        if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec)
                sec_freed++;
        total_freed += seg_freed;

        if (gc_type == FG_GC) {
                if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
                                                sbi->skipped_gc_rwsem)
                        skipped_round++;
                last_skipped = sbi->skipped_atomic_files[FG_GC];
                round++;
        }

        if (gc_type == FG_GC)
                sbi->cur_victim_sec = NULL_SEGNO;

        if (sync)
                goto stop;
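
        /*
         * Not enough sections were reclaimed: retry with a new victim,
         * unless most foreground rounds were skipped (e.g. by atomic files
         * pinning their pages), in which case drop all in-memory atomic
         * pages first so the next rounds can make progress.
         */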
        if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
                if (skipped_round <= MAX_SKIP_GC_COUNT ||
                                        skipped_round * 2 < round) {
                        segno = NULL_SEGNO;
                        goto gc_more;
                }

                if (first_skipped < last_skipped &&
                                (last_skipped - first_skipped) >
                                                sbi->skipped_gc_rwsem) {
                        f2fs_drop_inmem_pages_all(sbi, true);
                        segno = NULL_SEGNO;
                        goto gc_more;
                }
                if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED))
                        ret = f2fs_write_checkpoint(sbi, &cpc);
        }
stop:
        SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
        SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;

        trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
                                get_pages(sbi, F2FS_DIRTY_NODES),
                                get_pages(sbi, F2FS_DIRTY_DENTS),
                                get_pages(sbi, F2FS_DIRTY_IMETA),
                                free_sections(sbi),
                                free_segments(sbi),
                                reserved_segments(sbi),
                                prefree_segments(sbi));

        mutex_unlock(&sbi->gc_mutex);

        put_gc_inode(&gc_list);

        if (sync && !ret)
                ret = sec_freed ? 0 : -EAGAIN;
        return ret;
}

void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
{
        DIRTY_I(sbi)->v_ops = &default_v_ops;

        sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

        /* give warm/cold data area from slower device */
        if (sbi->s_ndevs && sbi->segs_per_sec == 1)
                SIT_I(sbi)->last_victim[ALLOC_NEXT] =
                                GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
}