/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	unsigned int wait_ms;

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = 0;

		if (try_to_freeze())
			continue;
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			continue;
		}

#ifdef CONFIG_F2FS_FAULT_INJECTION
		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false);
		}
#endif

		if (!sb_start_write_trylock(sbi->sb))
			continue;
		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs frequently.
		 * Because it is possible that some segments can be
		 * invalidated soon after by user update or deletion.
		 * So, I'd like to wait some time to collect dirty segments.
		 */
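		/*
		 * The sleep interval below adapts between the minimum and
		 * maximum sleep times and falls back to no_gc_sleep_time when
		 * no victim is found; GC_URGENT mode bypasses the idleness
		 * checks and uses urgent_sleep_time instead.  (The defaults
		 * come from gc.h; roughly 0.5s urgent, 30s min, 60s max and
		 * 5min no-GC at the time of writing.)
		 */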
		if (sbi->gc_mode == GC_URGENT) {
			wait_ms = gc_th->urgent_sleep_time;
			mutex_lock(&sbi->gc_mutex);
			goto do_gc;
		}

		if (!mutex_trylock(&sbi->gc_mutex))
			goto next;

		if (!is_idle(sbi)) {
			increase_sleep_time(gc_th, &wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			goto next;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
		stat_inc_bggc_count(sbi);
		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true, NULL_SEGNO))
			wait_ms = gc_th->no_gc_sleep_time;

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);
next:
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_wake = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}
void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}
static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT:
		gc_mode = GC_GREEDY;
		break;
	}
	return gc_mode;
}
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	/* we need to check every dirty segments in the FG_GC case */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select beginning hot/small space first in no_heap mode*/
	if (test_opt(sbi, NOHEAP) &&
		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}
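/*
 * The value returned above seeds p->min_cost in get_victim_by_default(), i.e.
 * it is an upper bound on get_gc_cost(): ckpt_valid_blocks of one segment for
 * SSR, a loose bound of twice the block count of one section for greedy GC,
 * and UINT_MAX for cost-benefit, since that cost is expressed as UINT_MAX
 * minus the computed benefit.
 */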
static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can select victim segments
	 * selected by background GC before.
	 * Those segments guarantee they have small valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle if the system time has changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}
static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else
		return get_cb_cost(sbi, segno);
}
static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}
/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from dirty seglist.
 */
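/*
 * The scan below walks the dirty segmap starting from the per-mode
 * last_victim cursor and wraps around at most once; it gives up after
 * p.max_search candidates so a single invocation stays bounded, and the
 * cursor is saved back so the next call resumes where this one stopped.
 */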
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment = MAIN_SEGS(sbi);
	unsigned int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = get_max_cost(sbi, &p);

	if (*result != NULL_SEGNO) {
		if (IS_DATASEG(get_seg_entry(sbi, *result)->type) &&
			get_valid_blocks(sbi, *result, false) &&
			!sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			p.min_segno = *result;
		goto out;
	}

	if (p.max_search == 0)
		goto out;

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1) {
			p.offset -= segno % p.ofs_unit;
			nsearched += count_bits(p.dirty_segmap,
						p.offset - p.ofs_unit,
						p.ofs_unit);
		} else {
			nsearched++;
		}

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] = last_victim + 1;
			else
				sm->last_victim[p.gc_mode] = segno + 1;
			sm->last_victim[p.gc_mode] %= MAIN_SEGS(sbi);
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
out:
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}
static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};
static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}
static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}
static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(f2fs_inode_entry_slab, ie);
	}
}
static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}
/*
 * This function compares node address got in summary with that in NAT.
 * On validity, copy that node with cold status, otherwise (invalid node)
 * ignore that.
 */
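/*
 * The segment is walked in three passes (tracked by the phase variable
 * below): pass 0 readaheads the NAT blocks of the referenced nodes, pass 1
 * readaheads the node pages themselves, and pass 2 revalidates and actually
 * moves each node block.
 */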
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;

		/* stop BG_GC if there is not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = f2fs_get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during f2fs_get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		f2fs_get_node_info(sbi, nid, &ni);
		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		f2fs_move_node_page(node_page, gc_type);
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
}
/*
 * Calculate start block index indicating the given node offset.
 * Be careful, caller should give this node offset only indicating direct node
 * blocks. If any node offsets, which point the other types of node blocks such
 * as indirect or double indirect node blocks, are given, it must be a caller's
 * mistake.
 */
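/*
 * For example (illustrative values): node offset 1, the first direct node,
 * covers file blocks starting at ADDRS_PER_INODE(inode); node offset 2 starts
 * one ADDRS_PER_BLOCK later; and node offset 4, the first direct node hanging
 * off the first indirect node, yields bidx = 4 - 2 - 0 = 2 by the formula
 * below, i.e. it starts at 2 * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode).
 */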
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
}
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	f2fs_get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: valid data with mismatched node version.",
				__func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return false;
	return true;
}
/*
 * Move data block via META_MAPPING while keeping locked data page.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
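/*
 * In outline: grab and lock the data page, look up its dnode, allocate a new
 * block address from CURSEG_COLD_DATA, copy the block through a page in
 * META_MAPPING, submit the write, and finally update the dnode with the new
 * address (rolling the allocation back via f2fs_do_replace_block() on
 * failure).
 */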
static void move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page;
	block_t newaddr;
	int err;
	bool lfs_mode = test_opt(fio.sbi, LFS);

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		goto out;
	}

	if (f2fs_is_pinned_file(inode)) {
		f2fs_pin_file_control(inode, true);
		goto out;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		goto put_out;
	}

	/*
	 * don't cache encrypted data into meta inode until previous dirty
	 * data were writebacked to avoid racing between GC and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true);

	f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		down_write(&fio.sbi->io_order_lock);

	f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, CURSEG_COLD_DATA, NULL, false);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto recover_block;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_page_out;

	/* write page */
	lock_page(fio.encrypted_page);

	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
		err = -EIO;
		goto put_page_out;
	}
	if (unlikely(!PageUptodate(fio.encrypted_page))) {
		err = -EIO;
		goto put_page_out;
	}

	set_page_dirty(fio.encrypted_page);
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);
	ClearPageError(page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);
	if (fio.retry) {
		if (PageWriteback(fio.encrypted_page))
			end_page_writeback(fio.encrypted_page);
		goto put_page_out;
	}

	f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (lfs_mode)
		up_write(&fio.sbi->io_order_lock);
	if (err)
		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
								true, true);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
}
static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
						unsigned int segno, int off)
{
	struct page *page;

	page = f2fs_get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		goto out;
	}
	if (f2fs_is_pinned_file(inode)) {
		if (gc_type == FG_GC)
			f2fs_pin_file_control(inode, true);
		goto out;
	}

	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);
		int err;

retry:
		set_page_dirty(page);
		f2fs_wait_on_page_writeback(page, DATA, true);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}

		set_cold_data(page);

		err = f2fs_do_write_data_page(&fio);
		if (err) {
			clear_cold_data(page);
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto retry;
			}
			if (is_dirty)
				set_page_dirty(page);
		}
	}
out:
	f2fs_put_page(page, 1);
}
/*
 * This function tries to get parent node of victim data block, and identifies
 * data block validity. If the block is valid, copy that with cold status and
 * modify parent node.
 * If the parent node is not valid or the data block address is different,
 * the victim data block is ignored.
 */
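/*
 * Like gc_node_segment(), the scan below runs in phases: phase 0 and 1
 * readahead NAT entries and node pages, phase 2 checks liveness via
 * is_alive() and readaheads the owning inode's node page, phase 3 igets the
 * inode and prefetches the data page, and phase 4 finally moves the block
 * with move_data_block() or move_data_page().
 */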
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/* stop BG_GC if there is not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			f2fs_ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			/* if inode uses special I/O path, let's go phase 3 */
			if (f2fs_post_read_required(inode)) {
				add_gc_inode(gc_list, inode);
				continue;
			}

			if (!down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				iput(inode);
				continue;
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode);
			data_page = f2fs_get_read_data_page(inode,
					start_bidx + ofs_in_node, REQ_RAHEAD,
					true);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->i_gc_rwsem[READ]))
					continue;
				if (!down_write_trylock(
						&fi->i_gc_rwsem[WRITE])) {
					up_write(&fi->i_gc_rwsem[READ]);
					continue;
				}
				locked = true;

				/* wait for all inflight aio data */
				inode_dio_wait(inode);
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_post_read_required(inode))
				move_data_block(inode, start_bidx, gc_type,
								segno, off);
			else
				move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (locked) {
				up_write(&fi->i_gc_rwsem[WRITE]);
				up_write(&fi->i_gc_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;
}
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS);
	up_write(&sit_i->sentry_lock);
	return ret;
}
static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int seg_freed = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;

	/* readahead multi ssa blocks those have contiguous address */
	if (sbi->segs_per_sec > 1)
		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					sbi->segs_per_sec, META_SSA, true);

	/* reference all summary page */
	while (segno < end_segno) {
		sum_page = f2fs_get_sum_page(sbi, segno++);
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0 ||
				!PageUptodate(sum_page) ||
				unlikely(f2fs_cp_error(sbi)))
			goto next;

		sum = page_address(sum_page);
		if (type != GET_SUM_TYPE((&sum->footer))) {
			f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent segment (%u) "
				"type [%d, %d] in SSA and SIT",
				segno, type, GET_SUM_TYPE((&sum->footer)));
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			goto next;
		}

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			gc_node_segment(sbi, sum->entries, segno, gc_type);
		else
			gc_data_segment(sbi, sum->entries, gc_list, segno,
								gc_type);

		stat_inc_seg_count(sbi, type, gc_type);

		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;
next:
		f2fs_put_page(sum_page, 0);
	}

	if (gc_type == FG_GC)
		f2fs_submit_merged_write(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA);

	blk_finish_plug(&plug);

	stat_inc_call_count(sbi->stat_info);

	return seg_freed;
}
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
			bool background, unsigned int segno)
{
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0, seg_freed = 0, total_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	unsigned int init_segno = segno;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
	};
	unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
	unsigned int skipped_round = 0, round = 0;

	trace_f2fs_gc_begin(sbi->sb, sync, background,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
		/*
		 * For example, if there are many prefree_segments below given
		 * threshold, we can make them free by checkpoint. Then, we
		 * secure free segments which doesn't need fggc any more.
		 */
		if (prefree_segments(sbi)) {
			ret = f2fs_write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
		if (has_not_enough_free_secs(sbi, 0, 0))
			gc_type = FG_GC;
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
	if (gc_type == BG_GC && !background) {
		ret = -EINVAL;
		goto stop;
	}
	if (!__get_victim(sbi, &segno, gc_type)) {
		ret = -ENODATA;
		goto stop;
	}

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
	if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec)
		sec_freed++;
	total_freed += seg_freed;

	if (gc_type == FG_GC) {
		if (sbi->skipped_atomic_files[FG_GC] > last_skipped)
			skipped_round++;
		last_skipped = sbi->skipped_atomic_files[FG_GC];
		round++;
	}

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (!sync) {
		if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
			if (skipped_round > MAX_SKIP_ATOMIC_COUNT &&
				skipped_round * 2 >= round)
				f2fs_drop_inmem_pages_all(sbi, true);
			segno = NULL_SEGNO;
			goto gc_more;
		}

		if (gc_type == FG_GC)
			ret = f2fs_write_checkpoint(sbi, &cpc);
	}
stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&gc_list);

	if (sync)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}
void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;

	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* give warm/cold data area from slower device */
	if (sbi->s_ndevs && sbi->segs_per_sec == 1)
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
}