/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/blkdev.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = gc_th->min_sleep_time;

	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
				kthread_should_stop(),
				msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs frequently.
		 * Because it is possible that some segments can be
		 * invalidated soon after by user update or deletion.
		 * So, I'd like to wait some time to collect dirty segments.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			increase_sleep_time(gc_th, &wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);

		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi))
			wait_ms = gc_th->no_gc_sleep_time;

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);

	} while (!kthread_should_stop());
	return 0;
}
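/*
 * Summary of the adaptive sleep policy above (interval values are the usual
 * defaults and are assumed here for illustration only): wait_ms starts at
 * gc_th->min_sleep_time (typically 30 s), is raised by increase_sleep_time()
 * toward gc_th->max_sleep_time (typically 60 s) whenever the filesystem is
 * frozen, gc_mutex is contended, I/O is not idle, or few blocks are invalid,
 * is lowered by decrease_sleep_time() when invalid blocks accumulate, and
 * jumps to gc_th->no_gc_sleep_time (typically 300 s) when f2fs_gc() reports
 * that no victim could be selected.
 */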
int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_idle = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}
void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}
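/*
 * Usage sketch (an assumption about wiring that lives outside this file):
 * the mount path is expected to call start_gc_thread(sbi) once the
 * superblock is usable and to treat a non-zero return as "no background GC",
 * while unmount/remount paths call stop_gc_thread(sbi) before tearing sbi
 * down.
 *
 *	err = start_gc_thread(sbi);
 *	...
 *	stop_gc_thread(sbi);
 */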
static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	if (gc_th && gc_th->gc_idle) {
		if (gc_th->gc_idle == 1)
			gc_mode = GC_CB;
		else if (gc_th->gc_idle == 2)
			gc_mode = GC_GREEDY;
	}
	return gc_mode;
}
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	if (p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	p->offset = sbi->last_victim[p->gc_mode];
}
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return 1 << sbi->log_blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}
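/*
 * Worked example of the bounds above (block/segment geometry assumed for
 * illustration: 4 KB blocks, log_blocks_per_seg == 9, one segment per
 * section, so ofs_unit == 1 in LFS mode):
 *  - SSR:       max cost = 1 << 9 = 512, i.e. at most one segment's worth
 *               of valid blocks;
 *  - GC_GREEDY: max cost = 512 * 1 = 512 valid blocks per candidate section;
 *  - GC_CB:     max cost = UINT_MAX, because get_cb_cost() below returns
 *               values counted down from UINT_MAX.
 */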
static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can select victim segments
	 * selected by background GC before.
	 * Those segments guarantee they have small valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return secno * sbi->segs_per_sec;
	}
	return NULL_SEGNO;
}
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SECNO(sbi, segno);
	unsigned int start = secno * sbi->segs_per_sec;
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle if the system time has changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}
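/*
 * Worked example of the cost-benefit formula above (numbers chosen purely
 * for illustration): a cold section that is 20% utilized (u == 20) and has
 * not been written for a long time (age == 100) yields
 * 100 * (100 - 20) * 100 / (100 + 20) == 6666, i.e. a cost of
 * UINT_MAX - 6666.  A hot, nearly full section with u == 90 and age == 10
 * yields only 100 * 10 * 10 / 190 == 52, i.e. a cost of UINT_MAX - 52.
 * Since victim selection keeps the minimum cost, the cold, mostly-invalid
 * section is preferred.
 */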
static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
	else
		return get_cb_cost(sbi, segno);
}
/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, max_cost;
	int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = max_cost = get_max_cost(sbi, &p);

	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, MAIN_SEGS(sbi), p.offset);
		if (segno >= MAIN_SEGS(sbi)) {
			if (sbi->last_victim[p.gc_mode]) {
				sbi->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1)
			p.offset -= segno % p.ofs_unit;

		secno = GET_SECNO(sbi, segno);

		if (sec_usage_check(sbi, secno))
			continue;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			continue;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		} else if (unlikely(cost == max_cost)) {
			continue;
		}

		if (nsearched++ >= p.max_search) {
			sbi->last_victim[p.gc_mode] = segno;
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SECNO(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}
static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};
static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}
static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}
static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(inode_entry_slab, ie);
	}
}
static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}
/*
 * This function compares node address got in summary with that in NAT.
 * On validity, copy that node with cold status, otherwise (invalid node)
 * ignore that.
 */
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	bool initial = true;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;

		/* stop BG_GC if there is not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return 0;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (initial) {
			ra_node_page(sbi, nid);
			continue;
		}
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		get_node_info(sbi, nid, &ni);
		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		/* set page dirty and write it */
		if (gc_type == FG_GC) {
			f2fs_wait_on_page_writeback(node_page, NODE);
			set_page_dirty(node_page);
		} else {
			if (!PageWriteback(node_page))
				set_page_dirty(node_page);
		}
		f2fs_put_page(node_page, 1);
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (initial) {
		initial = false;
		goto next_step;
	}

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = LONG_MAX,
		};
		sync_node_pages(sbi, 0, &wbc);

		/* return 1 only if FG_GC successfully reclaimed one */
		if (get_valid_blocks(sbi, segno, 1) == 0)
			return 1;
	}
	return 0;
}
/*
 * Calculate start block index indicating the given node offset.
 * Be careful, caller should give this node offset only indicating direct node
 * blocks. If any node offsets, which point the other types of node blocks such
 * as indirect or double indirect node blocks, are given, it must be a caller's
 * mistake.
 */
block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi);
}
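/*
 * Worked example for the offset math above (constants assumed here for the
 * common 4 KB-block layout: NIDS_PER_BLOCK == 1018, ADDRS_PER_BLOCK == 1018,
 * ADDRS_PER_INODE(fi) == 923): node offsets 1 and 2 are the two direct nodes
 * referenced by the inode itself, offset 3 is the first indirect node, and
 * offset 4 is the first direct node hanging off that indirect node.  For
 * node_ofs == 4: dec = (4 - 4) / 1019 = 0, bidx = 4 - 2 - 0 = 2, so its data
 * blocks start at 2 * 1018 + 923 = 2959, i.e. right after the inode's own
 * 923 addresses and the two in-inode direct nodes.
 */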
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return false;
	return true;
}
static void move_encrypted_block(struct inode *inode, block_t bidx)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.rw = READ_SYNC,
		.encrypted_page = NULL,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page;
	int err;

	/* do not read out */
	page = grab_cache_page(inode->i_mapping, bidx);
	if (!page)
		return;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR))
		goto put_out;

	get_node_info(fio.sbi, dn.nid, &ni);
	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.blk_addr = dn.data_blkaddr;

	fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi),
					fio.blk_addr,
					FGP_LOCK|FGP_CREAT,
					GFP_NOFS);
	if (!fio.encrypted_page)
		goto put_out;

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_page_out;

	/* write page */
	lock_page(fio.encrypted_page);

	if (unlikely(!PageUptodate(fio.encrypted_page)))
		goto put_page_out;
	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi)))
		goto put_page_out;

	set_page_dirty(fio.encrypted_page);
	f2fs_wait_on_page_writeback(fio.encrypted_page, META);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE);
	allocate_data_block(fio.sbi, NULL, fio.blk_addr,
					&fio.blk_addr, &sum, CURSEG_COLD_DATA);
	fio.rw = WRITE_SYNC;
	f2fs_submit_page_mbio(&fio);

	dn.data_blkaddr = fio.blk_addr;
	set_data_blkaddr(&dn);
	f2fs_update_extent_cache(&dn);
	set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
}
static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
{
	struct page *page;

	page = get_lock_data_page(inode, bidx);
	if (IS_ERR(page))
		return;

	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.type = DATA,
			.rw = WRITE_SYNC,
			.page = page,
			.encrypted_page = NULL,
		};
		set_page_dirty(page);
		f2fs_wait_on_page_writeback(page, DATA);
		if (clear_page_dirty_for_io(page))
			inode_dec_dirty_pages(inode);
		set_cold_data(page);
		do_write_data_page(&fio);
		clear_cold_data(page);
	}
out:
	f2fs_put_page(page, 1);
}
/*
 * This function tries to get parent node of victim data block, and identifies
 * data block validity. If the block is valid, copy that with cold status and
 * modify parent node.
 * If the parent node is not valid or the data block address is different,
 * the victim data block is ignored.
 */
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;

		/* stop BG_GC if there is not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return 0;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_node_page(sbi, le32_to_cpu(entry->nid));
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 1) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 2) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			/* if encrypted inode, let's go phase 3 */
			if (f2fs_encrypted_inode(inode) &&
						S_ISREG(inode->i_mode)) {
				add_gc_inode(gc_list, inode);
				continue;
			}

			start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));
			data_page = get_read_data_page(inode,
					start_bidx + ofs_in_node, READA);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 3 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			start_bidx = start_bidx_of_node(nofs, F2FS_I(inode))
								+ ofs_in_node;
			if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
				move_encrypted_block(inode, start_bidx);
			else
				move_data_page(inode, start_bidx, gc_type);
			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 4)
		goto next_step;

	if (gc_type == FG_GC) {
		f2fs_submit_merged_bio(sbi, DATA, WRITE);

		/* return 1 only if FG_GC successfully reclaimed one */
		if (get_valid_blocks(sbi, segno, 1) == 0)
			return 1;
	}
	return 0;
}
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
							int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
							NO_CHECK_TYPE, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}
static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
				struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	int nfree = 0;

	/* read segment summary of victim */
	sum_page = get_sum_page(sbi, segno);

	blk_start_plug(&plug);

	sum = page_address(sum_page);

	/*
	 * this is to avoid deadlock:
	 * - lock_page(sum_page)         - f2fs_replace_block
	 *  - check_valid_map()            - mutex_lock(sentry_lock)
	 *   - mutex_lock(sentry_lock)     - change_curseg()
	 *                                  - lock_page(sum_page)
	 */
	unlock_page(sum_page);

	switch (GET_SUM_TYPE((&sum->footer))) {
	case SUM_TYPE_NODE:
		nfree = gc_node_segment(sbi, sum->entries, segno, gc_type);
		break;
	case SUM_TYPE_DATA:
		nfree = gc_data_segment(sbi, sum->entries, gc_list,
							segno, gc_type);
		break;
	}
	blk_finish_plug(&plug);

	stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)), gc_type);
	stat_inc_call_count(sbi->stat_info);

	f2fs_put_page(sum_page, 0);
	return nfree;
}
int f2fs_gc(struct f2fs_sb_info *sbi)
{
	unsigned int segno = NULL_SEGNO;
	unsigned int i;
	int gc_type = BG_GC;
	int nfree = 0;
	int ret = -1;
	struct cp_control cpc;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(GFP_NOFS),
	};

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
		goto stop;
	if (unlikely(f2fs_cp_error(sbi)))
		goto stop;

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) {
		gc_type = FG_GC;
		if (__get_victim(sbi, &segno, gc_type) || prefree_segments(sbi))
			write_checkpoint(sbi, &cpc);
	}

	if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
		goto stop;
	ret = 0;

	/* readahead multi ssa blocks those have contiguous address */
	if (sbi->segs_per_sec > 1)
		ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno), sbi->segs_per_sec,
								META_SSA);

	for (i = 0; i < sbi->segs_per_sec; i++)
		nfree += do_garbage_collect(sbi, segno + i, &gc_list, gc_type);

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (has_not_enough_free_secs(sbi, nfree))
		goto gc_more;

	if (gc_type == FG_GC)
		write_checkpoint(sbi, &cpc);
stop:
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&gc_list);
	return ret;
}
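/*
 * Caller-contract sketch (an illustration mirroring gc_thread_func() above,
 * not an exhaustive list of callers): f2fs_gc() must be entered with
 * sbi->gc_mutex held and releases that mutex itself before returning; a
 * non-zero return value means no victim could be selected.
 *
 *	mutex_lock(&sbi->gc_mutex);
 *	err = f2fs_gc(sbi);
 *
 * The background thread uses mutex_trylock() instead, so it simply skips a
 * round when someone else is already collecting.
 */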
void build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;
}