// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>
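
/*
 * Background GC thread: wakes up periodically (or when explicitly woken),
 * and, when the filesystem is idle and has enough invalid blocks, runs one
 * round of garbage collection and adjusts its sleep time accordingly.
 */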
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	unsigned int wait_ms;

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = 0;

		if (try_to_freeze()) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false);
		}

		if (!sb_start_write_trylock(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs frequently.
		 * Because it is possible that some segments can be
		 * invalidated soon after by user update or deletion.
		 * So, I'd like to wait some time to collect dirty segments.
		 */
		if (sbi->gc_mode == GC_URGENT) {
			wait_ms = gc_th->urgent_sleep_time;
			mutex_lock(&sbi->gc_mutex);
			goto do_gc;
		}

		if (!mutex_trylock(&sbi->gc_mutex)) {
			stat_other_skip_bggc_count(sbi);
			goto next;
		}

		if (!is_idle(sbi, GC_TIME)) {
			increase_sleep_time(gc_th, &wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			stat_io_skip_bggc_count(sbi);
			goto next;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true, NULL_SEGNO))
			wait_ms = gc_th->no_gc_sleep_time;

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);
next:
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}
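
/* Allocate the GC thread context and start the "f2fs_gc-%u:%u" kthread. */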
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_wake = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kvfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kvfree(gc_th);
	sbi->gc_thread = NULL;
}
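
/*
 * Pick the victim selection algorithm: cost-benefit for background GC and
 * greedy for foreground GC, unless overridden by sbi->gc_mode.
 */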
static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT:
		gc_mode = GC_GREEDY;
		break;
	}
	return gc_mode;
}
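
/*
 * Set up the victim selection policy: which dirty bitmap to scan, the
 * search depth, the scan unit (segment for SSR, section for LFS) and the
 * starting offset of the scan.
 */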
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	/* we need to check every dirty segment in the FG_GC case */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select beginning hot/small space first in no_heap mode */
	if (test_opt(sbi, NOHEAP) &&
		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}
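
/* Upper bound of the victim cost, used to initialize p->min_cost. */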
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can select victim segments
	 * selected by background GC before.
	 * Those segments guarantee they have small valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}
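
/*
 * Cost-benefit cost of a section: combines utilization (valid blocks) with
 * an age derived from the segments' modification times; a lower value means
 * a better GC victim.
 */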
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle if the system time has changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}
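
/*
 * Cost of a candidate segment: checkpointed valid blocks for SSR, the valid
 * block count for greedy GC, or the cost-benefit value above.
 */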
static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else
		return get_cb_cost(sbi, segno);
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}
/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
			unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment;
	unsigned int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);
	last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec;

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = get_max_cost(sbi, &p);

	if (*result != NULL_SEGNO) {
		if (get_valid_blocks(sbi, *result, false) &&
			!sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			p.min_segno = *result;
		goto out;
	}

	if (p.max_search == 0)
		goto out;

	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[BG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
			goto got_result;
		}
		if (gc_type == FG_GC &&
				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[FG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
			goto got_result;
		}
	}

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1) {
			p.offset -= segno % p.ofs_unit;
			nsearched += count_bits(p.dirty_segmap,
						p.offset - p.ofs_unit,
						p.ofs_unit);
		} else {
			nsearched++;
		}

#ifdef CONFIG_F2FS_CHECK_FS
		/*
		 * skip selecting the invalid segno (that is failed due to block
		 * validity check failure during GC) to avoid endless GC loop in
		 * such cases.
		 */
		if (test_bit(segno, sm->invalid_segmap))
			goto next;
#endif

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;
		/* Don't touch checkpointed data */
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
					get_ckpt_valid_blocks(sbi, segno) &&
					p.alloc_mode != SSR))
			goto next;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] = last_victim + 1;
			else
				sm->last_victim[p.gc_mode] = segno + 1;
			sm->last_victim[p.gc_mode] %=
				(MAIN_SECS(sbi) * sbi->segs_per_sec);
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
got_result:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
	}
out:
	if (p.min_segno != NULL_SEGNO)
		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(f2fs_inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}
/*
 * This function compares node address got in summary with that in NAT.
 * On validity, copy that node with cold status, otherwise (invalid node)
 * ignore that.
 */
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);
	int submitted = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;
		int err;

		/* stop BG_GC if there is not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = f2fs_get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during f2fs_get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (f2fs_get_node_info(sbi, nid, &ni)) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		err = f2fs_move_node_page(node_page, gc_type);
		if (!err && gc_type == FG_GC)
			submitted++;
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return submitted;
}
/*
 * Calculate start block index indicating the given node offset.
 * Be careful, caller should give this node offset only indicating direct node
 * blocks. If any node offsets, which point the other types of node blocks such
 * as indirect or double indirect node blocks, are given, it must be a caller's
 * mistake.
 */
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
}
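
/*
 * Check whether the data block at @blkaddr is still referenced by its owning
 * node: re-read the node page and compare the block address it records with
 * the one found through the summary entry.
 */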
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	if (f2fs_get_node_info(sbi, nid, dni)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (sum->version != dni->version) {
		f2fs_warn(sbi, "%s: valid data with mismatched node version.",
			  __func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr) {
#ifdef CONFIG_F2FS_CHECK_FS
		unsigned int segno = GET_SEGNO(sbi, blkaddr);
		unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

		if (unlikely(check_valid_map(sbi, segno, offset))) {
			if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
				f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u\n",
						blkaddr, source_blkaddr, segno);
				f2fs_bug_on(sbi, 1);
			}
		}
#endif
		return false;
	}
	return true;
}
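
/*
 * Readahead one data block through the meta inode's page cache so that a
 * later move_data_block() on the same inode does not need a synchronous read.
 */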
static int ra_data_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0, 0, 0};
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page)
		return -ENOMEM;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ))) {
			err = -EFSCORRUPTED;
			goto put_page;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_page;
	f2fs_put_dnode(&dn);

	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
		err = -ENOENT;
		goto put_page;
	}
	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE))) {
		err = -EFSCORRUPTED;
		goto put_page;
	}
got_it:
	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	/*
	 * don't cache encrypted data into meta inode until previous dirty
	 * data were writebacked to avoid racing between GC and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
					dn.data_blkaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto put_page;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_encrypted_page;
	f2fs_put_page(fio.encrypted_page, 0);
	f2fs_put_page(page, 1);
	return 0;
put_encrypted_page:
	f2fs_put_page(fio.encrypted_page, 1);
put_page:
	f2fs_put_page(page, 1);
	return err;
}
/*
 * Move data block via META_MAPPING while keeping locked data page.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
static int move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page, *mpage;
	block_t newaddr;
	int err = 0;
	bool lfs_mode = test_opt(fio.sbi, LFS);

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return -ENOMEM;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		err = -EAGAIN;
		goto out;
	}

	if (f2fs_is_pinned_file(inode)) {
		f2fs_pin_file_control(inode, true);
		err = -EAGAIN;
		goto out;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		err = -ENOENT;
		goto put_out;
	}

	/*
	 * don't cache encrypted data into meta inode until previous dirty
	 * data were writebacked to avoid racing between GC and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	if (err)
		goto put_out;

	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		down_write(&fio.sbi->io_order_lock);

	mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
					fio.old_blkaddr, false);
	if (!mpage) {
		err = -ENOMEM;
		goto up_out;
	}

	fio.encrypted_page = mpage;

	/* read source block in mpage */
	if (!PageUptodate(mpage)) {
		err = f2fs_submit_page_bio(&fio);
		if (err) {
			f2fs_put_page(mpage, 1);
			goto up_out;
		}
		lock_page(mpage);
		if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
						!PageUptodate(mpage))) {
			err = -EIO;
			f2fs_put_page(mpage, 1);
			goto up_out;
		}
	}

	f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, CURSEG_COLD_DATA, NULL, false);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		f2fs_put_page(mpage, 1);
		goto recover_block;
	}

	/* write target block */
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
	memcpy(page_address(fio.encrypted_page),
				page_address(mpage), PAGE_SIZE);
	f2fs_put_page(mpage, 1);
	invalidate_mapping_pages(META_MAPPING(fio.sbi),
				fio.old_blkaddr, fio.old_blkaddr);

	set_page_dirty(fio.encrypted_page);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);
	ClearPageError(page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);
	if (fio.retry) {
		err = -EAGAIN;
		if (PageWriteback(fio.encrypted_page))
			end_page_writeback(fio.encrypted_page);
		goto put_page_out;
	}

	f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
								true, true);
up_out:
	if (lfs_mode)
		up_write(&fio.sbi->io_order_lock);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}
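
/*
 * Migrate one data block through the regular data path: redirty the page for
 * background GC, or rewrite it synchronously for foreground GC.
 */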
static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
							unsigned int segno, int off)
{
	struct page *page;
	int err = 0;

	page = f2fs_get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return PTR_ERR(page);

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		err = -EAGAIN;
		goto out;
	}
	if (f2fs_is_pinned_file(inode)) {
		if (gc_type == FG_GC)
			f2fs_pin_file_control(inode, true);
		err = -EAGAIN;
		goto out;
	}

	if (gc_type == BG_GC) {
		if (PageWriteback(page)) {
			err = -EAGAIN;
			goto out;
		}
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);

retry:
		f2fs_wait_on_page_writeback(page, DATA, true, true);

		set_page_dirty(page);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}

		set_cold_data(page);

		err = f2fs_do_write_data_page(&fio);
		if (err) {
			clear_cold_data(page);
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto retry;
			}
			if (is_dirty)
				set_page_dirty(page);
		}
	}
out:
	f2fs_put_page(page, 1);
	return err;
}
/*
 * This function tries to get parent node of victim data block, and identifies
 * data block validity. If the block is valid, copy that with cold status and
 * modify parent node.
 * If the parent node is not valid or the data block address is different,
 * the victim data block is ignored.
 */
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	int submitted = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/* stop BG_GC if there is not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			f2fs_ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			if (!down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				iput(inode);
				sbi->skipped_gc_rwsem++;
				continue;
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
								ofs_in_node;

			if (f2fs_post_read_required(inode)) {
				int err = ra_data_block(inode, start_bidx);

				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				if (err) {
					iput(inode);
					continue;
				}
				add_gc_inode(gc_list, inode);
				continue;
			}

			data_page = f2fs_get_read_data_page(inode,
						start_bidx, REQ_RAHEAD, true);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;
			int err;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->i_gc_rwsem[READ]))
					continue;
				if (!down_write_trylock(
						&fi->i_gc_rwsem[WRITE])) {
					sbi->skipped_gc_rwsem++;
					up_write(&fi->i_gc_rwsem[READ]);
					continue;
				}
				locked = true;

				/* wait for all inflight aio data */
				inode_dio_wait(inode);
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_post_read_required(inode))
				err = move_data_block(inode, start_bidx,
							gc_type, segno, off);
			else
				err = move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (!err && (gc_type == FG_GC ||
					f2fs_post_read_required(inode)))
				submitted++;

			if (locked) {
				up_write(&fi->i_gc_rwsem[WRITE]);
				up_write(&fi->i_gc_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;

	return submitted;
}
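
/* Pick a victim segment for GC/SSR while holding sentry_lock. */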
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS);
	up_write(&sit_i->sentry_lock);
	return ret;
}
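
/*
 * Collect one section: reference the summary blocks of every segment in the
 * range, migrate node or data blocks segment by segment, and return how many
 * segments were freed.
 */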
static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int seg_freed = 0, migrated = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;
	int submitted = 0;

	if (__is_large_section(sbi))
		end_segno = rounddown(end_segno, sbi->segs_per_sec);

	/* readahead multi ssa blocks those have contiguous address */
	if (__is_large_section(sbi))
		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					end_segno - segno, META_SSA, true);

	/* reference all summary page */
	while (segno < end_segno) {
		sum_page = f2fs_get_sum_page(sbi, segno++);
		if (IS_ERR(sum_page)) {
			int err = PTR_ERR(sum_page);

			end_segno = segno - 1;
			for (segno = start_segno; segno < end_segno; segno++) {
				sum_page = find_get_page(META_MAPPING(sbi),
						GET_SUM_BLOCK(sbi, segno));
				f2fs_put_page(sum_page, 0);
				f2fs_put_page(sum_page, 0);
			}
			return err;
		}
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0)
			goto freed;
		if (__is_large_section(sbi) &&
				migrated >= sbi->migration_granularity)
			goto skip;
		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
			goto skip;

		sum = page_address(sum_page);
		if (type != GET_SUM_TYPE((&sum->footer))) {
			f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
				 segno, type, GET_SUM_TYPE((&sum->footer)));
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_stop_checkpoint(sbi, false);
			goto skip;
		}

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			submitted += gc_node_segment(sbi, sum->entries, segno,
								gc_type);
		else
			submitted += gc_data_segment(sbi, sum->entries, gc_list,
							segno, gc_type);

		stat_inc_seg_count(sbi, type, gc_type);
		migrated++;

freed:
		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;

		if (__is_large_section(sbi) && segno + 1 < end_segno)
			sbi->next_victim_seg[gc_type] = segno + 1;
skip:
		f2fs_put_page(sum_page, 0);
	}

	if (submitted)
		f2fs_submit_merged_write(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA);

	blk_finish_plug(&plug);

	stat_inc_call_count(sbi->stat_info);

	return seg_freed;
}
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
			bool background, unsigned int segno)
{
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0, seg_freed = 0, total_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	unsigned int init_segno = segno;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
	};
	unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
	unsigned long long first_skipped;
	unsigned int skipped_round = 0, round = 0;

	trace_f2fs_gc_begin(sbi->sb, sync, background,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
	sbi->skipped_gc_rwsem = 0;
	first_skipped = last_skipped;
gc_more:
	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
		/*
		 * For example, if there are many prefree_segments below given
		 * threshold, we can make them free by checkpoint. Then, we
		 * secure free segments which doesn't need fggc any more.
		 */
		if (prefree_segments(sbi) &&
				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
			ret = f2fs_write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
		if (has_not_enough_free_secs(sbi, 0, 0))
			gc_type = FG_GC;
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
	if (gc_type == BG_GC && !background) {
		ret = -EINVAL;
		goto stop;
	}
	if (!__get_victim(sbi, &segno, gc_type)) {
		ret = -ENODATA;
		goto stop;
	}

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
	if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec)
		sec_freed++;
	total_freed += seg_freed;

	if (gc_type == FG_GC) {
		if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
						sbi->skipped_gc_rwsem)
			skipped_round++;
		last_skipped = sbi->skipped_atomic_files[FG_GC];
		round++;
	}

	if (gc_type == FG_GC && seg_freed)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (sync)
		goto stop;

	if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
		if (skipped_round <= MAX_SKIP_GC_COUNT ||
					skipped_round * 2 < round) {
			segno = NULL_SEGNO;
			goto gc_more;
		}

		if (first_skipped < last_skipped &&
				(last_skipped - first_skipped) >
						sbi->skipped_gc_rwsem) {
			f2fs_drop_inmem_pages_all(sbi, true);
			segno = NULL_SEGNO;
			goto gc_more;
		}
		if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED))
			ret = f2fs_write_checkpoint(sbi, &cpc);
	}
stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&gc_list);

	if (sync && !ret)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}

void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;

	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* give warm/cold data area from slower device */
	if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
}
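
/*
 * Empty the segment range [start, end] for resize: move current segments out
 * of the range, force FG_GC on every section in it, and verify that nothing
 * in the range is still in use.
 */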
static int free_segment_range(struct f2fs_sb_info *sbi, unsigned int start,
							unsigned int end)
{
	int type;
	unsigned int segno, next_inuse;
	int err = 0;

	/* Move out cursegs from the target range */
	for (type = CURSEG_HOT_DATA; type < NR_CURSEG_TYPE; type++)
		allocate_segment_for_resize(sbi, type, start, end);

	/* do GC to move out valid blocks in the range */
	for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
		struct gc_inode_list gc_list = {
			.ilist = LIST_HEAD_INIT(gc_list.ilist),
			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
		};

		mutex_lock(&sbi->gc_mutex);
		do_garbage_collect(sbi, segno, &gc_list, FG_GC);
		mutex_unlock(&sbi->gc_mutex);
		put_gc_inode(&gc_list);

		if (get_valid_blocks(sbi, segno, true))
			return -EAGAIN;
	}

	err = f2fs_sync_fs(sbi->sb, 1);
	if (err)
		return err;

	next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
	if (next_inuse <= end) {
		f2fs_err(sbi, "segno %u should be free but still inuse!",
			 next_inuse);
		f2fs_bug_on(sbi, 1);
	}
	return err;
}
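
/* Adjust the on-disk superblock counters by +/- @secs sections. */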
static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
{
	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
	int section_count = le32_to_cpu(raw_sb->section_count);
	int segment_count = le32_to_cpu(raw_sb->segment_count);
	int segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
	long long block_count = le64_to_cpu(raw_sb->block_count);
	int segs = secs * sbi->segs_per_sec;

	raw_sb->section_count = cpu_to_le32(section_count + secs);
	raw_sb->segment_count = cpu_to_le32(segment_count + segs);
	raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
	raw_sb->block_count = cpu_to_le64(block_count +
					(long long)segs * sbi->blocks_per_seg);
}

static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
{
	int segs = secs * sbi->segs_per_sec;
	long long user_block_count =
				le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);

	SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
	MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
	FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
	FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
	F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count +
					(long long)segs * sbi->blocks_per_seg);
}
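
/*
 * Shrink the filesystem to @block_count blocks: free the tail segment range,
 * then update superblock and checkpoint metadata, rolling both back if any
 * step fails.
 */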
int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
{
	__u64 old_block_count, shrunk_blocks;
	unsigned int secs;
	int gc_mode, gc_type;
	int err = 0;
	__u32 rem;

	old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
	if (block_count > old_block_count)
		return -EINVAL;

	/* new fs size should align to section size */
	div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
	if (rem)
		return -EINVAL;

	if (block_count == old_block_count)
		return 0;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
		f2fs_err(sbi, "Should run fsck to repair first.");
		return -EFSCORRUPTED;
	}

	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		f2fs_err(sbi, "Checkpoint should be enabled.");
		return -EINVAL;
	}

	freeze_bdev(sbi->sb->s_bdev);

	shrunk_blocks = old_block_count - block_count;
	secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));
	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	else
		sbi->user_block_count -= shrunk_blocks;
	spin_unlock(&sbi->stat_lock);
	if (err) {
		thaw_bdev(sbi->sb->s_bdev, sbi->sb);
		return err;
	}

	mutex_lock(&sbi->resize_mutex);
	set_sbi_flag(sbi, SBI_IS_RESIZEFS);

	mutex_lock(&DIRTY_I(sbi)->seglist_lock);

	MAIN_SECS(sbi) -= secs;

	for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
		if (SIT_I(sbi)->last_victim[gc_mode] >=
					MAIN_SECS(sbi) * sbi->segs_per_sec)
			SIT_I(sbi)->last_victim[gc_mode] = 0;

	for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
		if (sbi->next_victim_seg[gc_type] >=
					MAIN_SECS(sbi) * sbi->segs_per_sec)
			sbi->next_victim_seg[gc_type] = NULL_SEGNO;

	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);

	err = free_segment_range(sbi, MAIN_SECS(sbi) * sbi->segs_per_sec,
			MAIN_SEGS(sbi) - 1);
	if (err)
		goto out;

	update_sb_metadata(sbi, -secs);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		update_sb_metadata(sbi, secs);
		goto out;
	}

	mutex_lock(&sbi->cp_mutex);
	update_fs_metadata(sbi, -secs);
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	set_sbi_flag(sbi, SBI_IS_DIRTY);
	mutex_unlock(&sbi->cp_mutex);

	err = f2fs_sync_fs(sbi->sb, 1);
	if (err) {
		mutex_lock(&sbi->cp_mutex);
		update_fs_metadata(sbi, secs);
		mutex_unlock(&sbi->cp_mutex);
		update_sb_metadata(sbi, secs);
		f2fs_commit_super(sbi, false);
	}
out:
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");

		MAIN_SECS(sbi) += secs;
		spin_lock(&sbi->stat_lock);
		sbi->user_block_count += shrunk_blocks;
		spin_unlock(&sbi->stat_lock);
	}
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	mutex_unlock(&sbi->resize_mutex);
	thaw_bdev(sbi->sb->s_bdev, sbi->sb);
	return err;
}