/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include <trace/events/f2fs.h>
#define __reverse_ffz(x) __reverse_ffs(~(x))

static struct kmem_cache *discard_entry_slab;
/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 */
static inline unsigned long __reverse_ffs(unsigned long word)
{
        int num = 0;

#if BITS_PER_LONG == 64
        if ((word & 0xffffffff) == 0) {
                num += 32;
                word >>= 32;
        }
#endif
        if ((word & 0xffff) == 0) {
                num += 16;
                word >>= 16;
        }
        if ((word & 0xff) == 0) {
                num += 8;
                word >>= 8;
        }
        if ((word & 0xf0) == 0)
                num += 4;
        else
                word >>= 4;
        if ((word & 0xc) == 0)
                num += 2;
        else
                word >>= 2;
        if ((word & 0x2) == 0)
                num += 1;
        return num;
}
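/*
 * Example: f2fs_set_bit() stores bit 0 in the MSB of a byte, so a byte
 * holding 0x80 represents f2fs bit 0 and a byte holding 0x01 represents
 * f2fs bit 7. Hence __reverse_ffs(0x80UL) == 0, __reverse_ffs(0x01UL) == 7,
 * and __reverse_ffs(0x0100UL) == 15 (f2fs bit 7 of the second byte).
 */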
/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * Example:
 *   f2fs_set_bit(0, bitmap) => 0000 0001
 *   f2fs_set_bit(7, bitmap) => 1000 0000
 */
static unsigned long __find_rev_next_bit(const unsigned long *addr,
                        unsigned long size, unsigned long offset)
{
        const unsigned long *p = addr + BIT_WORD(offset);
        unsigned long result = offset & ~(BITS_PER_LONG - 1);
        unsigned long tmp;
        unsigned long mask, submask;
        unsigned long quot, rest;

        if (offset >= size)
                return size;

        size -= result;
        offset %= BITS_PER_LONG;
        if (!offset)
                goto aligned;

        tmp = *(p++);
        quot = (offset >> 3) << 3;
        rest = offset & 0x7;
        mask = ~0UL << quot;
        submask = (unsigned char)(0xff << rest) >> rest;
        submask <<= quot;
        mask &= submask;
        tmp &= mask;
        if (size < BITS_PER_LONG)
                goto found_first;
        if (tmp)
                goto found_middle;

        size -= BITS_PER_LONG;
        result += BITS_PER_LONG;
aligned:
        while (size & ~(BITS_PER_LONG-1)) {
                tmp = *(p++);
                if (tmp)
                        goto found_middle;
                result += BITS_PER_LONG;
                size -= BITS_PER_LONG;
        }
        if (!size)
                return result;
        tmp = *p;
found_first:
        tmp &= (~0UL >> (BITS_PER_LONG - size));
        if (tmp == 0UL)         /* Are any bits set? */
                return result + size;   /* Nope. */
found_middle:
        return result + __reverse_ffs(tmp);
}
static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
                        unsigned long size, unsigned long offset)
{
        const unsigned long *p = addr + BIT_WORD(offset);
        unsigned long result = offset & ~(BITS_PER_LONG - 1);
        unsigned long tmp;
        unsigned long mask, submask;
        unsigned long quot, rest;

        if (offset >= size)
                return size;

        size -= result;
        offset %= BITS_PER_LONG;
        if (!offset)
                goto aligned;

        tmp = *(p++);
        quot = (offset >> 3) << 3;
        rest = offset & 0x7;
        mask = ~(~0UL << quot);
        submask = (unsigned char)~((unsigned char)(0xff << rest) >> rest);
        submask <<= quot;
        mask += submask;
        tmp |= mask;
        if (size < BITS_PER_LONG)
                goto found_first;
        if (~tmp)
                goto found_middle;

        size -= BITS_PER_LONG;
        result += BITS_PER_LONG;
aligned:
        while (size & ~(BITS_PER_LONG - 1)) {
                tmp = *(p++);
                if (~tmp)
                        goto found_middle;
                result += BITS_PER_LONG;
                size -= BITS_PER_LONG;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp |= ~0UL << size;
        if (tmp == ~0UL)        /* Are any bits zero? */
                return result + size;   /* Nope. */
found_middle:
        return result + __reverse_ffz(tmp);
}
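/*
 * These two helpers are used in pairs to walk runs of bits in the
 * byte-reversed bitmaps: __find_rev_next_bit() locates the start of a run
 * of set bits and __find_rev_next_zero_bit() locates its end (see
 * add_discard_addrs() below).
 */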
/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi)
{
        /*
         * We should do GC or end up with checkpoint, if there are so many dirty
         * dir/node pages without enough free segments.
         */
        if (has_not_enough_free_secs(sbi, 0)) {
                mutex_lock(&sbi->gc_mutex);
                f2fs_gc(sbi);
        }
}
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
{
        /* check the # of cached NAT entries and prefree segments */
        if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) ||
                                excess_prefree_segs(sbi))
                f2fs_sync_fs(sbi->sb, true);
}
static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
                enum dirty_type dirty_type)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

        /* need not be added */
        if (IS_CURSEG(sbi, segno))
                return;

        if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
                dirty_i->nr_dirty[dirty_type]++;

        if (dirty_type == DIRTY) {
                struct seg_entry *sentry = get_seg_entry(sbi, segno);
                enum dirty_type t = sentry->type;

                if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
                        dirty_i->nr_dirty[t]++;
        }
}
static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
                enum dirty_type dirty_type)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

        if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
                dirty_i->nr_dirty[dirty_type]--;

        if (dirty_type == DIRTY) {
                struct seg_entry *sentry = get_seg_entry(sbi, segno);
                enum dirty_type t = sentry->type;

                if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
                        dirty_i->nr_dirty[t]--;

                if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
                        clear_bit(GET_SECNO(sbi, segno),
                                                dirty_i->victim_secmap);
        }
}
/*
 * Errors such as -ENOMEM should not occur here: adding a dirty entry to the
 * seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        unsigned short valid_blocks;

        if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
                return;

        mutex_lock(&dirty_i->seglist_lock);

        valid_blocks = get_valid_blocks(sbi, segno, 0);

        if (valid_blocks == 0) {
                __locate_dirty_segment(sbi, segno, PRE);
                __remove_dirty_segment(sbi, segno, DIRTY);
        } else if (valid_blocks < sbi->blocks_per_seg) {
                __locate_dirty_segment(sbi, segno, DIRTY);
        } else {
                /* Recovery routine with SSR needs this */
                __remove_dirty_segment(sbi, segno, DIRTY);
        }

        mutex_unlock(&dirty_i->seglist_lock);
}
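/*
 * To summarize the transitions above: a segment with no valid blocks goes
 * onto the PRE (prefree) list, a partially valid one goes onto the DIRTY
 * list, and a fully valid one is taken off the DIRTY list again, which the
 * SSR recovery path relies on.
 */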
static void f2fs_issue_discard(struct f2fs_sb_info *sbi,
                                block_t blkstart, block_t blklen)
{
        sector_t start = SECTOR_FROM_BLOCK(sbi, blkstart);
        sector_t len = SECTOR_FROM_BLOCK(sbi, blklen);

        blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0);
        trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
}
static void add_discard_addrs(struct f2fs_sb_info *sbi,
                        unsigned int segno, struct seg_entry *se)
{
        struct list_head *head = &SM_I(sbi)->discard_list;
        struct discard_entry *new;
        int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
        int max_blocks = sbi->blocks_per_seg;
        unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
        unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
        unsigned long dmap[entries];
        unsigned int start = 0, end = -1;
        int i;

        if (!test_opt(sbi, DISCARD))
                return;

        /* zero block will be discarded through the prefree list */
        if (!se->valid_blocks || se->valid_blocks == max_blocks)
                return;

        /* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */
        for (i = 0; i < entries; i++)
                dmap[i] = (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];

        while (SM_I(sbi)->nr_discards <= SM_I(sbi)->max_discards) {
                start = __find_rev_next_bit(dmap, max_blocks, end + 1);
                if (start >= max_blocks)
                        break;

                end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);

                new = f2fs_kmem_cache_alloc(discard_entry_slab, GFP_NOFS);
                INIT_LIST_HEAD(&new->list);
                new->blkaddr = START_BLOCK(sbi, segno) + start;
                new->len = end - start;

                list_add_tail(&new->list, head);
                SM_I(sbi)->nr_discards += end - start;
        }
}
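/*
 * The discard map built above is dmap = (cur ^ ckpt) & ckpt, i.e.
 * ckpt & ~cur: blocks that were valid at the last checkpoint but have been
 * invalidated since. For example, cur_map = 0011b and ckpt_map = 0110b
 * give dmap = 0100b, so only that one block is a discard candidate.
 */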
/*
 * Should call clear_prefree_segments after checkpoint is done.
 */
static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        unsigned int segno = -1;
        unsigned int total_segs = TOTAL_SEGS(sbi);

        mutex_lock(&dirty_i->seglist_lock);
        while (1) {
                segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
                                segno + 1);
                if (segno >= total_segs)
                        break;
                __set_test_and_free(sbi, segno);
        }
        mutex_unlock(&dirty_i->seglist_lock);
}
void clear_prefree_segments(struct f2fs_sb_info *sbi)
{
        struct list_head *head = &(SM_I(sbi)->discard_list);
        struct list_head *this, *next;
        struct discard_entry *entry;
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
        unsigned int total_segs = TOTAL_SEGS(sbi);
        unsigned int start = 0, end = -1;

        mutex_lock(&dirty_i->seglist_lock);

        while (1) {
                int i;
                start = find_next_bit(prefree_map, total_segs, end + 1);
                if (start >= total_segs)
                        break;
                end = find_next_zero_bit(prefree_map, total_segs, start + 1);

                for (i = start; i < end; i++)
                        clear_bit(i, prefree_map);

                dirty_i->nr_dirty[PRE] -= end - start;

                if (!test_opt(sbi, DISCARD))
                        continue;

                f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
                                (end - start) << sbi->log_blocks_per_seg);
        }
        mutex_unlock(&dirty_i->seglist_lock);

        /* send small discards */
        list_for_each_safe(this, next, head) {
                entry = list_entry(this, struct discard_entry, list);
                f2fs_issue_discard(sbi, entry->blkaddr, entry->len);
                list_del(&entry->list);
                SM_I(sbi)->nr_discards -= entry->len;
                kmem_cache_free(discard_entry_slab, entry);
        }
}
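/*
 * Discards are issued at two granularities here: whole runs of prefree
 * segments are discarded straight from the prefree bitmap, while the
 * discard_list carries the sub-segment ("small") candidates collected by
 * add_discard_addrs().
 */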
static void __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
{
        struct sit_info *sit_i = SIT_I(sbi);

        if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap))
                sit_i->dirty_sentries++;
}
static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
                                        unsigned int segno, int modified)
{
        struct seg_entry *se = get_seg_entry(sbi, segno);
        se->type = type;
        if (modified)
                __mark_sit_entry_dirty(sbi, segno);
}
static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
{
        struct seg_entry *se;
        unsigned int segno, offset;
        long int new_vblocks;

        segno = GET_SEGNO(sbi, blkaddr);

        se = get_seg_entry(sbi, segno);
        new_vblocks = se->valid_blocks + del;
        offset = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) & (sbi->blocks_per_seg - 1);

        f2fs_bug_on((new_vblocks >> (sizeof(unsigned short) << 3) ||
                                (new_vblocks > sbi->blocks_per_seg)));

        se->valid_blocks = new_vblocks;
        se->mtime = get_mtime(sbi);
        SIT_I(sbi)->max_mtime = se->mtime;

        /* Update valid block bitmap */
        if (del > 0) {
                if (f2fs_set_bit(offset, se->cur_valid_map))
                        BUG();
        } else {
                if (!f2fs_clear_bit(offset, se->cur_valid_map))
                        BUG();
        }
        if (!f2fs_test_bit(offset, se->ckpt_valid_map))
                se->ckpt_valid_blocks += del;

        __mark_sit_entry_dirty(sbi, segno);

        /* update total number of valid blocks to be written in ckpt area */
        SIT_I(sbi)->written_valid_blocks += del;

        if (sbi->segs_per_sec > 1)
                get_sec_entry(sbi, segno)->valid_blocks += del;
}
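/*
 * Callers pass del = 1 when a block becomes valid and del = -1 when it is
 * invalidated (see refresh_sit_entry() below). ckpt_valid_blocks is only
 * adjusted for blocks that were not valid at the last checkpoint.
 */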
static void refresh_sit_entry(struct f2fs_sb_info *sbi,
                        block_t old_blkaddr, block_t new_blkaddr)
{
        update_sit_entry(sbi, new_blkaddr, 1);
        if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
                update_sit_entry(sbi, old_blkaddr, -1);
}
void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
{
        unsigned int segno = GET_SEGNO(sbi, addr);
        struct sit_info *sit_i = SIT_I(sbi);

        f2fs_bug_on(addr == NULL_ADDR);
        if (addr == NEW_ADDR)
                return;

        /* add it into sit main buffer */
        mutex_lock(&sit_i->sentry_lock);

        update_sit_entry(sbi, addr, -1);

        /* add it into dirty seglist */
        locate_dirty_segment(sbi, segno);

        mutex_unlock(&sit_i->sentry_lock);
}
/*
 * This function must be called with curseg_mutex held.
 */
static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
                                        struct f2fs_summary *sum)
{
        struct curseg_info *curseg = CURSEG_I(sbi, type);
        void *addr = curseg->sum_blk;
        addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
        memcpy(addr, sum, sizeof(struct f2fs_summary));
}
/*
 * Calculate the number of current summary pages for writing
 */
int npages_for_summary_flush(struct f2fs_sb_info *sbi)
{
        int valid_sum_count = 0;
        int i, sum_in_page;

        for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
                if (sbi->ckpt->alloc_type[i] == SSR)
                        valid_sum_count += sbi->blocks_per_seg;
                else
                        valid_sum_count += curseg_blkoff(sbi, i);
        }

        sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE -
                        SUM_FOOTER_SIZE) / SUMMARY_SIZE;
        if (valid_sum_count <= sum_in_page)
                return 1;
        else if ((valid_sum_count - sum_in_page) <=
                (PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
                return 2;
        return 3;
}
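/*
 * The result is always 1, 2 or 3 pages: the first compacted summary page
 * also has to hold the NAT and SIT journals plus the footer, so it fits
 * fewer entries, and any overflow spills into at most two more pages.
 * The checkpoint code uses this count to decide whether writing the
 * compacted summary format pays off.
 */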
/*
 * Caller should put this summary page
 */
struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
{
        return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
}
static void write_sum_page(struct f2fs_sb_info *sbi,
                        struct f2fs_summary_block *sum_blk, block_t blk_addr)
{
        struct page *page = grab_meta_page(sbi, blk_addr);
        void *kaddr = page_address(page);
        memcpy(kaddr, sum_blk, PAGE_CACHE_SIZE);
        set_page_dirty(page);
        f2fs_put_page(page, 1);
}
static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
{
        struct curseg_info *curseg = CURSEG_I(sbi, type);
        unsigned int segno = curseg->segno + 1;
        struct free_segmap_info *free_i = FREE_I(sbi);

        if (segno < TOTAL_SEGS(sbi) && segno % sbi->segs_per_sec)
                return !test_bit(segno, free_i->free_segmap);
        return 0;
}
/*
 * Find a new segment from the free segments bitmap in the requested order.
 * This function must succeed; failure to find a free segment is a BUG.
 */
static void get_new_segment(struct f2fs_sb_info *sbi,
                        unsigned int *newseg, bool new_sec, int dir)
{
        struct free_segmap_info *free_i = FREE_I(sbi);
        unsigned int segno, secno, zoneno;
        unsigned int total_zones = TOTAL_SECS(sbi) / sbi->secs_per_zone;
        unsigned int hint = *newseg / sbi->segs_per_sec;
        unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
        unsigned int left_start = hint;
        bool init = true;
        int go_left = 0;
        int i;

        write_lock(&free_i->segmap_lock);

        if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
                segno = find_next_zero_bit(free_i->free_segmap,
                                        TOTAL_SEGS(sbi), *newseg + 1);
                if (segno - *newseg < sbi->segs_per_sec -
                                        (*newseg % sbi->segs_per_sec))
                        goto got_it;
        }
find_other_zone:
        secno = find_next_zero_bit(free_i->free_secmap, TOTAL_SECS(sbi), hint);
        if (secno >= TOTAL_SECS(sbi)) {
                if (dir == ALLOC_RIGHT) {
                        secno = find_next_zero_bit(free_i->free_secmap,
                                                        TOTAL_SECS(sbi), 0);
                        f2fs_bug_on(secno >= TOTAL_SECS(sbi));
                } else {
                        go_left = 1;
                        left_start = hint - 1;
                }
        }
        if (go_left == 0)
                goto skip_left;

        while (test_bit(left_start, free_i->free_secmap)) {
                if (left_start > 0) {
                        left_start--;
                        continue;
                }
                left_start = find_next_zero_bit(free_i->free_secmap,
                                                        TOTAL_SECS(sbi), 0);
                f2fs_bug_on(left_start >= TOTAL_SECS(sbi));
                break;
        }
        secno = left_start;
skip_left:
        hint = secno;
        segno = secno * sbi->segs_per_sec;
        zoneno = secno / sbi->secs_per_zone;

        /* give up on finding another zone */
        if (!init)
                goto got_it;
        if (sbi->secs_per_zone == 1)
                goto got_it;
        if (zoneno == old_zoneno)
                goto got_it;
        if (dir == ALLOC_LEFT) {
                if (!go_left && zoneno + 1 >= total_zones)
                        goto got_it;
                if (go_left && zoneno == 0)
                        goto got_it;
        }
        for (i = 0; i < NR_CURSEG_TYPE; i++)
                if (CURSEG_I(sbi, i)->zone == zoneno)
                        break;

        if (i < NR_CURSEG_TYPE) {
                /* zone is in use, try another */
                if (go_left)
                        hint = zoneno * sbi->secs_per_zone - 1;
                else if (zoneno + 1 >= total_zones)
                        hint = 0;
                else
                        hint = (zoneno + 1) * sbi->secs_per_zone;
                init = false;
                goto find_other_zone;
        }
got_it:
        /* set it as dirty segment in free segmap */
        f2fs_bug_on(test_bit(segno, free_i->free_segmap));
        __set_inuse(sbi, segno);
        *newseg = segno;
        write_unlock(&free_i->segmap_lock);
}
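/*
 * Allocation policy recap: try the next free segment inside the current
 * section first; otherwise search the free section bitmap from the hint
 * (rightward, or leftward for ALLOC_LEFT), preferring a section whose zone
 * is not already occupied by one of the current segments.
 */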
static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
{
        struct curseg_info *curseg = CURSEG_I(sbi, type);
        struct summary_footer *sum_footer;

        curseg->segno = curseg->next_segno;
        curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
        curseg->next_blkoff = 0;
        curseg->next_segno = NULL_SEGNO;

        sum_footer = &(curseg->sum_blk->footer);
        memset(sum_footer, 0, sizeof(struct summary_footer));
        if (IS_DATASEG(type))
                SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
        if (IS_NODESEG(type))
                SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
        __set_sit_entry_type(sbi, type, curseg->segno, modified);
}
/*
 * Allocate a current working segment.
 * This function always allocates a free segment in LFS manner.
 */
static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
{
        struct curseg_info *curseg = CURSEG_I(sbi, type);
        unsigned int segno = curseg->segno;
        int dir = ALLOC_LEFT;

        write_sum_page(sbi, curseg->sum_blk,
                                GET_SUM_BLOCK(sbi, segno));
        if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
                dir = ALLOC_RIGHT;

        if (test_opt(sbi, NOHEAP))
                dir = ALLOC_RIGHT;

        get_new_segment(sbi, &segno, new_sec, dir);
        curseg->next_segno = segno;
        reset_curseg(sbi, type, 1);
        curseg->alloc_type = LFS;
}
static void __next_free_blkoff(struct f2fs_sb_info *sbi,
                        struct curseg_info *seg, block_t start)
{
        struct seg_entry *se = get_seg_entry(sbi, seg->segno);
        int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
        unsigned long target_map[entries];
        unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
        unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
        int i, pos;

        for (i = 0; i < entries; i++)
                target_map[i] = ckpt_map[i] | cur_map[i];

        pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);

        seg->next_blkoff = pos;
}
/*
 * If a segment is written in LFS manner, the next block offset is simply
 * obtained by incrementing the current block offset. However, if a segment
 * is written in SSR manner, the next block offset must be obtained by
 * calling __next_free_blkoff.
 */
static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
                                struct curseg_info *seg)
{
        if (seg->alloc_type == SSR)
                __next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
        else
                seg->next_blkoff++;
}
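/*
 * Example: an LFS log at next_blkoff 10 simply advances to 11, while an
 * SSR log searches the merged (ckpt | cur) valid-block map from offset 11
 * for the next hole, skipping blocks that are still live.
 */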
/*
 * This function always allocates a used segment (from the dirty seglist) in
 * SSR manner, so it restores the existing segment information for the valid
 * blocks.
 */
static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        struct curseg_info *curseg = CURSEG_I(sbi, type);
        unsigned int new_segno = curseg->next_segno;
        struct f2fs_summary_block *sum_node;
        struct page *sum_page;

        write_sum_page(sbi, curseg->sum_blk,
                                GET_SUM_BLOCK(sbi, curseg->segno));
        __set_test_and_inuse(sbi, new_segno);

        mutex_lock(&dirty_i->seglist_lock);
        __remove_dirty_segment(sbi, new_segno, PRE);
        __remove_dirty_segment(sbi, new_segno, DIRTY);
        mutex_unlock(&dirty_i->seglist_lock);

        reset_curseg(sbi, type, 1);
        curseg->alloc_type = SSR;
        __next_free_blkoff(sbi, curseg, 0);

        if (reuse) {
                sum_page = get_sum_page(sbi, new_segno);
                sum_node = (struct f2fs_summary_block *)page_address(sum_page);
                memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
                f2fs_put_page(sum_page, 1);
        }
}
static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
{
        struct curseg_info *curseg = CURSEG_I(sbi, type);
        const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;

        if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
                return v_ops->get_victim(sbi,
                                &(curseg)->next_segno, BG_GC, type, SSR);

        /* For data segments, let's do SSR more intensively */
        for (; type >= CURSEG_HOT_DATA; type--)
                if (v_ops->get_victim(sbi, &(curseg)->next_segno,
                                                BG_GC, type, SSR))
                        return 1;
        return 0;
}
/*
 * Flush out the current segment and replace it with a new segment.
 * This function must succeed; otherwise it is a BUG.
 */
static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
                                                int type, bool force)
{
        struct curseg_info *curseg = CURSEG_I(sbi, type);

        if (force)
                new_curseg(sbi, type, true);
        else if (type == CURSEG_WARM_NODE)
                new_curseg(sbi, type, false);
        else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
                new_curseg(sbi, type, false);
        else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
                change_curseg(sbi, type, true);
        else
                new_curseg(sbi, type, false);

        stat_inc_seg_type(sbi, curseg);
}
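/*
 * The decision ladder above: a forced request always opens a new section;
 * the warm node log always takes a fresh segment in LFS manner; an LFS log
 * whose next segment is free keeps going serially; otherwise SSR reuses a
 * dirty victim segment when free space is tight, with a new segment as the
 * final fallback.
 */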
void allocate_new_segments(struct f2fs_sb_info *sbi)
{
        struct curseg_info *curseg;
        unsigned int old_curseg;
        int i;

        for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
                curseg = CURSEG_I(sbi, i);
                old_curseg = curseg->segno;
                SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
                locate_dirty_segment(sbi, old_curseg);
        }
}
static const struct segment_allocation default_salloc_ops = {
        .allocate_segment = allocate_segment_by_default,
};
static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
{
        struct curseg_info *curseg = CURSEG_I(sbi, type);
        if (curseg->next_blkoff < sbi->blocks_per_seg)
                return true;
        return false;
}
static int __get_segment_type_2(struct page *page, enum page_type p_type)
{
        if (p_type == DATA)
                return CURSEG_HOT_DATA;
        else
                return CURSEG_HOT_NODE;
}
static int __get_segment_type_4(struct page *page, enum page_type p_type)
{
        if (p_type == DATA) {
                struct inode *inode = page->mapping->host;

                if (S_ISDIR(inode->i_mode))
                        return CURSEG_HOT_DATA;
                else
                        return CURSEG_COLD_DATA;
        } else {
                if (IS_DNODE(page) && !is_cold_node(page))
                        return CURSEG_HOT_NODE;
                else
                        return CURSEG_COLD_NODE;
        }
}
static int __get_segment_type_6(struct page *page, enum page_type p_type)
{
        if (p_type == DATA) {
                struct inode *inode = page->mapping->host;

                if (S_ISDIR(inode->i_mode))
                        return CURSEG_HOT_DATA;
                else if (is_cold_data(page) || file_is_cold(inode))
                        return CURSEG_COLD_DATA;
                else
                        return CURSEG_WARM_DATA;
        } else {
                if (IS_DNODE(page))
                        return is_cold_node(page) ? CURSEG_WARM_NODE :
                                                CURSEG_HOT_NODE;
                else
                        return CURSEG_COLD_NODE;
        }
}
static int __get_segment_type(struct page *page, enum page_type p_type)
{
        struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
        switch (sbi->active_logs) {
        case 2:
                return __get_segment_type_2(page, p_type);
        case 4:
                return __get_segment_type_4(page, p_type);
        }
        /* NR_CURSEG_TYPE(6) logs by default */
        f2fs_bug_on(sbi->active_logs != NR_CURSEG_TYPE);
        return __get_segment_type_6(page, p_type);
}
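/*
 * Log separation by the "active_logs" mount option: with 2 logs, data and
 * node pages each share a single hot log; with 4 logs, directory data and
 * non-cold direct node blocks go to the hot logs and everything else to
 * the cold logs; with the default 6 logs, data and node pages are each
 * split into hot/warm/cold as in __get_segment_type_6().
 */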
void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
                block_t old_blkaddr, block_t *new_blkaddr,
                struct f2fs_summary *sum, int type)
{
        struct sit_info *sit_i = SIT_I(sbi);
        struct curseg_info *curseg;
        unsigned int old_cursegno;

        curseg = CURSEG_I(sbi, type);

        mutex_lock(&curseg->curseg_mutex);

        *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
        old_cursegno = curseg->segno;

        /*
         * __add_sum_entry must be called with curseg_mutex held because it
         * updates a summary entry in the current summary block.
         */
        __add_sum_entry(sbi, type, sum);

        mutex_lock(&sit_i->sentry_lock);
        __refresh_next_blkoff(sbi, curseg);

        stat_inc_block_count(sbi, curseg);

        /*
         * SIT information should be updated before segment allocation,
         * since SSR needs latest valid block information.
         */
        refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);

        if (!__has_curseg_space(sbi, type))
                sit_i->s_ops->allocate_segment(sbi, type, false);

        locate_dirty_segment(sbi, old_cursegno);
        locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
        mutex_unlock(&sit_i->sentry_lock);

        if (page && IS_NODESEG(type))
                fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));

        mutex_unlock(&curseg->curseg_mutex);
}
static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
                block_t old_blkaddr, block_t *new_blkaddr,
                struct f2fs_summary *sum, struct f2fs_io_info *fio)
{
        int type = __get_segment_type(page, fio->type);

        allocate_data_block(sbi, page, old_blkaddr, new_blkaddr, sum, type);

        /* writeout dirty page into bdev */
        f2fs_submit_page_mbio(sbi, page, *new_blkaddr, fio);
}
void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
{
        struct f2fs_io_info fio = {
                .type = META,
                .rw = WRITE_SYNC | REQ_META | REQ_PRIO
        };

        set_page_writeback(page);
        f2fs_submit_page_mbio(sbi, page, page->index, &fio);
}
void write_node_page(struct f2fs_sb_info *sbi, struct page *page,
                struct f2fs_io_info *fio,
                unsigned int nid, block_t old_blkaddr, block_t *new_blkaddr)
{
        struct f2fs_summary sum;
        set_summary(&sum, nid, 0, 0);
        do_write_page(sbi, page, old_blkaddr, new_blkaddr, &sum, fio);
}
void write_data_page(struct page *page, struct dnode_of_data *dn,
                block_t *new_blkaddr, struct f2fs_io_info *fio)
{
        struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
        struct f2fs_summary sum;
        struct node_info ni;

        f2fs_bug_on(dn->data_blkaddr == NULL_ADDR);
        get_node_info(sbi, dn->nid, &ni);
        set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

        do_write_page(sbi, page, dn->data_blkaddr, new_blkaddr, &sum, fio);
}
void rewrite_data_page(struct page *page, block_t old_blkaddr,
                                        struct f2fs_io_info *fio)
{
        struct inode *inode = page->mapping->host;
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        f2fs_submit_page_mbio(sbi, page, old_blkaddr, fio);
}
void recover_data_page(struct f2fs_sb_info *sbi,
                        struct page *page, struct f2fs_summary *sum,
                        block_t old_blkaddr, block_t new_blkaddr)
{
        struct sit_info *sit_i = SIT_I(sbi);
        struct curseg_info *curseg;
        unsigned int segno, old_cursegno;
        struct seg_entry *se;
        int type;

        segno = GET_SEGNO(sbi, new_blkaddr);
        se = get_seg_entry(sbi, segno);
        type = se->type;

        if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
                if (old_blkaddr == NULL_ADDR)
                        type = CURSEG_COLD_DATA;
                else
                        type = CURSEG_WARM_DATA;
        }
        curseg = CURSEG_I(sbi, type);

        mutex_lock(&curseg->curseg_mutex);
        mutex_lock(&sit_i->sentry_lock);

        old_cursegno = curseg->segno;

        /* change the current segment */
        if (segno != curseg->segno) {
                curseg->next_segno = segno;
                change_curseg(sbi, type, true);
        }

        curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
                                        (sbi->blocks_per_seg - 1);
        __add_sum_entry(sbi, type, sum);

        refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);

        locate_dirty_segment(sbi, old_cursegno);
        locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));

        mutex_unlock(&sit_i->sentry_lock);
        mutex_unlock(&curseg->curseg_mutex);
}
void rewrite_node_page(struct f2fs_sb_info *sbi,
                        struct page *page, struct f2fs_summary *sum,
                        block_t old_blkaddr, block_t new_blkaddr)
{
        struct sit_info *sit_i = SIT_I(sbi);
        int type = CURSEG_WARM_NODE;
        struct curseg_info *curseg;
        unsigned int segno, old_cursegno;
        block_t next_blkaddr = next_blkaddr_of_node(page);
        unsigned int next_segno = GET_SEGNO(sbi, next_blkaddr);
        struct f2fs_io_info fio = {
                .type = NODE,
                .rw = WRITE_SYNC,
        };

        curseg = CURSEG_I(sbi, type);

        mutex_lock(&curseg->curseg_mutex);
        mutex_lock(&sit_i->sentry_lock);

        segno = GET_SEGNO(sbi, new_blkaddr);
        old_cursegno = curseg->segno;

        /* change the current segment */
        if (segno != curseg->segno) {
                curseg->next_segno = segno;
                change_curseg(sbi, type, true);
        }

        curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
                                        (sbi->blocks_per_seg - 1);
        __add_sum_entry(sbi, type, sum);

        /* change the current log to the next block addr in advance */
        if (next_segno != segno) {
                curseg->next_segno = next_segno;
                change_curseg(sbi, type, true);
        }

        curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, next_blkaddr) &
                                        (sbi->blocks_per_seg - 1);

        /* rewrite node page */
        set_page_writeback(page);
        f2fs_submit_page_mbio(sbi, page, new_blkaddr, &fio);
        f2fs_submit_merged_bio(sbi, NODE, WRITE);
        refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);

        locate_dirty_segment(sbi, old_cursegno);
        locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));

        mutex_unlock(&sit_i->sentry_lock);
        mutex_unlock(&curseg->curseg_mutex);
}
void f2fs_wait_on_page_writeback(struct page *page,
                                enum page_type type)
{
        struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
        if (PageWriteback(page)) {
                f2fs_submit_merged_bio(sbi, type, WRITE);
                wait_on_page_writeback(page);
        }
}
static int read_compacted_summaries(struct f2fs_sb_info *sbi)
{
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        struct curseg_info *seg_i;
        unsigned char *kaddr;
        struct page *page;
        block_t start;
        int i, j, offset;

        start = start_sum_block(sbi);

        page = get_meta_page(sbi, start++);
        kaddr = (unsigned char *)page_address(page);

        /* Step 1: restore nat cache */
        seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
        memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);

        /* Step 2: restore sit cache */
        seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
        memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE,
                                                SUM_JOURNAL_SIZE);
        offset = 2 * SUM_JOURNAL_SIZE;

        /* Step 3: restore summary entries */
        for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
                unsigned short blk_off;
                unsigned int segno;

                seg_i = CURSEG_I(sbi, i);
                segno = le32_to_cpu(ckpt->cur_data_segno[i]);
                blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
                seg_i->next_segno = segno;
                reset_curseg(sbi, i, 0);
                seg_i->alloc_type = ckpt->alloc_type[i];
                seg_i->next_blkoff = blk_off;

                if (seg_i->alloc_type == SSR)
                        blk_off = sbi->blocks_per_seg;

                for (j = 0; j < blk_off; j++) {
                        struct f2fs_summary *s;
                        s = (struct f2fs_summary *)(kaddr + offset);
                        seg_i->sum_blk->entries[j] = *s;
                        offset += SUMMARY_SIZE;
                        if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
                                                SUM_FOOTER_SIZE)
                                continue;

                        f2fs_put_page(page, 1);
                        page = NULL;

                        page = get_meta_page(sbi, start++);
                        kaddr = (unsigned char *)page_address(page);
                        offset = 0;
                }
        }
        f2fs_put_page(page, 1);
        return 0;
}
static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
{
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        struct f2fs_summary_block *sum;
        struct curseg_info *curseg;
        struct page *new;
        unsigned short blk_off;
        unsigned int segno = 0;
        block_t blk_addr = 0;

        /* get segment number and block addr */
        if (IS_DATASEG(type)) {
                segno = le32_to_cpu(ckpt->cur_data_segno[type]);
                blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
                                                        CURSEG_HOT_DATA]);
                if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
                        blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
                else
                        blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
        } else {
                segno = le32_to_cpu(ckpt->cur_node_segno[type -
                                                        CURSEG_HOT_NODE]);
                blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
                                                        CURSEG_HOT_NODE]);
                if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
                        blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
                                                        type - CURSEG_HOT_NODE);
                else
                        blk_addr = GET_SUM_BLOCK(sbi, segno);
        }

        new = get_meta_page(sbi, blk_addr);
        sum = (struct f2fs_summary_block *)page_address(new);

        if (IS_NODESEG(type)) {
                if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) {
                        struct f2fs_summary *ns = &sum->entries[0];
                        int i;
                        for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
                                ns->version = 0;
                                ns->ofs_in_node = 0;
                        }
                } else {
                        if (restore_node_summary(sbi, segno, sum)) {
                                f2fs_put_page(new, 1);
                                return -EINVAL;
                        }
                }
        }

        /* set uncompleted segment to curseg */
        curseg = CURSEG_I(sbi, type);
        mutex_lock(&curseg->curseg_mutex);
        memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE);
        curseg->next_segno = segno;
        reset_curseg(sbi, type, 0);
        curseg->alloc_type = ckpt->alloc_type[type];
        curseg->next_blkoff = blk_off;
        mutex_unlock(&curseg->curseg_mutex);
        f2fs_put_page(new, 1);
        return 0;
}
static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
{
        int type = CURSEG_HOT_DATA;

        if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
                /* restore for compacted data summary */
                if (read_compacted_summaries(sbi))
                        return -EINVAL;
                type = CURSEG_HOT_NODE;
        }

        for (; type <= CURSEG_COLD_NODE; type++)
                if (read_normal_summaries(sbi, type))
                        return -EINVAL;
        return 0;
}
static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
{
        struct page *page;
        unsigned char *kaddr;
        struct f2fs_summary *summary;
        struct curseg_info *seg_i;
        int written_size = 0;
        int i, j;

        page = grab_meta_page(sbi, blkaddr++);
        kaddr = (unsigned char *)page_address(page);

        /* Step 1: write nat cache */
        seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
        memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE);
        written_size += SUM_JOURNAL_SIZE;

        /* Step 2: write sit cache */
        seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
        memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits,
                                                SUM_JOURNAL_SIZE);
        written_size += SUM_JOURNAL_SIZE;

        /* Step 3: write summary entries */
        for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
                unsigned short blkoff;
                seg_i = CURSEG_I(sbi, i);
                if (sbi->ckpt->alloc_type[i] == SSR)
                        blkoff = sbi->blocks_per_seg;
                else
                        blkoff = curseg_blkoff(sbi, i);

                for (j = 0; j < blkoff; j++) {
                        if (!page) {
                                page = grab_meta_page(sbi, blkaddr++);
                                kaddr = (unsigned char *)page_address(page);
                                written_size = 0;
                        }
                        summary = (struct f2fs_summary *)(kaddr + written_size);
                        *summary = seg_i->sum_blk->entries[j];
                        written_size += SUMMARY_SIZE;

                        if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
                                                        SUM_FOOTER_SIZE)
                                continue;

                        set_page_dirty(page);
                        f2fs_put_page(page, 1);
                        page = NULL;
                }
        }
        if (page) {
                set_page_dirty(page);
                f2fs_put_page(page, 1);
        }
}
static void write_normal_summaries(struct f2fs_sb_info *sbi,
                                        block_t blkaddr, int type)
{
        int i, end;
        if (IS_DATASEG(type))
                end = type + NR_CURSEG_DATA_TYPE;
        else
                end = type + NR_CURSEG_NODE_TYPE;

        for (i = type; i < end; i++) {
                struct curseg_info *sum = CURSEG_I(sbi, i);
                mutex_lock(&sum->curseg_mutex);
                write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type));
                mutex_unlock(&sum->curseg_mutex);
        }
}
void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
        if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG))
                write_compacted_summaries(sbi, start_blk);
        else
                write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
}
void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
        if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG))
                write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
}
int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
                                        unsigned int val, int alloc)
{
        int i;

        if (type == NAT_JOURNAL) {
                for (i = 0; i < nats_in_cursum(sum); i++) {
                        if (le32_to_cpu(nid_in_journal(sum, i)) == val)
                                return i;
                }
                if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES)
                        return update_nats_in_cursum(sum, 1);
        } else if (type == SIT_JOURNAL) {
                for (i = 0; i < sits_in_cursum(sum); i++)
                        if (le32_to_cpu(segno_in_journal(sum, i)) == val)
                                return i;
                if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES)
                        return update_sits_in_cursum(sum, 1);
        }
        return -1;
}
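/*
 * The return value is the journal slot index holding "val" (a nid for
 * NAT_JOURNAL, a segno for SIT_JOURNAL). When no slot matches and "alloc"
 * is set, a fresh slot is appended if the journal has room; otherwise -1
 * is returned.
 */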
static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
                                        unsigned int segno)
{
        struct sit_info *sit_i = SIT_I(sbi);
        unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
        block_t blk_addr = sit_i->sit_base_addr + offset;

        check_seg_range(sbi, segno);

        /* calculate sit block address */
        if (f2fs_test_bit(offset, sit_i->sit_bitmap))
                blk_addr += sit_i->sit_blocks;

        return get_meta_page(sbi, blk_addr);
}
static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
                                        unsigned int start)
{
        struct sit_info *sit_i = SIT_I(sbi);
        struct page *src_page, *dst_page;
        pgoff_t src_off, dst_off;
        void *src_addr, *dst_addr;

        src_off = current_sit_addr(sbi, start);
        dst_off = next_sit_addr(sbi, src_off);

        /* get current sit block page without lock */
        src_page = get_meta_page(sbi, src_off);
        dst_page = grab_meta_page(sbi, dst_off);
        f2fs_bug_on(PageDirty(src_page));

        src_addr = page_address(src_page);
        dst_addr = page_address(dst_page);
        memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);

        set_page_dirty(dst_page);
        f2fs_put_page(src_page, 1);

        set_to_next_sit(sit_i, start);

        return dst_page;
}
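/*
 * SIT blocks are double-buffered: each logical SIT block has two on-disk
 * locations and sit_bitmap selects the current one. Updates go to the
 * alternate location and set_to_next_sit() flips the bit, so the copy
 * referenced by the previous checkpoint stays intact.
 */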
static bool flush_sits_in_journal(struct f2fs_sb_info *sbi)
{
        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
        struct f2fs_summary_block *sum = curseg->sum_blk;
        int i;

        /*
         * If the journal area in the current summary block is full of sit
         * entries, all of them are flushed; otherwise new hot sit entries
         * could not replace the existing ones.
         */
        if (sits_in_cursum(sum) >= SIT_JOURNAL_ENTRIES) {
                for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
                        unsigned int segno;
                        segno = le32_to_cpu(segno_in_journal(sum, i));
                        __mark_sit_entry_dirty(sbi, segno);
                }
                update_sits_in_cursum(sum, -sits_in_cursum(sum));
                return true;
        }
        return false;
}
/*
 * CP calls this function, which flushes SIT entries including sit_journal,
 * and moves prefree segs to free segs.
 */
void flush_sit_entries(struct f2fs_sb_info *sbi)
{
        struct sit_info *sit_i = SIT_I(sbi);
        unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
        struct f2fs_summary_block *sum = curseg->sum_blk;
        unsigned long nsegs = TOTAL_SEGS(sbi);
        struct page *page = NULL;
        struct f2fs_sit_block *raw_sit = NULL;
        unsigned int start = 0, end = 0;
        unsigned int segno = -1;
        bool flushed;

        mutex_lock(&curseg->curseg_mutex);
        mutex_lock(&sit_i->sentry_lock);

        /*
         * "flushed" indicates whether sit entries in journal are flushed
         * to the SIT area or not.
         */
        flushed = flush_sits_in_journal(sbi);

        while ((segno = find_next_bit(bitmap, nsegs, segno + 1)) < nsegs) {
                struct seg_entry *se = get_seg_entry(sbi, segno);
                int sit_offset, offset;

                sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);

                /* add discard candidates */
                if (SM_I(sbi)->nr_discards < SM_I(sbi)->max_discards)
                        add_discard_addrs(sbi, segno, se);

                if (flushed)
                        goto to_sit_page;

                offset = lookup_journal_in_cursum(sum, SIT_JOURNAL, segno, 1);
                if (offset >= 0) {
                        segno_in_journal(sum, offset) = cpu_to_le32(segno);
                        seg_info_to_raw_sit(se, &sit_in_journal(sum, offset));
                        goto flush_done;
                }
to_sit_page:
                if (!page || (start > segno) || (segno > end)) {
                        if (page) {
                                f2fs_put_page(page, 1);
                                page = NULL;
                        }

                        start = START_SEGNO(sit_i, segno);
                        end = start + SIT_ENTRY_PER_BLOCK - 1;

                        /* read sit block that will be updated */
                        page = get_next_sit_page(sbi, start);
                        raw_sit = page_address(page);
                }

                /* update entry in SIT block */
                seg_info_to_raw_sit(se, &raw_sit->entries[sit_offset]);
flush_done:
                __clear_bit(segno, bitmap);
                sit_i->dirty_sentries--;
        }
        mutex_unlock(&sit_i->sentry_lock);
        mutex_unlock(&curseg->curseg_mutex);

        /* writeout last modified SIT block */
        f2fs_put_page(page, 1);

        set_prefree_as_free_segments(sbi);
}
static int build_sit_info(struct f2fs_sb_info *sbi)
{
        struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        struct sit_info *sit_i;
        unsigned int sit_segs, start;
        char *src_bitmap, *dst_bitmap;
        unsigned int bitmap_size;

        /* allocate memory for SIT information */
        sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
        if (!sit_i)
                return -ENOMEM;

        SM_I(sbi)->sit_info = sit_i;

        sit_i->sentries = vzalloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry));
        if (!sit_i->sentries)
                return -ENOMEM;

        bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
        sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!sit_i->dirty_sentries_bitmap)
                return -ENOMEM;

        for (start = 0; start < TOTAL_SEGS(sbi); start++) {
                sit_i->sentries[start].cur_valid_map
                        = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
                sit_i->sentries[start].ckpt_valid_map
                        = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
                if (!sit_i->sentries[start].cur_valid_map
                                || !sit_i->sentries[start].ckpt_valid_map)
                        return -ENOMEM;
        }

        if (sbi->segs_per_sec > 1) {
                sit_i->sec_entries = vzalloc(TOTAL_SECS(sbi) *
                                        sizeof(struct sec_entry));
                if (!sit_i->sec_entries)
                        return -ENOMEM;
        }

        /* get information related with SIT */
        sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;

        /* setup SIT bitmap from checkpoint pack */
        bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
        src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);

        dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
        if (!dst_bitmap)
                return -ENOMEM;

        /* init SIT information */
        sit_i->s_ops = &default_salloc_ops;

        sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
        sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
        sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
        sit_i->sit_bitmap = dst_bitmap;
        sit_i->bitmap_size = bitmap_size;
        sit_i->dirty_sentries = 0;
        sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
        sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
        sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
        mutex_init(&sit_i->sentry_lock);
        return 0;
}
static int build_free_segmap(struct f2fs_sb_info *sbi)
{
        struct f2fs_sm_info *sm_info = SM_I(sbi);
        struct free_segmap_info *free_i;
        unsigned int bitmap_size, sec_bitmap_size;

        /* allocate memory for free segmap information */
        free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
        if (!free_i)
                return -ENOMEM;

        SM_I(sbi)->free_info = free_i;

        bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
        free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL);
        if (!free_i->free_segmap)
                return -ENOMEM;

        sec_bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
        free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL);
        if (!free_i->free_secmap)
                return -ENOMEM;

        /* set all segments as dirty temporarily */
        memset(free_i->free_segmap, 0xff, bitmap_size);
        memset(free_i->free_secmap, 0xff, sec_bitmap_size);

        /* init free segmap information */
        free_i->start_segno =
                (unsigned int) GET_SEGNO_FROM_SEG0(sbi, sm_info->main_blkaddr);
        free_i->free_segments = 0;
        free_i->free_sections = 0;
        rwlock_init(&free_i->segmap_lock);
        return 0;
}
static int build_curseg(struct f2fs_sb_info *sbi)
{
        struct curseg_info *array;
        int i;

        array = kzalloc(sizeof(*array) * NR_CURSEG_TYPE, GFP_KERNEL);
        if (!array)
                return -ENOMEM;

        SM_I(sbi)->curseg_array = array;

        for (i = 0; i < NR_CURSEG_TYPE; i++) {
                mutex_init(&array[i].curseg_mutex);
                array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
                if (!array[i].sum_blk)
                        return -ENOMEM;
                array[i].segno = NULL_SEGNO;
                array[i].next_blkoff = 0;
        }
        return restore_curseg_summaries(sbi);
}
static int ra_sit_pages(struct f2fs_sb_info *sbi, int start, int nrpages)
{
        struct address_space *mapping = META_MAPPING(sbi);
        struct page *page;
        block_t blk_addr, prev_blk_addr = 0;
        int sit_blk_cnt = SIT_BLK_CNT(sbi);
        int blkno = start;
        struct f2fs_io_info fio = {
                .type = META,
                .rw = READ_SYNC | REQ_META | REQ_PRIO
        };

        for (; blkno < start + nrpages && blkno < sit_blk_cnt; blkno++) {

                blk_addr = current_sit_addr(sbi, blkno * SIT_ENTRY_PER_BLOCK);

                if (blkno != start && prev_blk_addr + 1 != blk_addr)
                        break;
                prev_blk_addr = blk_addr;
repeat:
                page = grab_cache_page(mapping, blk_addr);
                if (!page) {
                        cond_resched();
                        goto repeat;
                }
                if (PageUptodate(page)) {
                        mark_page_accessed(page);
                        f2fs_put_page(page, 1);
                        continue;
                }

                f2fs_submit_page_mbio(sbi, page, blk_addr, &fio);

                mark_page_accessed(page);
                f2fs_put_page(page, 0);
        }

        f2fs_submit_merged_bio(sbi, META, READ);
        return blkno - start;
}
static void build_sit_entries(struct f2fs_sb_info *sbi)
{
        struct sit_info *sit_i = SIT_I(sbi);
        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
        struct f2fs_summary_block *sum = curseg->sum_blk;
        int sit_blk_cnt = SIT_BLK_CNT(sbi);
        unsigned int i, start, end;
        unsigned int readed, start_blk = 0;
        int nrpages = MAX_BIO_BLOCKS(max_hw_blocks(sbi));

        do {
                readed = ra_sit_pages(sbi, start_blk, nrpages);

                start = start_blk * sit_i->sents_per_block;
                end = (start_blk + readed) * sit_i->sents_per_block;

                for (; start < end && start < TOTAL_SEGS(sbi); start++) {
                        struct seg_entry *se = &sit_i->sentries[start];
                        struct f2fs_sit_block *sit_blk;
                        struct f2fs_sit_entry sit;
                        struct page *page;

                        mutex_lock(&curseg->curseg_mutex);
                        for (i = 0; i < sits_in_cursum(sum); i++) {
                                if (le32_to_cpu(segno_in_journal(sum, i))
                                                                == start) {
                                        sit = sit_in_journal(sum, i);
                                        mutex_unlock(&curseg->curseg_mutex);
                                        goto got_it;
                                }
                        }
                        mutex_unlock(&curseg->curseg_mutex);

                        page = get_current_sit_page(sbi, start);
                        sit_blk = (struct f2fs_sit_block *)page_address(page);
                        sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
                        f2fs_put_page(page, 1);
got_it:
                        check_block_count(sbi, start, &sit);
                        seg_info_from_raw_sit(se, &sit);
                        if (sbi->segs_per_sec > 1) {
                                struct sec_entry *e = get_sec_entry(sbi, start);
                                e->valid_blocks += se->valid_blocks;
                        }
                }
                start_blk += readed;
        } while (start_blk < sit_blk_cnt);
}
static void init_free_segmap(struct f2fs_sb_info *sbi)
{
        unsigned int start;
        int type;

        for (start = 0; start < TOTAL_SEGS(sbi); start++) {
                struct seg_entry *sentry = get_seg_entry(sbi, start);
                if (!sentry->valid_blocks)
                        __set_free(sbi, start);
        }

        /* mark the current segments as in-use */
        for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
                struct curseg_info *curseg_t = CURSEG_I(sbi, type);
                __set_test_and_inuse(sbi, curseg_t->segno);
        }
}
static void init_dirty_segmap(struct f2fs_sb_info *sbi)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        struct free_segmap_info *free_i = FREE_I(sbi);
        unsigned int segno = 0, offset = 0, total_segs = TOTAL_SEGS(sbi);
        unsigned short valid_blocks;

        while (1) {
                /* find dirty segment based on free segmap */
                segno = find_next_inuse(free_i, total_segs, offset);
                if (segno >= total_segs)
                        break;
                offset = segno + 1;
                valid_blocks = get_valid_blocks(sbi, segno, 0);
                if (valid_blocks >= sbi->blocks_per_seg || !valid_blocks)
                        continue;
                mutex_lock(&dirty_i->seglist_lock);
                __locate_dirty_segment(sbi, segno, DIRTY);
                mutex_unlock(&dirty_i->seglist_lock);
        }
}
static int init_victim_secmap(struct f2fs_sb_info *sbi)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));

        dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!dirty_i->victim_secmap)
                return -ENOMEM;
        return 0;
}
static int build_dirty_segmap(struct f2fs_sb_info *sbi)
{
        struct dirty_seglist_info *dirty_i;
        unsigned int bitmap_size, i;

        /* allocate memory for dirty segments list information */
        dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
        if (!dirty_i)
                return -ENOMEM;

        SM_I(sbi)->dirty_info = dirty_i;
        mutex_init(&dirty_i->seglist_lock);

        bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));

        for (i = 0; i < NR_DIRTY_TYPE; i++) {
                dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL);
                if (!dirty_i->dirty_segmap[i])
                        return -ENOMEM;
        }

        init_dirty_segmap(sbi);
        return init_victim_secmap(sbi);
}
/*
 * Update min, max modified time for cost-benefit GC algorithm
 */
static void init_min_max_mtime(struct f2fs_sb_info *sbi)
{
        struct sit_info *sit_i = SIT_I(sbi);
        unsigned int segno;

        mutex_lock(&sit_i->sentry_lock);

        sit_i->min_mtime = LLONG_MAX;

        for (segno = 0; segno < TOTAL_SEGS(sbi); segno += sbi->segs_per_sec) {
                unsigned int i;
                unsigned long long mtime = 0;

                for (i = 0; i < sbi->segs_per_sec; i++)
                        mtime += get_seg_entry(sbi, segno + i)->mtime;

                mtime = div_u64(mtime, sbi->segs_per_sec);

                if (sit_i->min_mtime > mtime)
                        sit_i->min_mtime = mtime;
        }
        sit_i->max_mtime = get_mtime(sbi);
        mutex_unlock(&sit_i->sentry_lock);
}
int build_segment_manager(struct f2fs_sb_info *sbi)
{
        struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        struct f2fs_sm_info *sm_info;
        int err;

        sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
        if (!sm_info)
                return -ENOMEM;

        /* init sm info */
        sbi->sm_info = sm_info;
        INIT_LIST_HEAD(&sm_info->wblist_head);
        spin_lock_init(&sm_info->wblist_lock);
        sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
        sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
        sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
        sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
        sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
        sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
        sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
        sm_info->rec_prefree_segments = DEF_RECLAIM_PREFREE_SEGMENTS;
        sm_info->ipu_policy = F2FS_IPU_DISABLE;
        sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;

        INIT_LIST_HEAD(&sm_info->discard_list);
        sm_info->nr_discards = 0;
        sm_info->max_discards = 0;

        err = build_sit_info(sbi);
        if (err)
                return err;
        err = build_free_segmap(sbi);
        if (err)
                return err;
        err = build_curseg(sbi);
        if (err)
                return err;

        /* reinit free segmap based on SIT */
        build_sit_entries(sbi);

        init_free_segmap(sbi);
        err = build_dirty_segmap(sbi);
        if (err)
                return err;

        init_min_max_mtime(sbi);
        return 0;
}
static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
                                        enum dirty_type dirty_type)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

        mutex_lock(&dirty_i->seglist_lock);
        kfree(dirty_i->dirty_segmap[dirty_type]);
        dirty_i->nr_dirty[dirty_type] = 0;
        mutex_unlock(&dirty_i->seglist_lock);
}
static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        kfree(dirty_i->victim_secmap);
}
static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        int i;

        if (!dirty_i)
                return;

        /* discard pre-free/dirty segments list */
        for (i = 0; i < NR_DIRTY_TYPE; i++)
                discard_dirty_segmap(sbi, i);

        destroy_victim_secmap(sbi);
        SM_I(sbi)->dirty_info = NULL;
        kfree(dirty_i);
}
static void destroy_curseg(struct f2fs_sb_info *sbi)
{
        struct curseg_info *array = SM_I(sbi)->curseg_array;
        int i;

        if (!array)
                return;
        SM_I(sbi)->curseg_array = NULL;
        for (i = 0; i < NR_CURSEG_TYPE; i++)
                kfree(array[i].sum_blk);
        kfree(array);
}
static void destroy_free_segmap(struct f2fs_sb_info *sbi)
{
        struct free_segmap_info *free_i = SM_I(sbi)->free_info;
        if (!free_i)
                return;
        SM_I(sbi)->free_info = NULL;
        kfree(free_i->free_segmap);
        kfree(free_i->free_secmap);
        kfree(free_i);
}
static void destroy_sit_info(struct f2fs_sb_info *sbi)
{
        struct sit_info *sit_i = SIT_I(sbi);
        unsigned int start;

        if (!sit_i)
                return;

        if (sit_i->sentries) {
                for (start = 0; start < TOTAL_SEGS(sbi); start++) {
                        kfree(sit_i->sentries[start].cur_valid_map);
                        kfree(sit_i->sentries[start].ckpt_valid_map);
                }
        }
        vfree(sit_i->sentries);
        vfree(sit_i->sec_entries);
        kfree(sit_i->dirty_sentries_bitmap);

        SM_I(sbi)->sit_info = NULL;
        kfree(sit_i->sit_bitmap);
        kfree(sit_i);
}
void destroy_segment_manager(struct f2fs_sb_info *sbi)
{
        struct f2fs_sm_info *sm_info = SM_I(sbi);
        if (!sm_info)
                return;
        destroy_dirty_segmap(sbi);
        destroy_curseg(sbi);
        destroy_free_segmap(sbi);
        destroy_sit_info(sbi);
        sbi->sm_info = NULL;
        kfree(sm_info);
}
int __init create_segment_manager_caches(void)
{
        discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
                        sizeof(struct discard_entry), NULL);
        if (!discard_entry_slab)
                return -ENOMEM;
        return 0;
}

void destroy_segment_manager_caches(void)
{
        kmem_cache_destroy(discard_entry_slab);
}