1 /*
2 * fs/f2fs/segment.c
4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 #include <linux/fs.h>
12 #include <linux/f2fs_fs.h>
13 #include <linux/bio.h>
14 #include <linux/blkdev.h>
15 #include <linux/prefetch.h>
16 #include <linux/kthread.h>
17 #include <linux/swap.h>
18 #include <linux/timer.h>
20 #include "f2fs.h"
21 #include "segment.h"
22 #include "node.h"
23 #include "trace.h"
24 #include <trace/events/f2fs.h>
26 #define __reverse_ffz(x) __reverse_ffs(~(x))
28 static struct kmem_cache *discard_entry_slab;
29 static struct kmem_cache *sit_entry_set_slab;
30 static struct kmem_cache *inmem_entry_slab;
32 static unsigned long __reverse_ulong(unsigned char *str)
34 unsigned long tmp = 0;
35 int shift = 24, idx = 0;
37 #if BITS_PER_LONG == 64
38 shift = 56;
39 #endif
40 while (shift >= 0) {
41 tmp |= (unsigned long)str[idx++] << shift;
42 shift -= BITS_PER_BYTE;
44 return tmp;
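/*
 * Example: on a 32-bit build, bytes {0x12, 0x34, 0x56, 0x78} are returned
 * as 0x12345678, i.e. str[0] always lands in the most significant byte of
 * the word.  This preserves the "bit 0 is the MSB of byte 0" layout used by
 * f2fs_set_bit when a bitmap is scanned one word at a time below.
 */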
48 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
49 * MSB and LSB are reversed in a byte by f2fs_set_bit.
51 static inline unsigned long __reverse_ffs(unsigned long word)
53 int num = 0;
55 #if BITS_PER_LONG == 64
56 if ((word & 0xffffffff00000000UL) == 0)
57 num += 32;
58 else
59 word >>= 32;
60 #endif
61 if ((word & 0xffff0000) == 0)
62 num += 16;
63 else
64 word >>= 16;
66 if ((word & 0xff00) == 0)
67 num += 8;
68 else
69 word >>= 8;
71 if ((word & 0xf0) == 0)
72 num += 4;
73 else
74 word >>= 4;
76 if ((word & 0xc) == 0)
77 num += 2;
78 else
79 word >>= 2;
81 if ((word & 0x2) == 0)
82 num += 1;
83 return num;
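/*
 * Example (32-bit word): __reverse_ffs(0x80000000UL) == 0 and
 * __reverse_ffs(0x00000001UL) == 31.  Positions are counted from the MSB,
 * which matches the f2fs_set_bit numbering once the bitmap has been loaded
 * with __reverse_ulong.
 */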
87 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
88 * f2fs_set_bit makes MSB and LSB reversed in a byte.
89 * Example:
90 * MSB <--> LSB
91 * f2fs_set_bit(0, bitmap) => 1000 0000
92 * f2fs_set_bit(7, bitmap) => 0000 0001
94 static unsigned long __find_rev_next_bit(const unsigned long *addr,
95 unsigned long size, unsigned long offset)
97 const unsigned long *p = addr + BIT_WORD(offset);
98 unsigned long result = offset & ~(BITS_PER_LONG - 1);
99 unsigned long tmp;
101 if (offset >= size)
102 return size;
104 size -= result;
105 offset %= BITS_PER_LONG;
106 if (!offset)
107 goto aligned;
109 tmp = __reverse_ulong((unsigned char *)p);
110 tmp &= ~0UL >> offset;
112 if (size < BITS_PER_LONG)
113 goto found_first;
114 if (tmp)
115 goto found_middle;
117 size -= BITS_PER_LONG;
118 result += BITS_PER_LONG;
119 p++;
120 aligned:
121 while (size & ~(BITS_PER_LONG-1)) {
122 tmp = __reverse_ulong((unsigned char *)p);
123 if (tmp)
124 goto found_middle;
125 result += BITS_PER_LONG;
126 size -= BITS_PER_LONG;
127 p++;
129 if (!size)
130 return result;
132 tmp = __reverse_ulong((unsigned char *)p);
133 found_first:
134 tmp &= (~0UL << (BITS_PER_LONG - size));
135 if (!tmp) /* Are any bits set? */
136 return result + size; /* Nope. */
137 found_middle:
138 return result + __reverse_ffs(tmp);
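/*
 * Example: if only f2fs_set_bit(5, bitmap) has been called,
 * __find_rev_next_bit(bitmap, 64, 0) returns 5, while
 * __find_rev_next_bit(bitmap, 64, 6) returns 64 (i.e. size, meaning no
 * further set bit was found).
 */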
141 static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
142 unsigned long size, unsigned long offset)
144 const unsigned long *p = addr + BIT_WORD(offset);
145 unsigned long result = offset & ~(BITS_PER_LONG - 1);
146 unsigned long tmp;
148 if (offset >= size)
149 return size;
151 size -= result;
152 offset %= BITS_PER_LONG;
153 if (!offset)
154 goto aligned;
156 tmp = __reverse_ulong((unsigned char *)p);
157 tmp |= ~((~0UL << offset) >> offset);
159 if (size < BITS_PER_LONG)
160 goto found_first;
161 if (tmp != ~0UL)
162 goto found_middle;
164 size -= BITS_PER_LONG;
165 result += BITS_PER_LONG;
166 p++;
167 aligned:
168 while (size & ~(BITS_PER_LONG - 1)) {
169 tmp = __reverse_ulong((unsigned char *)p);
170 if (tmp != ~0UL)
171 goto found_middle;
172 result += BITS_PER_LONG;
173 size -= BITS_PER_LONG;
174 p++;
176 if (!size)
177 return result;
179 tmp = __reverse_ulong((unsigned char *)p);
180 found_first:
181 tmp |= ~(~0UL << (BITS_PER_LONG - size));
182 if (tmp == ~0UL) /* Are any bits zero? */
183 return result + size; /* Nope. */
184 found_middle:
185 return result + __reverse_ffz(tmp);
188 void register_inmem_page(struct inode *inode, struct page *page)
190 struct f2fs_inode_info *fi = F2FS_I(inode);
191 struct inmem_pages *new;
193 f2fs_trace_pid(page);
195 set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
196 SetPagePrivate(page);
198 new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
200 /* add atomic page indices to the list */
201 new->page = page;
202 INIT_LIST_HEAD(&new->list);
204 /* increase reference count with clean state */
205 mutex_lock(&fi->inmem_lock);
206 get_page(page);
207 list_add_tail(&new->list, &fi->inmem_pages);
208 inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
209 mutex_unlock(&fi->inmem_lock);
211 trace_f2fs_register_inmem_page(page, INMEM);
214 int commit_inmem_pages(struct inode *inode, bool abort)
216 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
217 struct f2fs_inode_info *fi = F2FS_I(inode);
218 struct inmem_pages *cur, *tmp;
219 bool submit_bio = false;
220 struct f2fs_io_info fio = {
221 .sbi = sbi,
222 .type = DATA,
223 .rw = WRITE_SYNC | REQ_PRIO,
224 .encrypted_page = NULL,
226 int err = 0;
229	 * abort is true only when f2fs_evict_inode is called.
230	 * Basically, f2fs_evict_inode doesn't produce any data writes, so
231	 * we don't need to call f2fs_balance_fs in that case.
232	 * Otherwise, f2fs_gc in f2fs_balance_fs can wait forever until this
233	 * inode becomes free by iget_locked in f2fs_iget.
235 if (!abort) {
236 f2fs_balance_fs(sbi);
237 f2fs_lock_op(sbi);
240 mutex_lock(&fi->inmem_lock);
241 list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
242 lock_page(cur->page);
243 if (!abort) {
244 if (cur->page->mapping == inode->i_mapping) {
245 set_page_dirty(cur->page);
246 f2fs_wait_on_page_writeback(cur->page, DATA);
247 if (clear_page_dirty_for_io(cur->page))
248 inode_dec_dirty_pages(inode);
249 trace_f2fs_commit_inmem_page(cur->page, INMEM);
250 fio.page = cur->page;
251 err = do_write_data_page(&fio);
252 if (err) {
253 unlock_page(cur->page);
254 break;
256 clear_cold_data(cur->page);
257 submit_bio = true;
259 } else {
260 trace_f2fs_commit_inmem_page(cur->page, INMEM_DROP);
262 set_page_private(cur->page, 0);
263 ClearPagePrivate(cur->page);
264 f2fs_put_page(cur->page, 1);
266 list_del(&cur->list);
267 kmem_cache_free(inmem_entry_slab, cur);
268 dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
270 mutex_unlock(&fi->inmem_lock);
272 if (!abort) {
273 f2fs_unlock_op(sbi);
274 if (submit_bio)
275 f2fs_submit_merged_bio(sbi, DATA, WRITE);
277 return err;
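/*
 * Note: with abort == false the registered atomic pages are written back in
 * registration order under f2fs_lock_op() and a merged DATA bio is submitted
 * once at the end; with abort == true (only from f2fs_evict_inode) the pages
 * are simply dropped.  In both cases every entry is unlinked from
 * fi->inmem_pages and its page reference is released.
 */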
281 * This function balances dirty node and dentry pages.
282 * In addition, it controls garbage collection.
284 void f2fs_balance_fs(struct f2fs_sb_info *sbi)
287	 * We should do GC or end up with a checkpoint if there are too many
288	 * dirty dir/node pages and not enough free segments.
290 if (has_not_enough_free_secs(sbi, 0)) {
291 mutex_lock(&sbi->gc_mutex);
292 f2fs_gc(sbi, false);
296 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
298 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
299 return;
301	/* try to shrink extent cache when there is not enough memory */
302 if (!available_free_memory(sbi, EXTENT_CACHE))
303 f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
305 /* check the # of cached NAT entries */
306 if (!available_free_memory(sbi, NAT_ENTRIES))
307 try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
309 if (!available_free_memory(sbi, FREE_NIDS))
310 try_to_free_nids(sbi, NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES);
312 /* checkpoint is the only way to shrink partial cached entries */
313 if (!available_free_memory(sbi, NAT_ENTRIES) ||
314 excess_prefree_segs(sbi) ||
315 !available_free_memory(sbi, INO_ENTRIES) ||
316 jiffies > sbi->cp_expires)
317 f2fs_sync_fs(sbi->sb, true);
320 static int issue_flush_thread(void *data)
322 struct f2fs_sb_info *sbi = data;
323 struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
324 wait_queue_head_t *q = &fcc->flush_wait_queue;
325 repeat:
326 if (kthread_should_stop())
327 return 0;
329 if (!llist_empty(&fcc->issue_list)) {
330 struct bio *bio;
331 struct flush_cmd *cmd, *next;
332 int ret;
334 bio = f2fs_bio_alloc(0);
336 fcc->dispatch_list = llist_del_all(&fcc->issue_list);
337 fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
339 bio->bi_bdev = sbi->sb->s_bdev;
340 ret = submit_bio_wait(WRITE_FLUSH, bio);
342 llist_for_each_entry_safe(cmd, next,
343 fcc->dispatch_list, llnode) {
344 cmd->ret = ret;
345 complete(&cmd->wait);
347 bio_put(bio);
348 fcc->dispatch_list = NULL;
351 wait_event_interruptible(*q,
352 kthread_should_stop() || !llist_empty(&fcc->issue_list));
353 goto repeat;
356 int f2fs_issue_flush(struct f2fs_sb_info *sbi)
358 struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
359 struct flush_cmd cmd;
361 trace_f2fs_issue_flush(sbi->sb, test_opt(sbi, NOBARRIER),
362 test_opt(sbi, FLUSH_MERGE));
364 if (test_opt(sbi, NOBARRIER))
365 return 0;
367 if (!test_opt(sbi, FLUSH_MERGE)) {
368 struct bio *bio = f2fs_bio_alloc(0);
369 int ret;
371 bio->bi_bdev = sbi->sb->s_bdev;
372 ret = submit_bio_wait(WRITE_FLUSH, bio);
373 bio_put(bio);
374 return ret;
377 init_completion(&cmd.wait);
379 llist_add(&cmd.llnode, &fcc->issue_list);
381 if (!fcc->dispatch_list)
382 wake_up(&fcc->flush_wait_queue);
384 wait_for_completion(&cmd.wait);
386 return cmd.ret;
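/*
 * Note: with FLUSH_MERGE enabled, concurrent callers queue a flush_cmd on
 * fcc->issue_list and sleep on cmd.wait; issue_flush_thread() drains the
 * whole list and completes all waiters with a single WRITE_FLUSH bio, so
 * many concurrent flush requests share one cache-flush command.  Without
 * FLUSH_MERGE each caller submits its own flush bio, and with NOBARRIER the
 * flush is skipped entirely.
 */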
389 int create_flush_cmd_control(struct f2fs_sb_info *sbi)
391 dev_t dev = sbi->sb->s_bdev->bd_dev;
392 struct flush_cmd_control *fcc;
393 int err = 0;
395 fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
396 if (!fcc)
397 return -ENOMEM;
398 init_waitqueue_head(&fcc->flush_wait_queue);
399 init_llist_head(&fcc->issue_list);
400 SM_I(sbi)->cmd_control_info = fcc;
401 fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
402 "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
403 if (IS_ERR(fcc->f2fs_issue_flush)) {
404 err = PTR_ERR(fcc->f2fs_issue_flush);
405 kfree(fcc);
406 SM_I(sbi)->cmd_control_info = NULL;
407 return err;
410 return err;
413 void destroy_flush_cmd_control(struct f2fs_sb_info *sbi)
415 struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
417 if (fcc && fcc->f2fs_issue_flush)
418 kthread_stop(fcc->f2fs_issue_flush);
419 kfree(fcc);
420 SM_I(sbi)->cmd_control_info = NULL;
423 static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
424 enum dirty_type dirty_type)
426 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
428 /* need not be added */
429 if (IS_CURSEG(sbi, segno))
430 return;
432 if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
433 dirty_i->nr_dirty[dirty_type]++;
435 if (dirty_type == DIRTY) {
436 struct seg_entry *sentry = get_seg_entry(sbi, segno);
437 enum dirty_type t = sentry->type;
439 if (unlikely(t >= DIRTY)) {
440 f2fs_bug_on(sbi, 1);
441 return;
443 if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
444 dirty_i->nr_dirty[t]++;
448 static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
449 enum dirty_type dirty_type)
451 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
453 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
454 dirty_i->nr_dirty[dirty_type]--;
456 if (dirty_type == DIRTY) {
457 struct seg_entry *sentry = get_seg_entry(sbi, segno);
458 enum dirty_type t = sentry->type;
460 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
461 dirty_i->nr_dirty[t]--;
463 if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
464 clear_bit(GET_SECNO(sbi, segno),
465 dirty_i->victim_secmap);
470	 * Errors such as -ENOMEM should not occur here.
471	 * Adding a dirty entry to the seglist is not a critical operation.
472	 * If a given segment is one of the current working segments, it won't be added.
474 static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
476 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
477 unsigned short valid_blocks;
479 if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
480 return;
482 mutex_lock(&dirty_i->seglist_lock);
484 valid_blocks = get_valid_blocks(sbi, segno, 0);
486 if (valid_blocks == 0) {
487 __locate_dirty_segment(sbi, segno, PRE);
488 __remove_dirty_segment(sbi, segno, DIRTY);
489 } else if (valid_blocks < sbi->blocks_per_seg) {
490 __locate_dirty_segment(sbi, segno, DIRTY);
491 } else {
492 /* Recovery routine with SSR needs this */
493 __remove_dirty_segment(sbi, segno, DIRTY);
496 mutex_unlock(&dirty_i->seglist_lock);
499 static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
500 block_t blkstart, block_t blklen)
502 sector_t start = SECTOR_FROM_BLOCK(blkstart);
503 sector_t len = SECTOR_FROM_BLOCK(blklen);
504 struct seg_entry *se;
505 unsigned int offset;
506 block_t i;
508 for (i = blkstart; i < blkstart + blklen; i++) {
509 se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
510 offset = GET_BLKOFF_FROM_SEG0(sbi, i);
512 if (!f2fs_test_and_set_bit(offset, se->discard_map))
513 sbi->discard_blks--;
515 trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
516 return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0);
519 bool discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
521 int err = -ENOTSUPP;
523 if (test_opt(sbi, DISCARD)) {
524 struct seg_entry *se = get_seg_entry(sbi,
525 GET_SEGNO(sbi, blkaddr));
526 unsigned int offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
528 if (f2fs_test_bit(offset, se->discard_map))
529 return false;
531 err = f2fs_issue_discard(sbi, blkaddr, 1);
534 if (err) {
535 update_meta_page(sbi, NULL, blkaddr);
536 return true;
538 return false;
541 static void __add_discard_entry(struct f2fs_sb_info *sbi,
542 struct cp_control *cpc, struct seg_entry *se,
543 unsigned int start, unsigned int end)
545 struct list_head *head = &SM_I(sbi)->discard_list;
546 struct discard_entry *new, *last;
548 if (!list_empty(head)) {
549 last = list_last_entry(head, struct discard_entry, list);
550 if (START_BLOCK(sbi, cpc->trim_start) + start ==
551 last->blkaddr + last->len) {
552 last->len += end - start;
553 goto done;
557 new = f2fs_kmem_cache_alloc(discard_entry_slab, GFP_NOFS);
558 INIT_LIST_HEAD(&new->list);
559 new->blkaddr = START_BLOCK(sbi, cpc->trim_start) + start;
560 new->len = end - start;
561 list_add_tail(&new->list, head);
562 done:
563 SM_I(sbi)->nr_discards += end - start;
566 static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc)
568 int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
569 int max_blocks = sbi->blocks_per_seg;
570 struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
571 unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
572 unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
573 unsigned long *discard_map = (unsigned long *)se->discard_map;
574 unsigned long *dmap = SIT_I(sbi)->tmp_map;
575 unsigned int start = 0, end = -1;
576 bool force = (cpc->reason == CP_DISCARD);
577 int i;
579 if (se->valid_blocks == max_blocks)
580 return;
582 if (!force) {
583 if (!test_opt(sbi, DISCARD) || !se->valid_blocks ||
584 SM_I(sbi)->nr_discards >= SM_I(sbi)->max_discards)
585 return;
588 /* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */
589 for (i = 0; i < entries; i++)
590 dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
591 (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];
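/*
 * Note on dmap: for a normal checkpoint it is (cur_map ^ ckpt_map) & ckpt_map,
 * i.e. blocks that were valid at the last checkpoint but have been freed
 * since; for CP_DISCARD (force) it is ~ckpt_map & ~discard_map, i.e. blocks
 * that are free on disk and have not been discarded yet.
 */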
593 while (force || SM_I(sbi)->nr_discards <= SM_I(sbi)->max_discards) {
594 start = __find_rev_next_bit(dmap, max_blocks, end + 1);
595 if (start >= max_blocks)
596 break;
598 end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
599 __add_discard_entry(sbi, cpc, se, start, end);
603 void release_discard_addrs(struct f2fs_sb_info *sbi)
605 struct list_head *head = &(SM_I(sbi)->discard_list);
606 struct discard_entry *entry, *this;
608 /* drop caches */
609 list_for_each_entry_safe(entry, this, head, list) {
610 list_del(&entry->list);
611 kmem_cache_free(discard_entry_slab, entry);
616	 * clear_prefree_segments should be called after the checkpoint is done.
618 static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
620 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
621 unsigned int segno;
623 mutex_lock(&dirty_i->seglist_lock);
624 for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
625 __set_test_and_free(sbi, segno);
626 mutex_unlock(&dirty_i->seglist_lock);
629 void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
631 struct list_head *head = &(SM_I(sbi)->discard_list);
632 struct discard_entry *entry, *this;
633 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
634 unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
635 unsigned int start = 0, end = -1;
637 mutex_lock(&dirty_i->seglist_lock);
639 while (1) {
640 int i;
641 start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
642 if (start >= MAIN_SEGS(sbi))
643 break;
644 end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
645 start + 1);
647 for (i = start; i < end; i++)
648 clear_bit(i, prefree_map);
650 dirty_i->nr_dirty[PRE] -= end - start;
652 if (!test_opt(sbi, DISCARD))
653 continue;
655 f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
656 (end - start) << sbi->log_blocks_per_seg);
658 mutex_unlock(&dirty_i->seglist_lock);
660 /* send small discards */
661 list_for_each_entry_safe(entry, this, head, list) {
662 if (cpc->reason == CP_DISCARD && entry->len < cpc->trim_minlen)
663 goto skip;
664 f2fs_issue_discard(sbi, entry->blkaddr, entry->len);
665 cpc->trimmed += entry->len;
666 skip:
667 list_del(&entry->list);
668 SM_I(sbi)->nr_discards -= entry->len;
669 kmem_cache_free(discard_entry_slab, entry);
673 static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
675 struct sit_info *sit_i = SIT_I(sbi);
677 if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
678 sit_i->dirty_sentries++;
679 return false;
682 return true;
685 static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
686 unsigned int segno, int modified)
688 struct seg_entry *se = get_seg_entry(sbi, segno);
689 se->type = type;
690 if (modified)
691 __mark_sit_entry_dirty(sbi, segno);
694 static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
696 struct seg_entry *se;
697 unsigned int segno, offset;
698 long int new_vblocks;
700 segno = GET_SEGNO(sbi, blkaddr);
702 se = get_seg_entry(sbi, segno);
703 new_vblocks = se->valid_blocks + del;
704 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
706 f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) ||
707 (new_vblocks > sbi->blocks_per_seg)));
709 se->valid_blocks = new_vblocks;
710 se->mtime = get_mtime(sbi);
711 SIT_I(sbi)->max_mtime = se->mtime;
713 /* Update valid block bitmap */
714 if (del > 0) {
715 if (f2fs_test_and_set_bit(offset, se->cur_valid_map))
716 f2fs_bug_on(sbi, 1);
717 if (!f2fs_test_and_set_bit(offset, se->discard_map))
718 sbi->discard_blks--;
719 } else {
720 if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map))
721 f2fs_bug_on(sbi, 1);
722 if (f2fs_test_and_clear_bit(offset, se->discard_map))
723 sbi->discard_blks++;
725 if (!f2fs_test_bit(offset, se->ckpt_valid_map))
726 se->ckpt_valid_blocks += del;
728 __mark_sit_entry_dirty(sbi, segno);
730 /* update total number of valid blocks to be written in ckpt area */
731 SIT_I(sbi)->written_valid_blocks += del;
733 if (sbi->segs_per_sec > 1)
734 get_sec_entry(sbi, segno)->valid_blocks += del;
737 void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new)
739 update_sit_entry(sbi, new, 1);
740 if (GET_SEGNO(sbi, old) != NULL_SEGNO)
741 update_sit_entry(sbi, old, -1);
743 locate_dirty_segment(sbi, GET_SEGNO(sbi, old));
744 locate_dirty_segment(sbi, GET_SEGNO(sbi, new));
747 void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
749 unsigned int segno = GET_SEGNO(sbi, addr);
750 struct sit_info *sit_i = SIT_I(sbi);
752 f2fs_bug_on(sbi, addr == NULL_ADDR);
753 if (addr == NEW_ADDR)
754 return;
756 /* add it into sit main buffer */
757 mutex_lock(&sit_i->sentry_lock);
759 update_sit_entry(sbi, addr, -1);
761 /* add it into dirty seglist */
762 locate_dirty_segment(sbi, segno);
764 mutex_unlock(&sit_i->sentry_lock);
767 bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
769 struct sit_info *sit_i = SIT_I(sbi);
770 unsigned int segno, offset;
771 struct seg_entry *se;
772 bool is_cp = false;
774 if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
775 return true;
777 mutex_lock(&sit_i->sentry_lock);
779 segno = GET_SEGNO(sbi, blkaddr);
780 se = get_seg_entry(sbi, segno);
781 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
783 if (f2fs_test_bit(offset, se->ckpt_valid_map))
784 is_cp = true;
786 mutex_unlock(&sit_i->sentry_lock);
788 return is_cp;
792	 * This function must be called with curseg_mutex held.
794 static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
795 struct f2fs_summary *sum)
797 struct curseg_info *curseg = CURSEG_I(sbi, type);
798 void *addr = curseg->sum_blk;
799 addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
800 memcpy(addr, sum, sizeof(struct f2fs_summary));
804 * Calculate the number of current summary pages for writing
806 int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
808 int valid_sum_count = 0;
809 int i, sum_in_page;
811 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
812 if (sbi->ckpt->alloc_type[i] == SSR)
813 valid_sum_count += sbi->blocks_per_seg;
814 else {
815 if (for_ra)
816 valid_sum_count += le16_to_cpu(
817 F2FS_CKPT(sbi)->cur_data_blkoff[i]);
818 else
819 valid_sum_count += curseg_blkoff(sbi, i);
823 sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE -
824 SUM_FOOTER_SIZE) / SUMMARY_SIZE;
825 if (valid_sum_count <= sum_in_page)
826 return 1;
827 else if ((valid_sum_count - sum_in_page) <=
828 (PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
829 return 2;
830 return 3;
834 * Caller should put this summary page
836 struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
838 return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
841 void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
843 struct page *page = grab_meta_page(sbi, blk_addr);
844 void *dst = page_address(page);
846 if (src)
847 memcpy(dst, src, PAGE_CACHE_SIZE);
848 else
849 memset(dst, 0, PAGE_CACHE_SIZE);
850 set_page_dirty(page);
851 f2fs_put_page(page, 1);
854 static void write_sum_page(struct f2fs_sb_info *sbi,
855 struct f2fs_summary_block *sum_blk, block_t blk_addr)
857 update_meta_page(sbi, (void *)sum_blk, blk_addr);
860 static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
862 struct curseg_info *curseg = CURSEG_I(sbi, type);
863 unsigned int segno = curseg->segno + 1;
864 struct free_segmap_info *free_i = FREE_I(sbi);
866 if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
867 return !test_bit(segno, free_i->free_segmap);
868 return 0;
872	 * Find a new segment in the free segment bitmap, honoring the allocation
873	 * direction. This function must succeed; otherwise it is a BUG.
875 static void get_new_segment(struct f2fs_sb_info *sbi,
876 unsigned int *newseg, bool new_sec, int dir)
878 struct free_segmap_info *free_i = FREE_I(sbi);
879 unsigned int segno, secno, zoneno;
880 unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
881 unsigned int hint = *newseg / sbi->segs_per_sec;
882 unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
883 unsigned int left_start = hint;
884 bool init = true;
885 int go_left = 0;
886 int i;
888 spin_lock(&free_i->segmap_lock);
890 if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
891 segno = find_next_zero_bit(free_i->free_segmap,
892 MAIN_SEGS(sbi), *newseg + 1);
893 if (segno - *newseg < sbi->segs_per_sec -
894 (*newseg % sbi->segs_per_sec))
895 goto got_it;
897 find_other_zone:
898 secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
899 if (secno >= MAIN_SECS(sbi)) {
900 if (dir == ALLOC_RIGHT) {
901 secno = find_next_zero_bit(free_i->free_secmap,
902 MAIN_SECS(sbi), 0);
903 f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
904 } else {
905 go_left = 1;
906 left_start = hint - 1;
909 if (go_left == 0)
910 goto skip_left;
912 while (test_bit(left_start, free_i->free_secmap)) {
913 if (left_start > 0) {
914 left_start--;
915 continue;
917 left_start = find_next_zero_bit(free_i->free_secmap,
918 MAIN_SECS(sbi), 0);
919 f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
920 break;
922 secno = left_start;
923 skip_left:
924 hint = secno;
925 segno = secno * sbi->segs_per_sec;
926 zoneno = secno / sbi->secs_per_zone;
928 /* give up on finding another zone */
929 if (!init)
930 goto got_it;
931 if (sbi->secs_per_zone == 1)
932 goto got_it;
933 if (zoneno == old_zoneno)
934 goto got_it;
935 if (dir == ALLOC_LEFT) {
936 if (!go_left && zoneno + 1 >= total_zones)
937 goto got_it;
938 if (go_left && zoneno == 0)
939 goto got_it;
941 for (i = 0; i < NR_CURSEG_TYPE; i++)
942 if (CURSEG_I(sbi, i)->zone == zoneno)
943 break;
945 if (i < NR_CURSEG_TYPE) {
946	/* zone is in use, try another */
947 if (go_left)
948 hint = zoneno * sbi->secs_per_zone - 1;
949 else if (zoneno + 1 >= total_zones)
950 hint = 0;
951 else
952 hint = (zoneno + 1) * sbi->secs_per_zone;
953 init = false;
954 goto find_other_zone;
956 got_it:
957 /* set it as dirty segment in free segmap */
958 f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
959 __set_inuse(sbi, segno);
960 *newseg = segno;
961 spin_unlock(&free_i->segmap_lock);
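/*
 * Note: the search above starts from the section containing *newseg, prefers
 * the next free segment inside the same section, otherwise looks for a free
 * section (scanning right, or left for ALLOC_LEFT); if the chosen zone is
 * already used by one of the current segments, the hint is moved past it and
 * the search is retried once.
 */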
964 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
966 struct curseg_info *curseg = CURSEG_I(sbi, type);
967 struct summary_footer *sum_footer;
969 curseg->segno = curseg->next_segno;
970 curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
971 curseg->next_blkoff = 0;
972 curseg->next_segno = NULL_SEGNO;
974 sum_footer = &(curseg->sum_blk->footer);
975 memset(sum_footer, 0, sizeof(struct summary_footer));
976 if (IS_DATASEG(type))
977 SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
978 if (IS_NODESEG(type))
979 SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
980 __set_sit_entry_type(sbi, type, curseg->segno, modified);
984 * Allocate a current working segment.
985 * This function always allocates a free segment in LFS manner.
987 static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
989 struct curseg_info *curseg = CURSEG_I(sbi, type);
990 unsigned int segno = curseg->segno;
991 int dir = ALLOC_LEFT;
993 write_sum_page(sbi, curseg->sum_blk,
994 GET_SUM_BLOCK(sbi, segno));
995 if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
996 dir = ALLOC_RIGHT;
998 if (test_opt(sbi, NOHEAP))
999 dir = ALLOC_RIGHT;
1001 get_new_segment(sbi, &segno, new_sec, dir);
1002 curseg->next_segno = segno;
1003 reset_curseg(sbi, type, 1);
1004 curseg->alloc_type = LFS;
1007 static void __next_free_blkoff(struct f2fs_sb_info *sbi,
1008 struct curseg_info *seg, block_t start)
1010 struct seg_entry *se = get_seg_entry(sbi, seg->segno);
1011 int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
1012 unsigned long *target_map = SIT_I(sbi)->tmp_map;
1013 unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
1014 unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
1015 int i, pos;
1017 for (i = 0; i < entries; i++)
1018 target_map[i] = ckpt_map[i] | cur_map[i];
1020 pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);
1022 seg->next_blkoff = pos;
1026	 * If a segment is written in LFS manner, the next block offset is simply the
1027	 * current block offset plus one. However, if a segment is written in SSR
1028	 * manner, the next block offset is obtained by calling __next_free_blkoff.
1030 static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
1031 struct curseg_info *seg)
1033 if (seg->alloc_type == SSR)
1034 __next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
1035 else
1036 seg->next_blkoff++;
1040	 * This function always allocates a used segment (from the dirty seglist) in
1041	 * SSR manner, so it should recover the existing segment information of valid blocks.
1043 static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
1045 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1046 struct curseg_info *curseg = CURSEG_I(sbi, type);
1047 unsigned int new_segno = curseg->next_segno;
1048 struct f2fs_summary_block *sum_node;
1049 struct page *sum_page;
1051 write_sum_page(sbi, curseg->sum_blk,
1052 GET_SUM_BLOCK(sbi, curseg->segno));
1053 __set_test_and_inuse(sbi, new_segno);
1055 mutex_lock(&dirty_i->seglist_lock);
1056 __remove_dirty_segment(sbi, new_segno, PRE);
1057 __remove_dirty_segment(sbi, new_segno, DIRTY);
1058 mutex_unlock(&dirty_i->seglist_lock);
1060 reset_curseg(sbi, type, 1);
1061 curseg->alloc_type = SSR;
1062 __next_free_blkoff(sbi, curseg, 0);
1064 if (reuse) {
1065 sum_page = get_sum_page(sbi, new_segno);
1066 sum_node = (struct f2fs_summary_block *)page_address(sum_page);
1067 memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
1068 f2fs_put_page(sum_page, 1);
1072 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
1074 struct curseg_info *curseg = CURSEG_I(sbi, type);
1075 const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
1077 if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
1078 return v_ops->get_victim(sbi,
1079 &(curseg)->next_segno, BG_GC, type, SSR);
1081 /* For data segments, let's do SSR more intensively */
1082 for (; type >= CURSEG_HOT_DATA; type--)
1083 if (v_ops->get_victim(sbi, &(curseg)->next_segno,
1084 BG_GC, type, SSR))
1085 return 1;
1086 return 0;
1090	 * Flush out the current segment and replace it with a new one.
1091	 * This function must succeed; otherwise it is a BUG.
1093 static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
1094 int type, bool force)
1096 struct curseg_info *curseg = CURSEG_I(sbi, type);
1098 if (force)
1099 new_curseg(sbi, type, true);
1100 else if (type == CURSEG_WARM_NODE)
1101 new_curseg(sbi, type, false);
1102 else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
1103 new_curseg(sbi, type, false);
1104 else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
1105 change_curseg(sbi, type, true);
1106 else
1107 new_curseg(sbi, type, false);
1109 stat_inc_seg_type(sbi, curseg);
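/*
 * Note on the policy above: a forced allocation or a warm node log always
 * gets a brand-new segment (LFS); an LFS log whose next segment is free also
 * stays in LFS mode; only when free space is tight (need_SSR) is a dirty
 * segment reused in place via SSR, and otherwise a new segment is taken.
 */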
1112 static void __allocate_new_segments(struct f2fs_sb_info *sbi, int type)
1114 struct curseg_info *curseg = CURSEG_I(sbi, type);
1115 unsigned int old_segno;
1117 old_segno = curseg->segno;
1118 SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true);
1119 locate_dirty_segment(sbi, old_segno);
1122 void allocate_new_segments(struct f2fs_sb_info *sbi)
1124 int i;
1126 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
1127 __allocate_new_segments(sbi, i);
1130 static const struct segment_allocation default_salloc_ops = {
1131 .allocate_segment = allocate_segment_by_default,
1134 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
1136 __u64 start = F2FS_BYTES_TO_BLK(range->start);
1137 __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
1138 unsigned int start_segno, end_segno;
1139 struct cp_control cpc;
1141 if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
1142 return -EINVAL;
1144 cpc.trimmed = 0;
1145 if (end <= MAIN_BLKADDR(sbi))
1146 goto out;
1148 /* start/end segment number in main_area */
1149 start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
1150 end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
1151 GET_SEGNO(sbi, end);
1152 cpc.reason = CP_DISCARD;
1153 cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
1155 /* do checkpoint to issue discard commands safely */
1156 for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) {
1157 cpc.trim_start = start_segno;
1159 if (sbi->discard_blks == 0)
1160 break;
1161 else if (sbi->discard_blks < BATCHED_TRIM_BLOCKS(sbi))
1162 cpc.trim_end = end_segno;
1163 else
1164 cpc.trim_end = min_t(unsigned int,
1165 rounddown(start_segno +
1166 BATCHED_TRIM_SEGMENTS(sbi),
1167 sbi->segs_per_sec) - 1, end_segno);
1169 mutex_lock(&sbi->gc_mutex);
1170 write_checkpoint(sbi, &cpc);
1171 mutex_unlock(&sbi->gc_mutex);
1173 out:
1174 range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
1175 return 0;
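/*
 * Note: FITRIM is serviced in batches -- each loop iteration covers at most
 * BATCHED_TRIM_SEGMENTS(sbi) segments (rounded down to a section boundary)
 * and triggers a checkpoint with cpc.reason == CP_DISCARD, so discard
 * commands are issued safely after each checkpoint.
 */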
1178 static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
1180 struct curseg_info *curseg = CURSEG_I(sbi, type);
1181 if (curseg->next_blkoff < sbi->blocks_per_seg)
1182 return true;
1183 return false;
1186 static int __get_segment_type_2(struct page *page, enum page_type p_type)
1188 if (p_type == DATA)
1189 return CURSEG_HOT_DATA;
1190 else
1191 return CURSEG_HOT_NODE;
1194 static int __get_segment_type_4(struct page *page, enum page_type p_type)
1196 if (p_type == DATA) {
1197 struct inode *inode = page->mapping->host;
1199 if (S_ISDIR(inode->i_mode))
1200 return CURSEG_HOT_DATA;
1201 else
1202 return CURSEG_COLD_DATA;
1203 } else {
1204 if (IS_DNODE(page) && is_cold_node(page))
1205 return CURSEG_WARM_NODE;
1206 else
1207 return CURSEG_COLD_NODE;
1211 static int __get_segment_type_6(struct page *page, enum page_type p_type)
1213 if (p_type == DATA) {
1214 struct inode *inode = page->mapping->host;
1216 if (S_ISDIR(inode->i_mode))
1217 return CURSEG_HOT_DATA;
1218 else if (is_cold_data(page) || file_is_cold(inode))
1219 return CURSEG_COLD_DATA;
1220 else
1221 return CURSEG_WARM_DATA;
1222 } else {
1223 if (IS_DNODE(page))
1224 return is_cold_node(page) ? CURSEG_WARM_NODE :
1225 CURSEG_HOT_NODE;
1226 else
1227 return CURSEG_COLD_NODE;
1231 static int __get_segment_type(struct page *page, enum page_type p_type)
1233 switch (F2FS_P_SB(page)->active_logs) {
1234 case 2:
1235 return __get_segment_type_2(page, p_type);
1236 case 4:
1237 return __get_segment_type_4(page, p_type);
1239 /* NR_CURSEG_TYPE(6) logs by default */
1240 f2fs_bug_on(F2FS_P_SB(page),
1241 F2FS_P_SB(page)->active_logs != NR_CURSEG_TYPE);
1242 return __get_segment_type_6(page, p_type);
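/*
 * Summary of the default 6-log mapping in __get_segment_type_6():
 * directory data -> HOT_DATA, cold data or data of a cold file -> COLD_DATA,
 * other data -> WARM_DATA; direct node pages marked cold -> WARM_NODE,
 * other direct node pages -> HOT_NODE, indirect node pages -> COLD_NODE.
 */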
1245 void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
1246 block_t old_blkaddr, block_t *new_blkaddr,
1247 struct f2fs_summary *sum, int type)
1249 struct sit_info *sit_i = SIT_I(sbi);
1250 struct curseg_info *curseg;
1251 bool direct_io = (type == CURSEG_DIRECT_IO);
1253 type = direct_io ? CURSEG_WARM_DATA : type;
1255 curseg = CURSEG_I(sbi, type);
1257 mutex_lock(&curseg->curseg_mutex);
1258 mutex_lock(&sit_i->sentry_lock);
1260 /* direct_io'ed data is aligned to the segment for better performance */
1261 if (direct_io && curseg->next_blkoff &&
1262 !has_not_enough_free_secs(sbi, 0))
1263 __allocate_new_segments(sbi, type);
1265 *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
1268	 * __add_sum_entry must be called with curseg_mutex held
1269	 * because this function updates a summary entry in the
1270	 * current summary block.
1272 __add_sum_entry(sbi, type, sum);
1274 __refresh_next_blkoff(sbi, curseg);
1276 stat_inc_block_count(sbi, curseg);
1278 if (!__has_curseg_space(sbi, type))
1279 sit_i->s_ops->allocate_segment(sbi, type, false);
1281 * SIT information should be updated before segment allocation,
1282 * since SSR needs latest valid block information.
1284 refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);
1286 mutex_unlock(&sit_i->sentry_lock);
1288 if (page && IS_NODESEG(type))
1289 fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
1291 mutex_unlock(&curseg->curseg_mutex);
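/*
 * Note on locking above: curseg->curseg_mutex is taken before
 * sit_i->sentry_lock; the summary entry and next_blkoff are updated, a new
 * segment is allocated if the current one is full, and the SIT entries for
 * the old and new block addresses are refreshed before the locks are
 * released.
 */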
1294 static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
1296 int type = __get_segment_type(fio->page, fio->type);
1298 allocate_data_block(fio->sbi, fio->page, fio->blk_addr,
1299 &fio->blk_addr, sum, type);
1301 /* writeout dirty page into bdev */
1302 f2fs_submit_page_mbio(fio);
1305 void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
1307 struct f2fs_io_info fio = {
1308 .sbi = sbi,
1309 .type = META,
1310 .rw = WRITE_SYNC | REQ_META | REQ_PRIO,
1311 .blk_addr = page->index,
1312 .page = page,
1313 .encrypted_page = NULL,
1316 if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
1317 fio.rw &= ~REQ_META;
1319 set_page_writeback(page);
1320 f2fs_submit_page_mbio(&fio);
1323 void write_node_page(unsigned int nid, struct f2fs_io_info *fio)
1325 struct f2fs_summary sum;
1327 set_summary(&sum, nid, 0, 0);
1328 do_write_page(&sum, fio);
1331 void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
1333 struct f2fs_sb_info *sbi = fio->sbi;
1334 struct f2fs_summary sum;
1335 struct node_info ni;
1337 f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
1338 get_node_info(sbi, dn->nid, &ni);
1339 set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
1340 do_write_page(&sum, fio);
1341 dn->data_blkaddr = fio->blk_addr;
1344 void rewrite_data_page(struct f2fs_io_info *fio)
1346 stat_inc_inplace_blocks(fio->sbi);
1347 f2fs_submit_page_mbio(fio);
1350 static void __f2fs_replace_block(struct f2fs_sb_info *sbi,
1351 struct f2fs_summary *sum,
1352 block_t old_blkaddr, block_t new_blkaddr,
1353 bool recover_curseg)
1355 struct sit_info *sit_i = SIT_I(sbi);
1356 struct curseg_info *curseg;
1357 unsigned int segno, old_cursegno;
1358 struct seg_entry *se;
1359 int type;
1360 unsigned short old_blkoff;
1362 segno = GET_SEGNO(sbi, new_blkaddr);
1363 se = get_seg_entry(sbi, segno);
1364 type = se->type;
1366 if (!recover_curseg) {
1367 /* for recovery flow */
1368 if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
1369 if (old_blkaddr == NULL_ADDR)
1370 type = CURSEG_COLD_DATA;
1371 else
1372 type = CURSEG_WARM_DATA;
1374 } else {
1375 if (!IS_CURSEG(sbi, segno))
1376 type = CURSEG_WARM_DATA;
1379 curseg = CURSEG_I(sbi, type);
1381 mutex_lock(&curseg->curseg_mutex);
1382 mutex_lock(&sit_i->sentry_lock);
1384 old_cursegno = curseg->segno;
1385 old_blkoff = curseg->next_blkoff;
1387 /* change the current segment */
1388 if (segno != curseg->segno) {
1389 curseg->next_segno = segno;
1390 change_curseg(sbi, type, true);
1393 curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
1394 __add_sum_entry(sbi, type, sum);
1396 if (!recover_curseg)
1397 update_sit_entry(sbi, new_blkaddr, 1);
1398 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
1399 update_sit_entry(sbi, old_blkaddr, -1);
1401 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
1402 locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
1404 locate_dirty_segment(sbi, old_cursegno);
1406 if (recover_curseg) {
1407 if (old_cursegno != curseg->segno) {
1408 curseg->next_segno = old_cursegno;
1409 change_curseg(sbi, type, true);
1411 curseg->next_blkoff = old_blkoff;
1414 mutex_unlock(&sit_i->sentry_lock);
1415 mutex_unlock(&curseg->curseg_mutex);
1418 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
1419 block_t old_addr, block_t new_addr,
1420 unsigned char version, bool recover_curseg)
1422 struct f2fs_summary sum;
1424 set_summary(&sum, dn->nid, dn->ofs_in_node, version);
1426 __f2fs_replace_block(sbi, &sum, old_addr, new_addr, recover_curseg);
1428 dn->data_blkaddr = new_addr;
1429 set_data_blkaddr(dn);
1430 f2fs_update_extent_cache(dn);
1433 static inline bool is_merged_page(struct f2fs_sb_info *sbi,
1434 struct page *page, enum page_type type)
1436 enum page_type btype = PAGE_TYPE_OF_BIO(type);
1437 struct f2fs_bio_info *io = &sbi->write_io[btype];
1438 struct bio_vec *bvec;
1439 struct page *target;
1440 int i;
1442 down_read(&io->io_rwsem);
1443 if (!io->bio) {
1444 up_read(&io->io_rwsem);
1445 return false;
1448 bio_for_each_segment_all(bvec, io->bio, i) {
1450 if (bvec->bv_page->mapping) {
1451 target = bvec->bv_page;
1452 } else {
1453 struct f2fs_crypto_ctx *ctx;
1455 /* encrypted page */
1456 ctx = (struct f2fs_crypto_ctx *)page_private(
1457 bvec->bv_page);
1458 target = ctx->w.control_page;
1461 if (page == target) {
1462 up_read(&io->io_rwsem);
1463 return true;
1467 up_read(&io->io_rwsem);
1468 return false;
1471 void f2fs_wait_on_page_writeback(struct page *page,
1472 enum page_type type)
1474 if (PageWriteback(page)) {
1475 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1477 if (is_merged_page(sbi, page, type))
1478 f2fs_submit_merged_bio(sbi, type, WRITE);
1479 wait_on_page_writeback(page);
1483 void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi,
1484 block_t blkaddr)
1486 struct page *cpage;
1488 if (blkaddr == NEW_ADDR)
1489 return;
1491 f2fs_bug_on(sbi, blkaddr == NULL_ADDR);
1493 cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
1494 if (cpage) {
1495 f2fs_wait_on_page_writeback(cpage, DATA);
1496 f2fs_put_page(cpage, 1);
1500 static int read_compacted_summaries(struct f2fs_sb_info *sbi)
1502 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1503 struct curseg_info *seg_i;
1504 unsigned char *kaddr;
1505 struct page *page;
1506 block_t start;
1507 int i, j, offset;
1509 start = start_sum_block(sbi);
1511 page = get_meta_page(sbi, start++);
1512 kaddr = (unsigned char *)page_address(page);
1514 /* Step 1: restore nat cache */
1515 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
1516 memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);
1518 /* Step 2: restore sit cache */
1519 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
1520 memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE,
1521 SUM_JOURNAL_SIZE);
1522 offset = 2 * SUM_JOURNAL_SIZE;
1524 /* Step 3: restore summary entries */
1525 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1526 unsigned short blk_off;
1527 unsigned int segno;
1529 seg_i = CURSEG_I(sbi, i);
1530 segno = le32_to_cpu(ckpt->cur_data_segno[i]);
1531 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
1532 seg_i->next_segno = segno;
1533 reset_curseg(sbi, i, 0);
1534 seg_i->alloc_type = ckpt->alloc_type[i];
1535 seg_i->next_blkoff = blk_off;
1537 if (seg_i->alloc_type == SSR)
1538 blk_off = sbi->blocks_per_seg;
1540 for (j = 0; j < blk_off; j++) {
1541 struct f2fs_summary *s;
1542 s = (struct f2fs_summary *)(kaddr + offset);
1543 seg_i->sum_blk->entries[j] = *s;
1544 offset += SUMMARY_SIZE;
1545 if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
1546 SUM_FOOTER_SIZE)
1547 continue;
1549 f2fs_put_page(page, 1);
1550 page = NULL;
1552 page = get_meta_page(sbi, start++);
1553 kaddr = (unsigned char *)page_address(page);
1554 offset = 0;
1557 f2fs_put_page(page, 1);
1558 return 0;
1561 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
1563 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1564 struct f2fs_summary_block *sum;
1565 struct curseg_info *curseg;
1566 struct page *new;
1567 unsigned short blk_off;
1568 unsigned int segno = 0;
1569 block_t blk_addr = 0;
1571 /* get segment number and block addr */
1572 if (IS_DATASEG(type)) {
1573 segno = le32_to_cpu(ckpt->cur_data_segno[type]);
1574 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
1575 CURSEG_HOT_DATA]);
1576 if (__exist_node_summaries(sbi))
1577 blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
1578 else
1579 blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
1580 } else {
1581 segno = le32_to_cpu(ckpt->cur_node_segno[type -
1582 CURSEG_HOT_NODE]);
1583 blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
1584 CURSEG_HOT_NODE]);
1585 if (__exist_node_summaries(sbi))
1586 blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
1587 type - CURSEG_HOT_NODE);
1588 else
1589 blk_addr = GET_SUM_BLOCK(sbi, segno);
1592 new = get_meta_page(sbi, blk_addr);
1593 sum = (struct f2fs_summary_block *)page_address(new);
1595 if (IS_NODESEG(type)) {
1596 if (__exist_node_summaries(sbi)) {
1597 struct f2fs_summary *ns = &sum->entries[0];
1598 int i;
1599 for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
1600 ns->version = 0;
1601 ns->ofs_in_node = 0;
1603 } else {
1604 int err;
1606 err = restore_node_summary(sbi, segno, sum);
1607 if (err) {
1608 f2fs_put_page(new, 1);
1609 return err;
1614 /* set uncompleted segment to curseg */
1615 curseg = CURSEG_I(sbi, type);
1616 mutex_lock(&curseg->curseg_mutex);
1617 memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE);
1618 curseg->next_segno = segno;
1619 reset_curseg(sbi, type, 0);
1620 curseg->alloc_type = ckpt->alloc_type[type];
1621 curseg->next_blkoff = blk_off;
1622 mutex_unlock(&curseg->curseg_mutex);
1623 f2fs_put_page(new, 1);
1624 return 0;
1627 static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
1629 int type = CURSEG_HOT_DATA;
1630 int err;
1632 if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
1633 int npages = npages_for_summary_flush(sbi, true);
1635 if (npages >= 2)
1636 ra_meta_pages(sbi, start_sum_block(sbi), npages,
1637 META_CP, true);
1639 /* restore for compacted data summary */
1640 if (read_compacted_summaries(sbi))
1641 return -EINVAL;
1642 type = CURSEG_HOT_NODE;
1645 if (__exist_node_summaries(sbi))
1646 ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
1647 NR_CURSEG_TYPE - type, META_CP, true);
1649 for (; type <= CURSEG_COLD_NODE; type++) {
1650 err = read_normal_summaries(sbi, type);
1651 if (err)
1652 return err;
1655 return 0;
1658 static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
1660 struct page *page;
1661 unsigned char *kaddr;
1662 struct f2fs_summary *summary;
1663 struct curseg_info *seg_i;
1664 int written_size = 0;
1665 int i, j;
1667 page = grab_meta_page(sbi, blkaddr++);
1668 kaddr = (unsigned char *)page_address(page);
1670 /* Step 1: write nat cache */
1671 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
1672 memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE);
1673 written_size += SUM_JOURNAL_SIZE;
1675 /* Step 2: write sit cache */
1676 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
1677 memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits,
1678 SUM_JOURNAL_SIZE);
1679 written_size += SUM_JOURNAL_SIZE;
1681 /* Step 3: write summary entries */
1682 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1683 unsigned short blkoff;
1684 seg_i = CURSEG_I(sbi, i);
1685 if (sbi->ckpt->alloc_type[i] == SSR)
1686 blkoff = sbi->blocks_per_seg;
1687 else
1688 blkoff = curseg_blkoff(sbi, i);
1690 for (j = 0; j < blkoff; j++) {
1691 if (!page) {
1692 page = grab_meta_page(sbi, blkaddr++);
1693 kaddr = (unsigned char *)page_address(page);
1694 written_size = 0;
1696 summary = (struct f2fs_summary *)(kaddr + written_size);
1697 *summary = seg_i->sum_blk->entries[j];
1698 written_size += SUMMARY_SIZE;
1700 if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
1701 SUM_FOOTER_SIZE)
1702 continue;
1704 set_page_dirty(page);
1705 f2fs_put_page(page, 1);
1706 page = NULL;
1709 if (page) {
1710 set_page_dirty(page);
1711 f2fs_put_page(page, 1);
1715 static void write_normal_summaries(struct f2fs_sb_info *sbi,
1716 block_t blkaddr, int type)
1718 int i, end;
1719 if (IS_DATASEG(type))
1720 end = type + NR_CURSEG_DATA_TYPE;
1721 else
1722 end = type + NR_CURSEG_NODE_TYPE;
1724 for (i = type; i < end; i++) {
1725 struct curseg_info *sum = CURSEG_I(sbi, i);
1726 mutex_lock(&sum->curseg_mutex);
1727 write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type));
1728 mutex_unlock(&sum->curseg_mutex);
1732 void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
1734 if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG))
1735 write_compacted_summaries(sbi, start_blk);
1736 else
1737 write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
1740 void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
1742 write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
1745 int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
1746 unsigned int val, int alloc)
1748 int i;
1750 if (type == NAT_JOURNAL) {
1751 for (i = 0; i < nats_in_cursum(sum); i++) {
1752 if (le32_to_cpu(nid_in_journal(sum, i)) == val)
1753 return i;
1755 if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES)
1756 return update_nats_in_cursum(sum, 1);
1757 } else if (type == SIT_JOURNAL) {
1758 for (i = 0; i < sits_in_cursum(sum); i++)
1759 if (le32_to_cpu(segno_in_journal(sum, i)) == val)
1760 return i;
1761 if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES)
1762 return update_sits_in_cursum(sum, 1);
1764 return -1;
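/*
 * Usage note: callers pass alloc == 1 when they may need a new journal slot,
 * e.g. lookup_journal_in_cursum(sum, SIT_JOURNAL, segno, 1) in
 * flush_sit_entries(); a return value of -1 means the entry was not found
 * and no room was left (or requested) to allocate one.
 */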
1767 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
1768 unsigned int segno)
1770 return get_meta_page(sbi, current_sit_addr(sbi, segno));
1773 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
1774 unsigned int start)
1776 struct sit_info *sit_i = SIT_I(sbi);
1777 struct page *src_page, *dst_page;
1778 pgoff_t src_off, dst_off;
1779 void *src_addr, *dst_addr;
1781 src_off = current_sit_addr(sbi, start);
1782 dst_off = next_sit_addr(sbi, src_off);
1784 /* get current sit block page without lock */
1785 src_page = get_meta_page(sbi, src_off);
1786 dst_page = grab_meta_page(sbi, dst_off);
1787 f2fs_bug_on(sbi, PageDirty(src_page));
1789 src_addr = page_address(src_page);
1790 dst_addr = page_address(dst_page);
1791 memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
1793 set_page_dirty(dst_page);
1794 f2fs_put_page(src_page, 1);
1796 set_to_next_sit(sit_i, start);
1798 return dst_page;
1801 static struct sit_entry_set *grab_sit_entry_set(void)
1803 struct sit_entry_set *ses =
1804 f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS);
1806 ses->entry_cnt = 0;
1807 INIT_LIST_HEAD(&ses->set_list);
1808 return ses;
1811 static void release_sit_entry_set(struct sit_entry_set *ses)
1813 list_del(&ses->set_list);
1814 kmem_cache_free(sit_entry_set_slab, ses);
1817 static void adjust_sit_entry_set(struct sit_entry_set *ses,
1818 struct list_head *head)
1820 struct sit_entry_set *next = ses;
1822 if (list_is_last(&ses->set_list, head))
1823 return;
1825 list_for_each_entry_continue(next, head, set_list)
1826 if (ses->entry_cnt <= next->entry_cnt)
1827 break;
1829 list_move_tail(&ses->set_list, &next->set_list);
1832 static void add_sit_entry(unsigned int segno, struct list_head *head)
1834 struct sit_entry_set *ses;
1835 unsigned int start_segno = START_SEGNO(segno);
1837 list_for_each_entry(ses, head, set_list) {
1838 if (ses->start_segno == start_segno) {
1839 ses->entry_cnt++;
1840 adjust_sit_entry_set(ses, head);
1841 return;
1845 ses = grab_sit_entry_set();
1847 ses->start_segno = start_segno;
1848 ses->entry_cnt++;
1849 list_add(&ses->set_list, head);
1852 static void add_sits_in_set(struct f2fs_sb_info *sbi)
1854 struct f2fs_sm_info *sm_info = SM_I(sbi);
1855 struct list_head *set_list = &sm_info->sit_entry_set;
1856 unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
1857 unsigned int segno;
1859 for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
1860 add_sit_entry(segno, set_list);
1863 static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
1865 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1866 struct f2fs_summary_block *sum = curseg->sum_blk;
1867 int i;
1869 for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
1870 unsigned int segno;
1871 bool dirtied;
1873 segno = le32_to_cpu(segno_in_journal(sum, i));
1874 dirtied = __mark_sit_entry_dirty(sbi, segno);
1876 if (!dirtied)
1877 add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
1879 update_sits_in_cursum(sum, -sits_in_cursum(sum));
1883 * CP calls this function, which flushes SIT entries including sit_journal,
1884 * and moves prefree segs to free segs.
1886 void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1888 struct sit_info *sit_i = SIT_I(sbi);
1889 unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
1890 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1891 struct f2fs_summary_block *sum = curseg->sum_blk;
1892 struct sit_entry_set *ses, *tmp;
1893 struct list_head *head = &SM_I(sbi)->sit_entry_set;
1894 bool to_journal = true;
1895 struct seg_entry *se;
1897 mutex_lock(&curseg->curseg_mutex);
1898 mutex_lock(&sit_i->sentry_lock);
1900 if (!sit_i->dirty_sentries)
1901 goto out;
1904 * add and account sit entries of dirty bitmap in sit entry
1905 * set temporarily
1907 add_sits_in_set(sbi);
1910	 * if there is not enough space in the journal to store dirty sit
1911	 * entries, remove all entries from the journal and add and account
1912	 * them in the sit entry set.
1914 if (!__has_cursum_space(sum, sit_i->dirty_sentries, SIT_JOURNAL))
1915 remove_sits_in_journal(sbi);
1918 * there are two steps to flush sit entries:
1919 * #1, flush sit entries to journal in current cold data summary block.
1920 * #2, flush sit entries to sit page.
1922 list_for_each_entry_safe(ses, tmp, head, set_list) {
1923 struct page *page = NULL;
1924 struct f2fs_sit_block *raw_sit = NULL;
1925 unsigned int start_segno = ses->start_segno;
1926 unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
1927 (unsigned long)MAIN_SEGS(sbi));
1928 unsigned int segno = start_segno;
1930 if (to_journal &&
1931 !__has_cursum_space(sum, ses->entry_cnt, SIT_JOURNAL))
1932 to_journal = false;
1934 if (!to_journal) {
1935 page = get_next_sit_page(sbi, start_segno);
1936 raw_sit = page_address(page);
1939 /* flush dirty sit entries in region of current sit set */
1940 for_each_set_bit_from(segno, bitmap, end) {
1941 int offset, sit_offset;
1943 se = get_seg_entry(sbi, segno);
1945 /* add discard candidates */
1946 if (cpc->reason != CP_DISCARD) {
1947 cpc->trim_start = segno;
1948 add_discard_addrs(sbi, cpc);
1951 if (to_journal) {
1952 offset = lookup_journal_in_cursum(sum,
1953 SIT_JOURNAL, segno, 1);
1954 f2fs_bug_on(sbi, offset < 0);
1955 segno_in_journal(sum, offset) =
1956 cpu_to_le32(segno);
1957 seg_info_to_raw_sit(se,
1958 &sit_in_journal(sum, offset));
1959 } else {
1960 sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
1961 seg_info_to_raw_sit(se,
1962 &raw_sit->entries[sit_offset]);
1965 __clear_bit(segno, bitmap);
1966 sit_i->dirty_sentries--;
1967 ses->entry_cnt--;
1970 if (!to_journal)
1971 f2fs_put_page(page, 1);
1973 f2fs_bug_on(sbi, ses->entry_cnt);
1974 release_sit_entry_set(ses);
1977 f2fs_bug_on(sbi, !list_empty(head));
1978 f2fs_bug_on(sbi, sit_i->dirty_sentries);
1979 out:
1980 if (cpc->reason == CP_DISCARD) {
1981 for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
1982 add_discard_addrs(sbi, cpc);
1984 mutex_unlock(&sit_i->sentry_lock);
1985 mutex_unlock(&curseg->curseg_mutex);
1987 set_prefree_as_free_segments(sbi);
1990 static int build_sit_info(struct f2fs_sb_info *sbi)
1992 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
1993 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1994 struct sit_info *sit_i;
1995 unsigned int sit_segs, start;
1996 char *src_bitmap, *dst_bitmap;
1997 unsigned int bitmap_size;
1999 /* allocate memory for SIT information */
2000 sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
2001 if (!sit_i)
2002 return -ENOMEM;
2004 SM_I(sbi)->sit_info = sit_i;
2006 sit_i->sentries = f2fs_kvzalloc(MAIN_SEGS(sbi) *
2007 sizeof(struct seg_entry), GFP_KERNEL);
2008 if (!sit_i->sentries)
2009 return -ENOMEM;
2011 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
2012 sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
2013 if (!sit_i->dirty_sentries_bitmap)
2014 return -ENOMEM;
2016 for (start = 0; start < MAIN_SEGS(sbi); start++) {
2017 sit_i->sentries[start].cur_valid_map
2018 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
2019 sit_i->sentries[start].ckpt_valid_map
2020 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
2021 sit_i->sentries[start].discard_map
2022 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
2023 if (!sit_i->sentries[start].cur_valid_map ||
2024 !sit_i->sentries[start].ckpt_valid_map ||
2025 !sit_i->sentries[start].discard_map)
2026 return -ENOMEM;
2029 sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
2030 if (!sit_i->tmp_map)
2031 return -ENOMEM;
2033 if (sbi->segs_per_sec > 1) {
2034 sit_i->sec_entries = f2fs_kvzalloc(MAIN_SECS(sbi) *
2035 sizeof(struct sec_entry), GFP_KERNEL);
2036 if (!sit_i->sec_entries)
2037 return -ENOMEM;
2040 /* get information related with SIT */
2041 sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
2043	/* setup SIT bitmap from checkpoint pack */
2044 bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
2045 src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
2047 dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
2048 if (!dst_bitmap)
2049 return -ENOMEM;
2051 /* init SIT information */
2052 sit_i->s_ops = &default_salloc_ops;
2054 sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
2055 sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
2056 sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
2057 sit_i->sit_bitmap = dst_bitmap;
2058 sit_i->bitmap_size = bitmap_size;
2059 sit_i->dirty_sentries = 0;
2060 sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
2061 sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
2062 sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
2063 mutex_init(&sit_i->sentry_lock);
2064 return 0;
static int build_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i;
	unsigned int bitmap_size, sec_bitmap_size;

	/* allocate memory for free segmap information */
	free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
	if (!free_i)
		return -ENOMEM;

	SM_I(sbi)->free_info = free_i;

	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
	free_i->free_segmap = f2fs_kvmalloc(bitmap_size, GFP_KERNEL);
	if (!free_i->free_segmap)
		return -ENOMEM;

	sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
	free_i->free_secmap = f2fs_kvmalloc(sec_bitmap_size, GFP_KERNEL);
	if (!free_i->free_secmap)
		return -ENOMEM;

	/* set all segments as dirty temporarily */
	memset(free_i->free_segmap, 0xff, bitmap_size);
	memset(free_i->free_secmap, 0xff, sec_bitmap_size);

	/* init free segmap information */
	free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
	free_i->free_segments = 0;
	free_i->free_sections = 0;
	spin_lock_init(&free_i->segmap_lock);
	return 0;
}
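
/*
 * Allocate the array of current (active) segments, one per curseg type,
 * and restore their summary blocks via restore_curseg_summaries().
 */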
static int build_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array;
	int i;

	array = kcalloc(NR_CURSEG_TYPE, sizeof(*array), GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	SM_I(sbi)->curseg_array = array;

	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		mutex_init(&array[i].curseg_mutex);
		array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
		if (!array[i].sum_blk)
			return -ENOMEM;
		array[i].segno = NULL_SEGNO;
		array[i].next_blkoff = 0;
	}
	return restore_curseg_summaries(sbi);
}
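
/*
 * Populate the in-memory seg_entry array from the on-disk SIT blocks
 * (read in batches via ra_meta_pages), preferring entries still cached
 * in the cold-data summary journal.  Also seeds each segment's discard
 * map and the per-section valid block counts.
 */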
static void build_sit_entries(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int sit_blk_cnt = SIT_BLK_CNT(sbi);
	unsigned int i, start, end;
	unsigned int readed, start_blk = 0;
	int nrpages = MAX_BIO_BLOCKS(sbi);

	do {
		readed = ra_meta_pages(sbi, start_blk, nrpages, META_SIT, true);

		start = start_blk * sit_i->sents_per_block;
		end = (start_blk + readed) * sit_i->sents_per_block;

		for (; start < end && start < MAIN_SEGS(sbi); start++) {
			struct seg_entry *se = &sit_i->sentries[start];
			struct f2fs_sit_block *sit_blk;
			struct f2fs_sit_entry sit;
			struct page *page;

			mutex_lock(&curseg->curseg_mutex);
			for (i = 0; i < sits_in_cursum(sum); i++) {
				if (le32_to_cpu(segno_in_journal(sum, i))
								== start) {
					sit = sit_in_journal(sum, i);
					mutex_unlock(&curseg->curseg_mutex);
					goto got_it;
				}
			}
			mutex_unlock(&curseg->curseg_mutex);

			page = get_current_sit_page(sbi, start);
			sit_blk = (struct f2fs_sit_block *)page_address(page);
			sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
			f2fs_put_page(page, 1);
got_it:
			check_block_count(sbi, start, &sit);
			seg_info_from_raw_sit(se, &sit);

			/* build discard map only one time */
			memcpy(se->discard_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
			sbi->discard_blks += sbi->blocks_per_seg - se->valid_blocks;

			if (sbi->segs_per_sec > 1) {
				struct sec_entry *e = get_sec_entry(sbi, start);
				e->valid_blocks += se->valid_blocks;
			}
		}
		start_blk += readed;
	} while (start_blk < sit_blk_cnt);
}
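
/*
 * Clear the "in use" bit of every segment that holds no valid blocks,
 * then re-mark the currently open segments as in use.
 */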
static void init_free_segmap(struct f2fs_sb_info *sbi)
{
	unsigned int start;
	int type;

	for (start = 0; start < MAIN_SEGS(sbi); start++) {
		struct seg_entry *sentry = get_seg_entry(sbi, start);
		if (!sentry->valid_blocks)
			__set_free(sbi, start);
	}

	/* set the current segments as in use */
	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
		struct curseg_info *curseg_t = CURSEG_I(sbi, type);
		__set_test_and_inuse(sbi, curseg_t->segno);
	}
}
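
/*
 * Walk the in-use segments and flag as dirty any segment that is only
 * partially filled with valid blocks.
 */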
static void init_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno = 0, offset = 0;
	unsigned short valid_blocks;

	while (1) {
		/* find dirty segment based on free segmap */
		segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
		if (segno >= MAIN_SEGS(sbi))
			break;
		offset = segno + 1;
		valid_blocks = get_valid_blocks(sbi, segno, 0);
		if (valid_blocks == sbi->blocks_per_seg || !valid_blocks)
			continue;
		if (valid_blocks > sbi->blocks_per_seg) {
			f2fs_bug_on(sbi, 1);
			continue;
		}
		mutex_lock(&dirty_i->seglist_lock);
		__locate_dirty_segment(sbi, segno, DIRTY);
		mutex_unlock(&dirty_i->seglist_lock);
	}
}
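
/* Allocate the per-section bitmap consulted during GC victim selection. */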
static int init_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));

	dirty_i->victim_secmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
	if (!dirty_i->victim_secmap)
		return -ENOMEM;
	return 0;
}
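
/*
 * Allocate one dirty-segment bitmap per dirty type, populate the DIRTY
 * list from the current segment state, and set up the victim section map.
 */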
static int build_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i;
	unsigned int bitmap_size, i;

	/* allocate memory for dirty segments list information */
	dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
	if (!dirty_i)
		return -ENOMEM;

	SM_I(sbi)->dirty_info = dirty_i;
	mutex_init(&dirty_i->seglist_lock);

	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));

	for (i = 0; i < NR_DIRTY_TYPE; i++) {
		dirty_i->dirty_segmap[i] = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
		if (!dirty_i->dirty_segmap[i])
			return -ENOMEM;
	}

	init_dirty_segmap(sbi);
	return init_victim_secmap(sbi);
}
/*
 * Update min, max modified time for cost-benefit GC algorithm
 */
static void init_min_max_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno;

	mutex_lock(&sit_i->sentry_lock);

	sit_i->min_mtime = LLONG_MAX;

	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
		unsigned int i;
		unsigned long long mtime = 0;

		for (i = 0; i < sbi->segs_per_sec; i++)
			mtime += get_seg_entry(sbi, segno + i)->mtime;

		mtime = div_u64(mtime, sbi->segs_per_sec);

		if (sit_i->min_mtime > mtime)
			sit_i->min_mtime = mtime;
	}
	sit_i->max_mtime = get_mtime(sbi);
	mutex_unlock(&sit_i->sentry_lock);
}
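
/*
 * Top-level constructor for the segment manager: fill f2fs_sm_info from
 * the superblock and checkpoint, set up flush-merge command control if
 * enabled, then build the SIT, free/current/dirty segment structures and
 * the min/max modified times, in that order.
 */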
int build_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_sm_info *sm_info;
	int err;

	sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
	if (!sm_info)
		return -ENOMEM;

	/* init sm info */
	sbi->sm_info = sm_info;
	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	sm_info->rec_prefree_segments = sm_info->main_segments *
					DEF_RECLAIM_PREFREE_SEGMENTS / 100;
	sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;

	INIT_LIST_HEAD(&sm_info->discard_list);
	sm_info->nr_discards = 0;
	sm_info->max_discards = 0;

	sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;

	INIT_LIST_HEAD(&sm_info->sit_entry_set);

	if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) {
		err = create_flush_cmd_control(sbi);
		if (err)
			return err;
	}

	err = build_sit_info(sbi);
	if (err)
		return err;
	err = build_free_segmap(sbi);
	if (err)
		return err;
	err = build_curseg(sbi);
	if (err)
		return err;

	/* reinit free segmap based on SIT */
	build_sit_entries(sbi);

	init_free_segmap(sbi);
	err = build_dirty_segmap(sbi);
	if (err)
		return err;

	init_min_max_mtime(sbi);
	return 0;
}
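
/*
 * The destroy_* helpers below release the structures allocated by the
 * corresponding build_*/init_* routines above.
 */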
static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
					enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	kvfree(dirty_i->dirty_segmap[dirty_type]);
	dirty_i->nr_dirty[dirty_type] = 0;
	mutex_unlock(&dirty_i->seglist_lock);
}
static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	kvfree(dirty_i->victim_secmap);
}
static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	int i;

	if (!dirty_i)
		return;

	/* discard pre-free/dirty segments list */
	for (i = 0; i < NR_DIRTY_TYPE; i++)
		discard_dirty_segmap(sbi, i);

	destroy_victim_secmap(sbi);
	SM_I(sbi)->dirty_info = NULL;
	kfree(dirty_i);
}
static void destroy_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array = SM_I(sbi)->curseg_array;
	int i;

	if (!array)
		return;
	SM_I(sbi)->curseg_array = NULL;
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		kfree(array[i].sum_blk);
	kfree(array);
}
static void destroy_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i = SM_I(sbi)->free_info;
	if (!free_i)
		return;
	SM_I(sbi)->free_info = NULL;
	kvfree(free_i->free_segmap);
	kvfree(free_i->free_secmap);
	kfree(free_i);
}
static void destroy_sit_info(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int start;

	if (!sit_i)
		return;

	if (sit_i->sentries) {
		for (start = 0; start < MAIN_SEGS(sbi); start++) {
			kfree(sit_i->sentries[start].cur_valid_map);
			kfree(sit_i->sentries[start].ckpt_valid_map);
			kfree(sit_i->sentries[start].discard_map);
		}
	}
	kfree(sit_i->tmp_map);

	kvfree(sit_i->sentries);
	kvfree(sit_i->sec_entries);
	kvfree(sit_i->dirty_sentries_bitmap);

	SM_I(sbi)->sit_info = NULL;
	kfree(sit_i->sit_bitmap);
	kfree(sit_i);
}
void destroy_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);

	if (!sm_info)
		return;
	destroy_flush_cmd_control(sbi);
	destroy_dirty_segmap(sbi);
	destroy_curseg(sbi);
	destroy_free_segmap(sbi);
	destroy_sit_info(sbi);
	sbi->sm_info = NULL;
	kfree(sm_info);
}
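
/*
 * Slab caches for discard entries, SIT entry sets and in-memory page
 * entries; created once at module init and destroyed at module exit.
 * Creation unwinds in reverse order on failure.
 */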
int __init create_segment_manager_caches(void)
{
	discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
			sizeof(struct discard_entry));
	if (!discard_entry_slab)
		goto fail;

	sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
			sizeof(struct sit_entry_set));
	if (!sit_entry_set_slab)
		goto destroy_discard_entry;

	inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
			sizeof(struct inmem_pages));
	if (!inmem_entry_slab)
		goto destroy_sit_entry_set;
	return 0;

destroy_sit_entry_set:
	kmem_cache_destroy(sit_entry_set_slab);
destroy_discard_entry:
	kmem_cache_destroy(discard_entry_slab);
fail:
	return -ENOMEM;
}
void destroy_segment_manager_caches(void)
{
	kmem_cache_destroy(sit_entry_set_slab);
	kmem_cache_destroy(discard_entry_slab);
	kmem_cache_destroy(inmem_entry_slab);
}