Linux 4.8.3
[linux/fpc-iii.git] / fs / f2fs / shrinker.c
blob46c9154259239f284cb3a8a73c20ec7ded2356c4
/*
 * f2fs shrinker support
 *   the basic infra was copied from fs/ubifs/shrinker.c
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
12 #include <linux/fs.h>
13 #include <linux/f2fs_fs.h>
15 #include "f2fs.h"
16 #include "node.h"
/* All mounted f2fs instances that registered with the shrinker. */
static LIST_HEAD(f2fs_list);
/* Protects f2fs_list and its traversal in count/scan below. */
static DEFINE_SPINLOCK(f2fs_list_lock);
/* Pass counter; a scan tags each sb so one pass visits it at most once. */
static unsigned int shrinker_run_no;
22 static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
24 return NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
27 static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
29 if (NM_I(sbi)->fcnt > MAX_FREE_NIDS)
30 return NM_I(sbi)->fcnt - MAX_FREE_NIDS;
31 return 0;
34 static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
36 return atomic_read(&sbi->total_zombie_tree) +
37 atomic_read(&sbi->total_ext_node);
40 unsigned long f2fs_shrink_count(struct shrinker *shrink,
41 struct shrink_control *sc)
43 struct f2fs_sb_info *sbi;
44 struct list_head *p;
45 unsigned long count = 0;
47 spin_lock(&f2fs_list_lock);
48 p = f2fs_list.next;
49 while (p != &f2fs_list) {
50 sbi = list_entry(p, struct f2fs_sb_info, s_list);
52 /* stop f2fs_put_super */
53 if (!mutex_trylock(&sbi->umount_mutex)) {
54 p = p->next;
55 continue;
57 spin_unlock(&f2fs_list_lock);
59 /* count extent cache entries */
60 count += __count_extent_cache(sbi);
62 /* shrink clean nat cache entries */
63 count += __count_nat_entries(sbi);
65 /* count free nids cache entries */
66 count += __count_free_nids(sbi);
68 spin_lock(&f2fs_list_lock);
69 p = p->next;
70 mutex_unlock(&sbi->umount_mutex);
72 spin_unlock(&f2fs_list_lock);
73 return count;
76 unsigned long f2fs_shrink_scan(struct shrinker *shrink,
77 struct shrink_control *sc)
79 unsigned long nr = sc->nr_to_scan;
80 struct f2fs_sb_info *sbi;
81 struct list_head *p;
82 unsigned int run_no;
83 unsigned long freed = 0;
85 spin_lock(&f2fs_list_lock);
86 do {
87 run_no = ++shrinker_run_no;
88 } while (run_no == 0);
89 p = f2fs_list.next;
90 while (p != &f2fs_list) {
91 sbi = list_entry(p, struct f2fs_sb_info, s_list);
93 if (sbi->shrinker_run_no == run_no)
94 break;
96 /* stop f2fs_put_super */
97 if (!mutex_trylock(&sbi->umount_mutex)) {
98 p = p->next;
99 continue;
101 spin_unlock(&f2fs_list_lock);
103 sbi->shrinker_run_no = run_no;
105 /* shrink extent cache entries */
106 freed += f2fs_shrink_extent_tree(sbi, nr >> 1);
108 /* shrink clean nat cache entries */
109 if (freed < nr)
110 freed += try_to_free_nats(sbi, nr - freed);
112 /* shrink free nids cache entries */
113 if (freed < nr)
114 freed += try_to_free_nids(sbi, nr - freed);
116 spin_lock(&f2fs_list_lock);
117 p = p->next;
118 list_move_tail(&sbi->s_list, &f2fs_list);
119 mutex_unlock(&sbi->umount_mutex);
120 if (freed >= nr)
121 break;
123 spin_unlock(&f2fs_list_lock);
124 return freed;
127 void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
129 spin_lock(&f2fs_list_lock);
130 list_add_tail(&sbi->s_list, &f2fs_list);
131 spin_unlock(&f2fs_list_lock);
134 void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
136 f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));
138 spin_lock(&f2fs_list_lock);
139 list_del(&sbi->s_list);
140 spin_unlock(&f2fs_list_lock);