Linux 5.6.13
fs/f2fs/shrinker.c
// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs shrinker support
 *   the basic infra was copied from fs/ubifs/shrinker.c
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"
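
/*
 * f2fs_list holds every mounted f2fs instance that has joined the
 * shrinker (see f2fs_join_shrinker()); f2fs_list_lock protects the
 * list, and shrinker_run_no tags each scan pass so a superblock is
 * visited at most once per f2fs_shrink_scan() call.
 */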
static LIST_HEAD(f2fs_list);
static DEFINE_SPINLOCK(f2fs_list_lock);
static unsigned int shrinker_run_no;
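
/* Clean (total minus dirty) NAT cache entries are reclaimable. */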
static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
{
	long count = NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;

	return count > 0 ? count : 0;
}
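
/* Only free nids cached beyond the MAX_FREE_NIDS target are reclaimable. */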
static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
{
	long count = NM_I(sbi)->nid_cnt[FREE_NID] - MAX_FREE_NIDS;

	return count > 0 ? count : 0;
}
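
/* Reclaimable extent cache objects: zombie extent trees plus extent nodes. */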
static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
{
	return atomic_read(&sbi->total_zombie_tree) +
				atomic_read(&sbi->total_ext_node);
}
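
/*
 * ->count_objects callback of the f2fs shrinker: report how many
 * reclaimable objects (extent cache entries, clean NAT entries and
 * surplus free nids) all mounted f2fs instances currently hold.
 *
 * The registration itself lives in super.c; a minimal sketch of the
 * hookup, assuming the usual struct shrinker fields of this kernel
 * version:
 *
 *	static struct shrinker f2fs_shrinker_info = {
 *		.scan_objects	= f2fs_shrink_scan,
 *		.count_objects	= f2fs_shrink_count,
 *		.seeks		= DEFAULT_SEEKS,
 *	};
 *	...
 *	register_shrinker(&f2fs_shrinker_info);
 */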
unsigned long f2fs_shrink_count(struct shrinker *shrink,
				struct shrink_control *sc)
{
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned long count = 0;

	spin_lock(&f2fs_list_lock);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		spin_unlock(&f2fs_list_lock);

		/* count extent cache entries */
		count += __count_extent_cache(sbi);

		/* count clean nat cache entries */
		count += __count_nat_entries(sbi);

		/* count free nids cache entries */
		count += __count_free_nids(sbi);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		mutex_unlock(&sbi->umount_mutex);
	}
	spin_unlock(&f2fs_list_lock);
	return count;
}
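
/*
 * ->scan_objects callback: walk the registered superblocks and try to
 * free up to sc->nr_to_scan objects.  Each pass tags visited instances
 * with a fresh shrinker_run_no so a superblock is scanned at most once
 * per call, and scanned entries are rotated to the list tail so the
 * next pass starts with a different victim.
 */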
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
				struct shrink_control *sc)
{
	unsigned long nr = sc->nr_to_scan;
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&f2fs_list_lock);
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		if (sbi->shrinker_run_no == run_no)
			break;

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		spin_unlock(&f2fs_list_lock);

		sbi->shrinker_run_no = run_no;

		/* shrink extent cache entries */
		freed += f2fs_shrink_extent_tree(sbi, nr >> 1);

		/* shrink clean nat cache entries */
		if (freed < nr)
			freed += f2fs_try_to_free_nats(sbi, nr - freed);

		/* shrink free nids cache entries */
		if (freed < nr)
			freed += f2fs_try_to_free_nids(sbi, nr - freed);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		list_move_tail(&sbi->s_list, &f2fs_list);
		mutex_unlock(&sbi->umount_mutex);
		if (freed >= nr)
			break;
	}
	spin_unlock(&f2fs_list_lock);
	return freed;
}
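
/* Add a newly mounted f2fs instance to the shrinker's list. */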
void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
{
	spin_lock(&f2fs_list_lock);
	list_add_tail(&sbi->s_list, &f2fs_list);
	spin_unlock(&f2fs_list_lock);
}
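
/*
 * Drop all reclaimable extent cache objects and take this instance off
 * the shrinker's list at unmount time; the mutex_trylock() checks on
 * umount_mutex in the callbacks above synchronize against this teardown.
 */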
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
{
	f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));

	spin_lock(&f2fs_list_lock);
	list_del_init(&sbi->s_list);
	spin_unlock(&f2fs_list_lock);
}