1 // SPDX-License-Identifier: GPL-2.0
/*
 * f2fs shrinker support
 *   the basic infra was copied from fs/ubifs/shrinker.c
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"
/* all mounted f2fs instances that registered via f2fs_join_shrinker() */
static LIST_HEAD(f2fs_list);
/* protects f2fs_list against concurrent join/leave and shrinker walks */
static DEFINE_SPINLOCK(f2fs_list_lock);
/* id of the current scan pass; lets f2fs_shrink_scan() detect a full lap */
static unsigned int shrinker_run_no;
19 static unsigned long __count_nat_entries(struct f2fs_sb_info
*sbi
)
21 return NM_I(sbi
)->nat_cnt
[RECLAIMABLE_NAT
];
24 static unsigned long __count_free_nids(struct f2fs_sb_info
*sbi
)
26 long count
= NM_I(sbi
)->nid_cnt
[FREE_NID
] - MAX_FREE_NIDS
;
28 return count
> 0 ? count
: 0;
31 static unsigned long __count_extent_cache(struct f2fs_sb_info
*sbi
,
32 enum extent_type type
)
34 struct extent_tree_info
*eti
= &sbi
->extent_tree
[type
];
36 return atomic_read(&eti
->total_zombie_tree
) +
37 atomic_read(&eti
->total_ext_node
);
40 unsigned long f2fs_shrink_count(struct shrinker
*shrink
,
41 struct shrink_control
*sc
)
43 struct f2fs_sb_info
*sbi
;
45 unsigned long count
= 0;
47 spin_lock(&f2fs_list_lock
);
49 while (p
!= &f2fs_list
) {
50 sbi
= list_entry(p
, struct f2fs_sb_info
, s_list
);
52 /* stop f2fs_put_super */
53 if (!mutex_trylock(&sbi
->umount_mutex
)) {
57 spin_unlock(&f2fs_list_lock
);
59 /* count read extent cache entries */
60 count
+= __count_extent_cache(sbi
, EX_READ
);
62 /* count block age extent cache entries */
63 count
+= __count_extent_cache(sbi
, EX_BLOCK_AGE
);
65 /* count clean nat cache entries */
66 count
+= __count_nat_entries(sbi
);
68 /* count free nids cache entries */
69 count
+= __count_free_nids(sbi
);
71 spin_lock(&f2fs_list_lock
);
73 mutex_unlock(&sbi
->umount_mutex
);
75 spin_unlock(&f2fs_list_lock
);
79 unsigned long f2fs_shrink_scan(struct shrinker
*shrink
,
80 struct shrink_control
*sc
)
82 unsigned long nr
= sc
->nr_to_scan
;
83 struct f2fs_sb_info
*sbi
;
86 unsigned long freed
= 0;
88 spin_lock(&f2fs_list_lock
);
90 run_no
= ++shrinker_run_no
;
91 } while (run_no
== 0);
93 while (p
!= &f2fs_list
) {
94 sbi
= list_entry(p
, struct f2fs_sb_info
, s_list
);
96 if (sbi
->shrinker_run_no
== run_no
)
99 /* stop f2fs_put_super */
100 if (!mutex_trylock(&sbi
->umount_mutex
)) {
104 spin_unlock(&f2fs_list_lock
);
106 sbi
->shrinker_run_no
= run_no
;
108 /* shrink extent cache entries */
109 freed
+= f2fs_shrink_age_extent_tree(sbi
, nr
>> 2);
111 /* shrink read extent cache entries */
112 freed
+= f2fs_shrink_read_extent_tree(sbi
, nr
>> 2);
114 /* shrink clean nat cache entries */
116 freed
+= f2fs_try_to_free_nats(sbi
, nr
- freed
);
118 /* shrink free nids cache entries */
120 freed
+= f2fs_try_to_free_nids(sbi
, nr
- freed
);
122 spin_lock(&f2fs_list_lock
);
124 list_move_tail(&sbi
->s_list
, &f2fs_list
);
125 mutex_unlock(&sbi
->umount_mutex
);
129 spin_unlock(&f2fs_list_lock
);
133 void f2fs_join_shrinker(struct f2fs_sb_info
*sbi
)
135 spin_lock(&f2fs_list_lock
);
136 list_add_tail(&sbi
->s_list
, &f2fs_list
);
137 spin_unlock(&f2fs_list_lock
);
140 void f2fs_leave_shrinker(struct f2fs_sb_info
*sbi
)
142 f2fs_shrink_read_extent_tree(sbi
, __count_extent_cache(sbi
, EX_READ
));
143 f2fs_shrink_age_extent_tree(sbi
,
144 __count_extent_cache(sbi
, EX_BLOCK_AGE
));
146 spin_lock(&f2fs_list_lock
);
147 list_del_init(&sbi
->s_list
);
148 spin_unlock(&f2fs_list_lock
);