fs/erofs/utils.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "internal.h"
#include <linux/pagevec.h>
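/*
 * Allocate one page for internal use: reuse an idle page from the
 * caller-supplied @pool if one is available, otherwise fall back to
 * alloc_page() with the given @gfp flags.
 */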
struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
{
	struct page *page;

	if (!list_empty(pool)) {
		page = lru_to_page(pool);
		DBG_BUGON(page_ref_count(page) != 1);
		list_del(&page->lru);
	} else {
		page = alloc_page(gfp);
	}
	return page;
}

#if (EROFS_PCPUBUF_NR_PAGES > 0)
static struct {
	u8 data[PAGE_SIZE * EROFS_PCPUBUF_NR_PAGES];
} ____cacheline_aligned_in_smp erofs_pcpubuf[NR_CPUS];
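
/*
 * Return the @pagenr-th page of the current CPU's static buffer.
 * Preemption is left disabled here so the buffer stays bound to this
 * CPU; the caller is expected to re-enable preemption once it is done
 * with the buffer.
 */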
void *erofs_get_pcpubuf(unsigned int pagenr)
{
	preempt_disable();
	return &erofs_pcpubuf[smp_processor_id()].data[pagenr * PAGE_SIZE];
}
#endif

#ifdef CONFIG_EROFS_FS_ZIP
/* global shrink count (for all mounted EROFS instances) */
static atomic_long_t erofs_global_shrink_cnt;

#define __erofs_workgroup_get(grp)	atomic_inc(&(grp)->refcount)
#define __erofs_workgroup_put(grp)	atomic_dec(&(grp)->refcount)
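
/*
 * Lockless reference grab: wait until the workgroup is no longer
 * frozen, then bump the refcount with cmpxchg so that a concurrent
 * freezer cannot slip in between the check and the increment.
 */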
static int erofs_workgroup_get(struct erofs_workgroup *grp)
{
	int o;

repeat:
	o = erofs_wait_on_workgroup_freezed(grp);
	if (o <= 0)
		return -1;

	if (atomic_cmpxchg(&grp->refcount, o, o + 1) != o)
		goto repeat;

	/* decrease refcount paired by erofs_workgroup_put */
	if (o == 1)
		atomic_long_dec(&erofs_global_shrink_cnt);
	return 0;
}
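
/*
 * Look up the workgroup at @index under RCU; if grabbing a reference
 * fails because the workgroup is currently frozen, drop the RCU read
 * lock and retry the lookup from scratch.
 */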
struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
					     pgoff_t index)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_workgroup *grp;

repeat:
	rcu_read_lock();
	grp = radix_tree_lookup(&sbi->workstn_tree, index);
	if (grp) {
		if (erofs_workgroup_get(grp)) {
			/* prefer to relax rcu read side */
			rcu_read_unlock();
			goto repeat;
		}

		DBG_BUGON(index != grp->index);
	}
	rcu_read_unlock();
	return grp;
}
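
/*
 * Insert a freshly initialized workgroup (refcount must be exactly 1)
 * into the per-superblock radix tree so that later lookups can find it.
 */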
int erofs_register_workgroup(struct super_block *sb,
			     struct erofs_workgroup *grp)
{
	struct erofs_sb_info *sbi;
	int err;

	/* grp shouldn't be broken or used before */
	if (atomic_read(&grp->refcount) != 1) {
		DBG_BUGON(1);
		return -EINVAL;
	}

	err = radix_tree_preload(GFP_NOFS);
	if (err)
		return err;

	sbi = EROFS_SB(sb);
	xa_lock(&sbi->workstn_tree);

	/*
	 * Bump up the reference count before making this workgroup
	 * visible to other users in order to avoid potential UAF
	 * when the lookup path is not serialized by workstn_lock.
	 */
	__erofs_workgroup_get(grp);

	err = radix_tree_insert(&sbi->workstn_tree, grp->index, grp);
	if (err)
		/*
		 * It's safe to decrease here since the workgroup isn't
		 * visible yet and its refcount is >= 2 (cannot be frozen).
		 */
		__erofs_workgroup_put(grp);

	xa_unlock(&sbi->workstn_tree);
	radix_tree_preload_end();
	return err;
}
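
/*
 * Reference dropping helpers: once the refcount falls back to 1 only
 * the radix tree still holds the workgroup, so it becomes reclaimable
 * and the global shrink counter is bumped; once it reaches 0 the
 * workgroup is released via erofs_workgroup_free_rcu().
 */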
static void __erofs_workgroup_free(struct erofs_workgroup *grp)
{
	atomic_long_dec(&erofs_global_shrink_cnt);
	erofs_workgroup_free_rcu(grp);
}

int erofs_workgroup_put(struct erofs_workgroup *grp)
{
	int count = atomic_dec_return(&grp->refcount);

	if (count == 1)
		atomic_long_inc(&erofs_global_shrink_cnt);
	else if (!count)
		__erofs_workgroup_free(grp);
	return count;
}

static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp)
{
	erofs_workgroup_unfreeze(grp, 0);
	__erofs_workgroup_free(grp);
}
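
/*
 * Try to reclaim a single workgroup: freeze it so no new references
 * can be taken, drop its cached pages, then remove it from the radix
 * tree and free it.
 */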
static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
					   struct erofs_workgroup *grp)
{
	/*
	 * If the managed cache is enabled, the refcount of workgroups
	 * themselves could be < 0 (frozen). In other words, there is
	 * no guarantee that all refcounts are > 0.
	 */
	if (!erofs_workgroup_try_to_freeze(grp, 1))
		return false;

	/*
	 * Note that all cached pages should be unattached before the
	 * workgroup is deleted from the radix tree. Otherwise some
	 * cached pages could still be attached to the orphaned old
	 * workgroup when the new one is available in the tree.
	 */
	if (erofs_try_to_free_all_cached_pages(sbi, grp)) {
		erofs_workgroup_unfreeze(grp, 1);
		return false;
	}

	/*
	 * It's impossible to fail after the workgroup is frozen, but
	 * in order to catch unexpected race conditions, add a
	 * DBG_BUGON to observe this in advance.
	 */
	DBG_BUGON(radix_tree_delete(&sbi->workstn_tree, grp->index) != grp);

	/*
	 * If the managed cache is enabled, the last refcount should
	 * indicate the related workstation.
	 */
	erofs_workgroup_unfreeze_final(grp);
	return true;
}
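
/*
 * Walk the workgroup radix tree in batches of PAGEVEC_SIZE entries and
 * release up to @nr_shrink reclaimable workgroups, returning how many
 * were actually freed.
 */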
static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
					      unsigned long nr_shrink)
{
	pgoff_t first_index = 0;
	void *batch[PAGEVEC_SIZE];
	unsigned int freed = 0;

	int i, found;
repeat:
	xa_lock(&sbi->workstn_tree);

	found = radix_tree_gang_lookup(&sbi->workstn_tree,
				       batch, first_index, PAGEVEC_SIZE);

	for (i = 0; i < found; ++i) {
		struct erofs_workgroup *grp = batch[i];

		first_index = grp->index + 1;

		/* try to shrink each valid workgroup */
		if (!erofs_try_to_release_workgroup(sbi, grp))
			continue;

		++freed;
		if (!--nr_shrink)
			break;
	}
	xa_unlock(&sbi->workstn_tree);

	if (i && nr_shrink)
		goto repeat;
	return freed;
}

/* protected by 'erofs_sb_list_lock' */
static unsigned int shrinker_run_no;

/* protects the mounted 'erofs_sb_list' */
static DEFINE_SPINLOCK(erofs_sb_list_lock);
static LIST_HEAD(erofs_sb_list);
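
/*
 * Each mounted instance is kept on 'erofs_sb_list' so that the memory
 * shrinker below can walk all of them; umount_mutex keeps shrinking
 * and unmount from racing with each other.
 */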
void erofs_shrinker_register(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	mutex_init(&sbi->umount_mutex);

	spin_lock(&erofs_sb_list_lock);
	list_add(&sbi->list, &erofs_sb_list);
	spin_unlock(&erofs_sb_list_lock);
}

void erofs_shrinker_unregister(struct super_block *sb)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);

	mutex_lock(&sbi->umount_mutex);
	/* clean up all remaining workgroups in memory */
	erofs_shrink_workstation(sbi, ~0UL);

	spin_lock(&erofs_sb_list_lock);
	list_del(&sbi->list);
	spin_unlock(&erofs_sb_list_lock);
	mutex_unlock(&sbi->umount_mutex);
}
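
/*
 * Shrinker callbacks: the count callback simply reports the number of
 * reclaimable workgroups across all instances, while the scan callback
 * walks the superblock list and shrinks each instance in turn.
 */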
static unsigned long erofs_shrink_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	return atomic_long_read(&erofs_global_shrink_cnt);
}

static unsigned long erofs_shrink_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct erofs_sb_info *sbi;
	struct list_head *p;

	unsigned long nr = sc->nr_to_scan;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&erofs_sb_list_lock);
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);

	/* Iterate over all mounted superblocks and try to shrink them */
	p = erofs_sb_list.next;
	while (p != &erofs_sb_list) {
		sbi = list_entry(p, struct erofs_sb_info, list);

		/*
		 * We move the ones we do to the end of the list, so we stop
		 * when we see one we have already done.
		 */
		if (sbi->shrinker_run_no == run_no)
			break;

		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}

		spin_unlock(&erofs_sb_list_lock);
		sbi->shrinker_run_no = run_no;

		freed += erofs_shrink_workstation(sbi, nr - freed);

		spin_lock(&erofs_sb_list_lock);
		/* Get the next list element before we move this one */
		p = p->next;

		/*
		 * Move this one to the end of the list to provide some
		 * fairness.
		 */
		list_move_tail(&sbi->list, &erofs_sb_list);
		mutex_unlock(&sbi->umount_mutex);

		if (freed >= nr)
			break;
	}
	spin_unlock(&erofs_sb_list_lock);
	return freed;
}

static struct shrinker erofs_shrinker_info = {
	.scan_objects = erofs_shrink_scan,
	.count_objects = erofs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};

int __init erofs_init_shrinker(void)
{
	return register_shrinker(&erofs_shrinker_info);
}

void erofs_exit_shrinker(void)
{
	unregister_shrinker(&erofs_shrinker_info);
}
#endif	/* CONFIG_EROFS_FS_ZIP */