// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "internal.h"
#include <linux/pagevec.h>
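/*
 * Grab a page from the caller-provided free page pool if one is
 * available, otherwise fall back to allocating a fresh page with
 * the given gfp mask.
 */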
struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
{
	struct page *page;

	if (!list_empty(pool)) {
		page = lru_to_page(pool);
		DBG_BUGON(page_ref_count(page) != 1);
		list_del(&page->lru);
	} else {
		page = alloc_page(gfp);
	}
	return page;
}
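/*
 * Static per-CPU buffers; erofs_get_pcpubuf() returns a pointer into
 * the current CPU's buffer with preemption disabled, so the caller
 * must re-enable preemption once it is done with the buffer.
 */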
#if (EROFS_PCPUBUF_NR_PAGES > 0)
static struct {
	u8 data[PAGE_SIZE * EROFS_PCPUBUF_NR_PAGES];
} ____cacheline_aligned_in_smp erofs_pcpubuf[NR_CPUS];
void *erofs_get_pcpubuf(unsigned int pagenr)
{
	preempt_disable();
	return &erofs_pcpubuf[smp_processor_id()].data[pagenr * PAGE_SIZE];
}
#endif
#ifdef CONFIG_EROFS_FS_ZIP
/* global shrink count (for all mounted EROFS instances) */
static atomic_long_t erofs_global_shrink_cnt;
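/*
 * Try to take an extra reference on @grp: wait until the workgroup is
 * no longer frozen, then bump its refcount atomically.  Returns 0 on
 * success or -1 if no reference could be obtained.
 */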
static int erofs_workgroup_get(struct erofs_workgroup *grp)
{
	int o;

repeat:
	o = erofs_wait_on_workgroup_freezed(grp);
	if (o <= 0)
		return -1;

	if (atomic_cmpxchg(&grp->refcount, o, o + 1) != o)
		goto repeat;

	/* decrease refcount paired by erofs_workgroup_put */
	if (o == 1)
		atomic_long_dec(&erofs_global_shrink_cnt);
	return 0;
}
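/*
 * Look up the workgroup at @index in the per-superblock XArray under
 * RCU protection and take a reference on it, retrying if it is
 * currently frozen by a reclaimer.
 */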
struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
					     pgoff_t index)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_workgroup *grp;

repeat:
	rcu_read_lock();
	grp = xa_load(&sbi->managed_pslots, index);
	if (grp) {
		if (erofs_workgroup_get(grp)) {
			/* prefer to relax rcu read side */
			rcu_read_unlock();
			goto repeat;
		}

		DBG_BUGON(index != grp->index);
	}
	rcu_read_unlock();
	return grp;
}
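/*
 * Insert @grp into the managed XArray.  If another workgroup already
 * occupies the slot, return that one (with a reference held) instead;
 * on XArray failure an ERR_PTR is returned.
 */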
struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
					       struct erofs_workgroup *grp)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	struct erofs_workgroup *pre;

	/*
	 * Bump up the reference count before making this visible
	 * to others in the XArray to avoid a potential UAF if not
	 * serialized by xa_lock.
	 */
	atomic_inc(&grp->refcount);

repeat:
	xa_lock(&sbi->managed_pslots);
	pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index,
			   NULL, grp, GFP_NOFS);
	if (pre) {
		if (xa_is_err(pre)) {
			pre = ERR_PTR(xa_err(pre));
		} else if (erofs_workgroup_get(pre)) {
			/* try to legitimize the current in-tree one */
			xa_unlock(&sbi->managed_pslots);
			cond_resched();
			goto repeat;
		}
		atomic_dec(&grp->refcount);
		grp = pre;
	}
	xa_unlock(&sbi->managed_pslots);
	return grp;
}
static void __erofs_workgroup_free(struct erofs_workgroup *grp)
{
	atomic_long_dec(&erofs_global_shrink_cnt);
	erofs_workgroup_free_rcu(grp);
}
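/*
 * Drop one reference on @grp.  When only the XArray reference is left,
 * the workgroup becomes reclaimable and the global shrink count goes
 * up; when the last reference is gone, free the workgroup via RCU.
 */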
int erofs_workgroup_put(struct erofs_workgroup *grp)
{
	int count = atomic_dec_return(&grp->refcount);

	if (count == 1)
		atomic_long_inc(&erofs_global_shrink_cnt);
	else if (!count)
		__erofs_workgroup_free(grp);
	return count;
}
static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp)
{
	erofs_workgroup_unfreeze(grp, 0);
	__erofs_workgroup_free(grp);
}
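/*
 * Attempt to reclaim a single workgroup: freeze it, detach all of its
 * cached pages and erase it from the XArray.  Returns true if the
 * workgroup was released.
 */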
static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
					   struct erofs_workgroup *grp)
{
	/*
	 * If managed cache is on, the refcount of workgroups
	 * themselves could be < 0 (frozen). In other words,
	 * there is no guarantee that all refcounts are > 0.
	 */
	if (!erofs_workgroup_try_to_freeze(grp, 1))
		return false;

	/*
	 * Note that all cached pages should be detached before
	 * being deleted from the XArray. Otherwise some cached
	 * pages could still be attached to the orphaned old
	 * workgroup when the new one is available in the tree.
	 */
	if (erofs_try_to_free_all_cached_pages(sbi, grp)) {
		erofs_workgroup_unfreeze(grp, 1);
		return false;
	}

	/*
	 * It's impossible to fail after the workgroup is frozen,
	 * but in order to avoid some race conditions, add a
	 * DBG_BUGON to observe this in advance.
	 */
	DBG_BUGON(xa_erase(&sbi->managed_pslots, grp->index) != grp);

	/*
	 * If managed cache is on, the last refcount should indicate
	 * the related workstation.
	 */
	erofs_workgroup_unfreeze_final(grp);
	return true;
}
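/*
 * Walk the managed XArray of @sbi and reclaim up to @nr_shrink idle
 * workgroups; returns the number of workgroups actually freed.
 */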
static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
					      unsigned long nr_shrink)
{
	struct erofs_workgroup *grp;
	unsigned int freed = 0;
	unsigned long index;

	xa_for_each(&sbi->managed_pslots, index, grp) {
		/* try to shrink each valid workgroup */
		if (!erofs_try_to_release_workgroup(sbi, grp))
			continue;

		++freed;
		if (!--nr_shrink)
			break;
	}
	return freed;
}
/* protected by 'erofs_sb_list_lock' */
static unsigned int shrinker_run_no;

/* protects the mounted 'erofs_sb_list' */
static DEFINE_SPINLOCK(erofs_sb_list_lock);
static LIST_HEAD(erofs_sb_list);
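/* add a newly mounted instance to the global shrink list */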
void erofs_shrinker_register(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	mutex_init(&sbi->umount_mutex);

	spin_lock(&erofs_sb_list_lock);
	list_add(&sbi->list, &erofs_sb_list);
	spin_unlock(&erofs_sb_list_lock);
}
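/* reclaim all remaining workgroups and leave the global shrink list */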
void erofs_shrinker_unregister(struct super_block *sb)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);

	mutex_lock(&sbi->umount_mutex);
	/* clean up all remaining workgroups in memory */
	erofs_shrink_workstation(sbi, ~0UL);

	spin_lock(&erofs_sb_list_lock);
	list_del(&sbi->list);
	spin_unlock(&erofs_sb_list_lock);
	mutex_unlock(&sbi->umount_mutex);
}
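/* report how many workgroups are currently reclaimable in total */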
static unsigned long erofs_shrink_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	return atomic_long_read(&erofs_global_shrink_cnt);
}
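/*
 * Round-robin over all mounted instances: each pass tags superblocks
 * with a run number and moves finished ones to the list tail, so that
 * every instance gets shrunk fairly over successive calls.
 */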
static unsigned long erofs_shrink_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct erofs_sb_info *sbi;
	struct list_head *p;

	unsigned long nr = sc->nr_to_scan;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&erofs_sb_list_lock);
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);

	/* Iterate over all mounted superblocks and try to shrink them */
	p = erofs_sb_list.next;
	while (p != &erofs_sb_list) {
		sbi = list_entry(p, struct erofs_sb_info, list);

		/*
		 * We move the ones we do to the end of the list, so we stop
		 * when we see one we have already done.
		 */
		if (sbi->shrinker_run_no == run_no)
			break;

		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}

		spin_unlock(&erofs_sb_list_lock);
		sbi->shrinker_run_no = run_no;

		freed += erofs_shrink_workstation(sbi, nr - freed);

		spin_lock(&erofs_sb_list_lock);
		/* Get the next list element before we move this one */
		p = p->next;

		/*
		 * Move this one to the end of the list to provide some
		 * stability.
		 */
		list_move_tail(&sbi->list, &erofs_sb_list);
		mutex_unlock(&sbi->umount_mutex);

		if (freed >= nr)
			break;
	}
	spin_unlock(&erofs_sb_list_lock);
	return freed;
}
static struct shrinker erofs_shrinker_info = {
	.scan_objects = erofs_shrink_scan,
	.count_objects = erofs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};
int __init erofs_init_shrinker(void)
{
	return register_shrinker(&erofs_shrinker_info);
}
void erofs_exit_shrinker(void)
{
	unregister_shrinker(&erofs_shrinker_info);
}
#endif	/* !CONFIG_EROFS_FS_ZIP */