// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2024 Alibaba Cloud
 */
#include "internal.h"

struct z_erofs_gbuf {
	spinlock_t lock;
	void *ptr;
	struct page **pages;
	unsigned int nrpages;
};

static struct z_erofs_gbuf *z_erofs_gbufpool, *z_erofs_rsvbuf;
static unsigned int z_erofs_gbuf_count, z_erofs_gbuf_nrpages,
		z_erofs_rsv_nrpages;

module_param_named(global_buffers, z_erofs_gbuf_count, uint, 0444);
module_param_named(reserved_pages, z_erofs_rsv_nrpages, uint, 0444);

atomic_long_t erofs_global_shrink_cnt;	/* for all mounted instances */

/* protects the mounted `erofs_sb_list` */
static DEFINE_SPINLOCK(erofs_sb_list_lock);
static LIST_HEAD(erofs_sb_list);
static unsigned int shrinker_run_no;
static struct shrinker *erofs_shrinker_info;
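
/*
 * Global decompression buffers: by default one vmap()ed buffer per possible
 * CPU (tunable via the `global_buffers` parameter), plus an optional
 * reserved buffer at the end of the pool.  A buffer is picked by the
 * current CPU id and protected by its own spinlock.
 */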
static unsigned int z_erofs_gbuf_id(void)
{
	return raw_smp_processor_id() % z_erofs_gbuf_count;
}
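
/*
 * Grab the current CPU's global buffer and return its mapping if it can
 * hold at least @requiredpages pages; otherwise drop the lock and return
 * NULL.  On success the buffer lock is held until z_erofs_put_gbuf().
 */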
void *z_erofs_get_gbuf(unsigned int requiredpages)
	__acquires(gbuf->lock)
{
	struct z_erofs_gbuf *gbuf;

	migrate_disable();
	gbuf = &z_erofs_gbufpool[z_erofs_gbuf_id()];
	spin_lock(&gbuf->lock);
	/* check if the buffer is too small */
	if (requiredpages > gbuf->nrpages) {
		spin_unlock(&gbuf->lock);
		migrate_enable();
		/* (for sparse checker) pretend gbuf->lock is still taken */
		__acquire(gbuf->lock);
		return NULL;
	}
	return gbuf->ptr;
}

void z_erofs_put_gbuf(void *ptr) __releases(gbuf->lock)
{
	struct z_erofs_gbuf *gbuf;

	gbuf = &z_erofs_gbufpool[z_erofs_gbuf_id()];
	DBG_BUGON(gbuf->ptr != ptr);
	spin_unlock(&gbuf->lock);
	migrate_enable();
}
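
/*
 * Grow every global buffer to hold @nrpages pages.  Buffers are never
 * shrunk here; on failure, pages allocated for the unfinished buffer are
 * released and -ENOMEM is returned.
 */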
int z_erofs_gbuf_growsize(unsigned int nrpages)
{
	static DEFINE_MUTEX(gbuf_resize_mutex);
	struct page **tmp_pages = NULL;
	struct z_erofs_gbuf *gbuf;
	void *ptr, *old_ptr;
	int last, i, j;

	mutex_lock(&gbuf_resize_mutex);
	/* avoid shrinking gbufs, since no idea how many fses rely on */
	if (nrpages <= z_erofs_gbuf_nrpages) {
		mutex_unlock(&gbuf_resize_mutex);
		return 0;
	}

	for (i = 0; i < z_erofs_gbuf_count; ++i) {
		gbuf = &z_erofs_gbufpool[i];
		tmp_pages = kcalloc(nrpages, sizeof(*tmp_pages), GFP_KERNEL);
		if (!tmp_pages)
			goto out;

		for (j = 0; j < gbuf->nrpages; ++j)
			tmp_pages[j] = gbuf->pages[j];
		do {
			last = j;
			j = alloc_pages_bulk_array(GFP_KERNEL, nrpages,
						   tmp_pages);
			if (last == j)
				goto out;
		} while (j != nrpages);

		ptr = vmap(tmp_pages, nrpages, VM_MAP, PAGE_KERNEL);
		if (!ptr)
			goto out;

		spin_lock(&gbuf->lock);
		kfree(gbuf->pages);
		gbuf->pages = tmp_pages;
		old_ptr = gbuf->ptr;
		gbuf->ptr = ptr;
		gbuf->nrpages = nrpages;
		spin_unlock(&gbuf->lock);
		if (old_ptr)
			vunmap(old_ptr);
	}
	z_erofs_gbuf_nrpages = nrpages;
out:
	if (i < z_erofs_gbuf_count && tmp_pages) {
		for (j = 0; j < nrpages; ++j)
			if (tmp_pages[j] && (j >= gbuf->nrpages ||
					     tmp_pages[j] != gbuf->pages[j]))
				__free_page(tmp_pages[j]);
		kfree(tmp_pages);
	}
	mutex_unlock(&gbuf_resize_mutex);
	return i < z_erofs_gbuf_count ? -ENOMEM : 0;
}
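
/* Set up the global buffer pool (and the optional reserved buffer). */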
int __init z_erofs_gbuf_init(void)
{
	unsigned int i, total = num_possible_cpus();

	if (z_erofs_gbuf_count)
		total = min(z_erofs_gbuf_count, total);
	z_erofs_gbuf_count = total;

	/* The last (special) global buffer is the reserved buffer */
	total += !!z_erofs_rsv_nrpages;

	z_erofs_gbufpool = kcalloc(total, sizeof(*z_erofs_gbufpool),
				   GFP_KERNEL);
	if (!z_erofs_gbufpool)
		return -ENOMEM;

	if (z_erofs_rsv_nrpages) {
		z_erofs_rsvbuf = &z_erofs_gbufpool[total - 1];
		z_erofs_rsvbuf->pages = kcalloc(z_erofs_rsv_nrpages,
				sizeof(*z_erofs_rsvbuf->pages), GFP_KERNEL);
		if (!z_erofs_rsvbuf->pages) {
			z_erofs_rsvbuf = NULL;
			z_erofs_rsv_nrpages = 0;
		}
	}
	for (i = 0; i < total; ++i)
		spin_lock_init(&z_erofs_gbufpool[i].lock);
	return 0;
}
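
/* Unmap and free all global buffers as well as the pool itself. */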
void z_erofs_gbuf_exit(void)
{
	int i, j;

	for (i = 0; i < z_erofs_gbuf_count + (!!z_erofs_rsvbuf); ++i) {
		struct z_erofs_gbuf *gbuf = &z_erofs_gbufpool[i];

		if (gbuf->ptr) {
			vunmap(gbuf->ptr);
			gbuf->ptr = NULL;
		}

		if (!gbuf->pages)
			continue;

		for (j = 0; j < gbuf->nrpages; ++j)
			if (gbuf->pages[j])
				put_page(gbuf->pages[j]);
		kfree(gbuf->pages);
		gbuf->pages = NULL;
	}
	kfree(z_erofs_gbufpool);
}
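
/*
 * Allocate one page: take it from the local @pagepool first, then (if
 * @tryrsv) from the reserved global buffer, and finally from the page
 * allocator with @gfp.
 */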
struct page *__erofs_allocpage(struct page **pagepool, gfp_t gfp, bool tryrsv)
{
	struct page *page = *pagepool;

	if (page) {
		*pagepool = (struct page *)page_private(page);
	} else if (tryrsv && z_erofs_rsvbuf && z_erofs_rsvbuf->nrpages) {
		spin_lock(&z_erofs_rsvbuf->lock);
		if (z_erofs_rsvbuf->nrpages)
			page = z_erofs_rsvbuf->pages[--z_erofs_rsvbuf->nrpages];
		spin_unlock(&z_erofs_rsvbuf->lock);
	}
	if (!page)
		page = alloc_page(gfp);
	DBG_BUGON(page && page_ref_count(page) != 1);
	return page;
}
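
/*
 * Release all pages chained on @pagepool, refilling the reserved global
 * buffer before handing pages back to the page allocator.
 */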
void erofs_release_pages(struct page **pagepool)
{
	while (*pagepool) {
		struct page *page = *pagepool;

		*pagepool = (struct page *)page_private(page);
		/* try to fill reserved global pool first */
		if (z_erofs_rsvbuf && z_erofs_rsvbuf->nrpages <
				z_erofs_rsv_nrpages) {
			spin_lock(&z_erofs_rsvbuf->lock);
			if (z_erofs_rsvbuf->nrpages < z_erofs_rsv_nrpages) {
				z_erofs_rsvbuf->pages[z_erofs_rsvbuf->nrpages++]
						= page;
				spin_unlock(&z_erofs_rsvbuf->lock);
				continue;
			}
			spin_unlock(&z_erofs_rsvbuf->lock);
		}
		put_page(page);
	}
}
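
/* Add a mounted filesystem instance to the shrinker's superblock list. */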
void erofs_shrinker_register(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	mutex_init(&sbi->umount_mutex);

	spin_lock(&erofs_sb_list_lock);
	list_add(&sbi->list, &erofs_sb_list);
	spin_unlock(&erofs_sb_list_lock);
}

void erofs_shrinker_unregister(struct super_block *sb)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);

	mutex_lock(&sbi->umount_mutex);
	/* clean up all remaining pclusters in memory */
	z_erofs_shrink_scan(sbi, ~0UL);

	spin_lock(&erofs_sb_list_lock);
	list_del(&sbi->list);
	spin_unlock(&erofs_sb_list_lock);
	mutex_unlock(&sbi->umount_mutex);
}

static unsigned long erofs_shrink_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	return atomic_long_read(&erofs_global_shrink_cnt);
}
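
/*
 * Walk the mounted superblocks round-robin and ask each one to shrink
 * cached pclusters until @sc->nr_to_scan objects have been freed.
 */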
static unsigned long erofs_shrink_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct erofs_sb_info *sbi;
	struct list_head *p;

	unsigned long nr = sc->nr_to_scan;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&erofs_sb_list_lock);
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);

	/* Iterate over all mounted superblocks and try to shrink them */
	p = erofs_sb_list.next;
	while (p != &erofs_sb_list) {
		sbi = list_entry(p, struct erofs_sb_info, list);

		/*
		 * We move the ones we do to the end of the list, so we stop
		 * when we see one we have already done.
		 */
		if (sbi->shrinker_run_no == run_no)
			break;

		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}

		spin_unlock(&erofs_sb_list_lock);
		sbi->shrinker_run_no = run_no;
		freed += z_erofs_shrink_scan(sbi, nr - freed);
		spin_lock(&erofs_sb_list_lock);
		/* Get the next list element before we move this one */
		p = p->next;

		/*
		 * Move this one to the end of the list to provide some
		 * stability.
		 */
		list_move_tail(&sbi->list, &erofs_sb_list);
		mutex_unlock(&sbi->umount_mutex);

		if (freed >= nr)
			break;
	}
	spin_unlock(&erofs_sb_list_lock);
	return freed;
}
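
/* Register the "erofs-shrinker" with the memory shrinker framework. */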
int __init erofs_init_shrinker(void)
{
	erofs_shrinker_info = shrinker_alloc(0, "erofs-shrinker");
	if (!erofs_shrinker_info)
		return -ENOMEM;

	erofs_shrinker_info->count_objects = erofs_shrink_count;
	erofs_shrinker_info->scan_objects = erofs_shrink_scan;
	shrinker_register(erofs_shrinker_info);
	return 0;
}

void erofs_exit_shrinker(void)
{
	shrinker_free(erofs_shrinker_info);
}