1 // SPDX-License-Identifier: GPL-2.0-or-later
/* Manage high-level VFS aspects of a cache.
 *
 * Copyright (C) 2007, 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#include <linux/slab.h>
#include <linux/statfs.h>
#include <linux/namei.h>
#include <trace/events/fscache.h>
#include "internal.h"
15 * Bring a cache online.
17 int cachefiles_add_cache(struct cachefiles_cache
*cache
)
19 struct fscache_cache
*cache_cookie
;
22 struct dentry
*graveyard
, *cachedir
, *root
;
23 const struct cred
*saved_cred
;
28 cache_cookie
= fscache_acquire_cache(cache
->tag
);
29 if (IS_ERR(cache_cookie
))
30 return PTR_ERR(cache_cookie
);
32 /* we want to work under the module's security ID */
33 ret
= cachefiles_get_security_ID(cache
);
37 cachefiles_begin_secure(cache
, &saved_cred
);
39 /* look up the directory at the root of the cache */
40 ret
= kern_path(cache
->rootdirname
, LOOKUP_DIRECTORY
, &path
);
44 cache
->mnt
= path
.mnt
;
48 if (is_idmapped_mnt(path
.mnt
)) {
49 pr_warn("File cache on idmapped mounts not supported");
50 goto error_unsupported
;
53 /* Check features of the backing filesystem:
54 * - Directories must support looking up and directory creation
55 * - We create tmpfiles to handle invalidation
56 * - We use xattrs to store metadata
57 * - We need to be able to query the amount of space available
58 * - We want to be able to sync the filesystem when stopping the cache
59 * - We use DIO to/from pages, so the blocksize mustn't be too big.
62 if (d_is_negative(root
) ||
63 !d_backing_inode(root
)->i_op
->lookup
||
64 !d_backing_inode(root
)->i_op
->mkdir
||
65 !d_backing_inode(root
)->i_op
->tmpfile
||
66 !(d_backing_inode(root
)->i_opflags
& IOP_XATTR
) ||
67 !root
->d_sb
->s_op
->statfs
||
68 !root
->d_sb
->s_op
->sync_fs
||
69 root
->d_sb
->s_blocksize
> PAGE_SIZE
)
70 goto error_unsupported
;
73 if (sb_rdonly(root
->d_sb
))
74 goto error_unsupported
;
76 /* determine the security of the on-disk cache as this governs
77 * security ID of files we create */
78 ret
= cachefiles_determine_cache_security(cache
, root
, &saved_cred
);
80 goto error_unsupported
;
82 /* get the cache size and blocksize */
83 ret
= vfs_statfs(&path
, &stats
);
85 goto error_unsupported
;
88 if (stats
.f_bsize
<= 0)
89 goto error_unsupported
;
92 if (stats
.f_bsize
> PAGE_SIZE
)
93 goto error_unsupported
;
95 cache
->bsize
= stats
.f_bsize
;
96 cache
->bshift
= ilog2(stats
.f_bsize
);
98 _debug("blksize %u (shift %u)",
99 cache
->bsize
, cache
->bshift
);
101 _debug("size %llu, avail %llu",
102 (unsigned long long) stats
.f_blocks
,
103 (unsigned long long) stats
.f_bavail
);
105 /* set up caching limits */
106 do_div(stats
.f_files
, 100);
107 cache
->fstop
= stats
.f_files
* cache
->fstop_percent
;
108 cache
->fcull
= stats
.f_files
* cache
->fcull_percent
;
109 cache
->frun
= stats
.f_files
* cache
->frun_percent
;
111 _debug("limits {%llu,%llu,%llu} files",
112 (unsigned long long) cache
->frun
,
113 (unsigned long long) cache
->fcull
,
114 (unsigned long long) cache
->fstop
);
116 do_div(stats
.f_blocks
, 100);
117 cache
->bstop
= stats
.f_blocks
* cache
->bstop_percent
;
118 cache
->bcull
= stats
.f_blocks
* cache
->bcull_percent
;
119 cache
->brun
= stats
.f_blocks
* cache
->brun_percent
;
121 _debug("limits {%llu,%llu,%llu} blocks",
122 (unsigned long long) cache
->brun
,
123 (unsigned long long) cache
->bcull
,
124 (unsigned long long) cache
->bstop
);
126 /* get the cache directory and check its type */
127 cachedir
= cachefiles_get_directory(cache
, root
, "cache", NULL
);
128 if (IS_ERR(cachedir
)) {
129 ret
= PTR_ERR(cachedir
);
130 goto error_unsupported
;
133 cache
->store
= cachedir
;
135 /* get the graveyard directory */
136 graveyard
= cachefiles_get_directory(cache
, root
, "graveyard", NULL
);
137 if (IS_ERR(graveyard
)) {
138 ret
= PTR_ERR(graveyard
);
139 goto error_unsupported
;
142 cache
->graveyard
= graveyard
;
143 cache
->cache
= cache_cookie
;
145 ret
= fscache_add_cache(cache_cookie
, &cachefiles_cache_ops
, cache
);
147 goto error_add_cache
;
150 set_bit(CACHEFILES_READY
, &cache
->flags
);
153 pr_info("File cache on %s registered\n", cache_cookie
->name
);
155 /* check how much space the cache has */
156 cachefiles_has_space(cache
, 0, 0, cachefiles_has_space_check
);
157 cachefiles_end_secure(cache
, saved_cred
);
158 _leave(" = 0 [%px]", cache
->cache
);
162 cachefiles_put_directory(cache
->graveyard
);
163 cache
->graveyard
= NULL
;
165 cachefiles_put_directory(cache
->store
);
171 cachefiles_end_secure(cache
, saved_cred
);
172 put_cred(cache
->cache_cred
);
173 cache
->cache_cred
= NULL
;
175 fscache_relinquish_cache(cache_cookie
);
177 pr_err("Failed to register: %d\n", ret
);
182 * See if we have space for a number of pages and/or a number of files in the
185 int cachefiles_has_space(struct cachefiles_cache
*cache
,
186 unsigned fnr
, unsigned bnr
,
187 enum cachefiles_has_space_for reason
)
189 struct kstatfs stats
;
190 u64 b_avail
, b_writing
;
195 .dentry
= cache
->mnt
->mnt_root
,
198 //_enter("{%llu,%llu,%llu,%llu,%llu,%llu},%u,%u",
199 // (unsigned long long) cache->frun,
200 // (unsigned long long) cache->fcull,
201 // (unsigned long long) cache->fstop,
202 // (unsigned long long) cache->brun,
203 // (unsigned long long) cache->bcull,
204 // (unsigned long long) cache->bstop,
207 /* find out how many pages of blockdev are available */
208 memset(&stats
, 0, sizeof(stats
));
210 ret
= vfs_statfs(&path
, &stats
);
212 trace_cachefiles_vfs_error(NULL
, d_inode(path
.dentry
), ret
,
213 cachefiles_trace_statfs_error
);
215 cachefiles_io_error(cache
, "statfs failed");
216 _leave(" = %d", ret
);
220 b_avail
= stats
.f_bavail
;
221 b_writing
= atomic_long_read(&cache
->b_writing
);
222 if (b_avail
> b_writing
)
223 b_avail
-= b_writing
;
227 //_debug("avail %llu,%llu",
228 // (unsigned long long)stats.f_ffree,
229 // (unsigned long long)b_avail);
231 /* see if there is sufficient space */
232 if (stats
.f_ffree
> fnr
)
233 stats
.f_ffree
-= fnr
;
243 if (stats
.f_ffree
< cache
->fstop
||
244 b_avail
< cache
->bstop
)
245 goto stop_and_begin_cull
;
248 if (stats
.f_ffree
< cache
->fcull
||
249 b_avail
< cache
->bcull
)
252 if (test_bit(CACHEFILES_CULLING
, &cache
->flags
) &&
253 stats
.f_ffree
>= cache
->frun
&&
254 b_avail
>= cache
->brun
&&
255 test_and_clear_bit(CACHEFILES_CULLING
, &cache
->flags
)
257 _debug("cease culling");
258 cachefiles_state_changed(cache
);
266 case cachefiles_has_space_for_write
:
267 fscache_count_no_write_space();
269 case cachefiles_has_space_for_create
:
270 fscache_count_no_create_space();
276 if (!test_and_set_bit(CACHEFILES_CULLING
, &cache
->flags
)) {
277 _debug("### CULL CACHE ###");
278 cachefiles_state_changed(cache
);
281 _leave(" = %d", ret
);
286 * Mark all the objects as being out of service and queue them all for cleanup.
288 static void cachefiles_withdraw_objects(struct cachefiles_cache
*cache
)
290 struct cachefiles_object
*object
;
291 unsigned int count
= 0;
295 spin_lock(&cache
->object_list_lock
);
297 while (!list_empty(&cache
->object_list
)) {
298 object
= list_first_entry(&cache
->object_list
,
299 struct cachefiles_object
, cache_link
);
300 cachefiles_see_object(object
, cachefiles_obj_see_withdrawal
);
301 list_del_init(&object
->cache_link
);
302 fscache_withdraw_cookie(object
->cookie
);
304 if ((count
& 63) == 0) {
305 spin_unlock(&cache
->object_list_lock
);
307 spin_lock(&cache
->object_list_lock
);
311 spin_unlock(&cache
->object_list_lock
);
312 _leave(" [%u objs]", count
);
316 * Withdraw fscache volumes.
318 static void cachefiles_withdraw_fscache_volumes(struct cachefiles_cache
*cache
)
320 struct list_head
*cur
;
321 struct cachefiles_volume
*volume
;
322 struct fscache_volume
*vcookie
;
326 spin_lock(&cache
->object_list_lock
);
327 list_for_each(cur
, &cache
->volumes
) {
328 volume
= list_entry(cur
, struct cachefiles_volume
, cache_link
);
330 if (atomic_read(&volume
->vcookie
->n_accesses
) == 0)
333 vcookie
= fscache_try_get_volume(volume
->vcookie
,
334 fscache_volume_get_withdraw
);
336 spin_unlock(&cache
->object_list_lock
);
337 fscache_withdraw_volume(vcookie
);
338 fscache_put_volume(vcookie
, fscache_volume_put_withdraw
);
342 spin_unlock(&cache
->object_list_lock
);
348 * Withdraw cachefiles volumes.
350 static void cachefiles_withdraw_volumes(struct cachefiles_cache
*cache
)
355 struct fscache_volume
*vcookie
= NULL
;
356 struct cachefiles_volume
*volume
= NULL
;
358 spin_lock(&cache
->object_list_lock
);
359 if (!list_empty(&cache
->volumes
)) {
360 volume
= list_first_entry(&cache
->volumes
,
361 struct cachefiles_volume
, cache_link
);
362 vcookie
= fscache_try_get_volume(volume
->vcookie
,
363 fscache_volume_get_withdraw
);
365 spin_unlock(&cache
->object_list_lock
);
369 list_del_init(&volume
->cache_link
);
371 spin_unlock(&cache
->object_list_lock
);
375 cachefiles_withdraw_volume(volume
);
376 fscache_put_volume(vcookie
, fscache_volume_put_withdraw
);
383 * Sync a cache to backing disk.
385 static void cachefiles_sync_cache(struct cachefiles_cache
*cache
)
387 const struct cred
*saved_cred
;
390 _enter("%s", cache
->cache
->name
);
392 /* make sure all pages pinned by operations on behalf of the netfs are
394 cachefiles_begin_secure(cache
, &saved_cred
);
395 down_read(&cache
->mnt
->mnt_sb
->s_umount
);
396 ret
= sync_filesystem(cache
->mnt
->mnt_sb
);
397 up_read(&cache
->mnt
->mnt_sb
->s_umount
);
398 cachefiles_end_secure(cache
, saved_cred
);
401 cachefiles_io_error(cache
,
402 "Attempt to sync backing fs superblock returned error %d",
407 * Withdraw cache objects.
409 void cachefiles_withdraw_cache(struct cachefiles_cache
*cache
)
411 struct fscache_cache
*fscache
= cache
->cache
;
413 pr_info("File cache on %s unregistering\n", fscache
->name
);
415 fscache_withdraw_cache(fscache
);
416 cachefiles_withdraw_fscache_volumes(cache
);
418 /* we now have to destroy all the active objects pertaining to this
419 * cache - which we do by passing them off to thread pool to be
421 cachefiles_withdraw_objects(cache
);
422 fscache_wait_for_objects(fscache
);
424 cachefiles_withdraw_volumes(cache
);
425 cachefiles_sync_cache(cache
);
427 fscache_relinquish_cache(fscache
);