/* CacheFiles path walking and related routines
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/quotaops.h>
#include <linux/xattr.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/slab.h>
#include "internal.h"

#define CACHEFILES_KEYBUF_SIZE 512

/*
 * dump debugging info about an object
 */
static noinline
void __cachefiles_printk_object(struct cachefiles_object *object,
                                const char *prefix,
                                u8 *keybuf)
{
        struct fscache_cookie *cookie;
        unsigned keylen, loop;

        pr_err("%sobject: OBJ%x\n", prefix, object->fscache.debug_id);
        pr_err("%sobjstate=%s fl=%lx wbusy=%x ev=%lx[%lx]\n",
               prefix, object->fscache.state->name,
               object->fscache.flags, work_busy(&object->fscache.work),
               object->fscache.events, object->fscache.event_mask);
        pr_err("%sops=%u inp=%u exc=%u\n",
               prefix, object->fscache.n_ops, object->fscache.n_in_progress,
               object->fscache.n_exclusive);
        pr_err("%sparent=%p\n",
               prefix, object->fscache.parent);

        spin_lock(&object->fscache.lock);
        cookie = object->fscache.cookie;
        if (cookie) {
                pr_err("%scookie=%p [pr=%p nd=%p fl=%lx]\n",
                       prefix,
                       object->fscache.cookie,
                       object->fscache.cookie->parent,
                       object->fscache.cookie->netfs_data,
                       object->fscache.cookie->flags);
                if (keybuf && cookie->def)
                        keylen = cookie->def->get_key(cookie->netfs_data, keybuf,
                                                      CACHEFILES_KEYBUF_SIZE);
                else
                        keylen = 0;
        } else {
                pr_err("%scookie=NULL\n", prefix);
                keylen = 0;
        }
        spin_unlock(&object->fscache.lock);

        if (keylen) {
                pr_err("%skey=[%u] '", prefix, keylen);
                for (loop = 0; loop < keylen; loop++)
                        pr_cont("%02x", keybuf[loop]);
                pr_cont("'\n");
        }
}

/*
 * dump debugging info about a pair of objects
 */
static noinline void cachefiles_printk_object(struct cachefiles_object *object,
                                              struct cachefiles_object *xobject)
{
        u8 *keybuf;

        keybuf = kmalloc(CACHEFILES_KEYBUF_SIZE, GFP_NOIO);
        if (object)
                __cachefiles_printk_object(object, "", keybuf);
        if (xobject)
                __cachefiles_printk_object(xobject, "x", keybuf);
        kfree(keybuf);
}

/*
 * mark the owner of a dentry, if there is one, to indicate that that dentry
 * has been preemptively deleted
 * - the caller must hold the i_mutex on the dentry's parent as required to
 *   call vfs_unlink(), vfs_rmdir() or vfs_rename()
 */
static void cachefiles_mark_object_buried(struct cachefiles_cache *cache,
                                          struct dentry *dentry,
                                          enum fscache_why_object_killed why)
{
        struct cachefiles_object *object;
        struct rb_node *p;

        _enter(",'%pd'", dentry);

        write_lock(&cache->active_lock);

        p = cache->active_nodes.rb_node;
        while (p) {
                object = rb_entry(p, struct cachefiles_object, active_node);
                if (object->dentry > dentry)
                        p = p->rb_left;
                else if (object->dentry < dentry)
                        p = p->rb_right;
                else
                        goto found_dentry;
        }

        write_unlock(&cache->active_lock);
        _leave(" [no owner]");
        return;

        /* found the dentry for */
found_dentry:
        kdebug("preemptive burial: OBJ%x [%s] %p",
               object->fscache.debug_id,
               object->fscache.state->name,
               dentry);

        if (fscache_object_is_live(&object->fscache)) {
                pr_err("\n");
                pr_err("Error: Can't preemptively bury live object\n");
                cachefiles_printk_object(object, NULL);
        } else {
                if (why != FSCACHE_OBJECT_IS_STALE)
                        fscache_object_mark_killed(&object->fscache, why);
        }

        write_unlock(&cache->active_lock);
        _leave(" [owner marked]");
}

/*
 * record the fact that an object is now active
 */
static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
                                         struct cachefiles_object *object)
{
        struct cachefiles_object *xobject;
        struct rb_node **_p, *_parent = NULL;
        struct dentry *dentry;

        _enter(",%p", object);

try_again:
        write_lock(&cache->active_lock);

        if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)) {
                pr_err("Error: Object already active\n");
                cachefiles_printk_object(object, NULL);
                BUG();
        }

        dentry = object->dentry;
        _p = &cache->active_nodes.rb_node;
        while (*_p) {
                _parent = *_p;
                xobject = rb_entry(_parent,
                                   struct cachefiles_object, active_node);

                ASSERT(xobject != object);

                if (xobject->dentry > dentry)
                        _p = &(*_p)->rb_left;
                else if (xobject->dentry < dentry)
                        _p = &(*_p)->rb_right;
                else
                        goto wait_for_old_object;
        }

        rb_link_node(&object->active_node, _parent, _p);
        rb_insert_color(&object->active_node, &cache->active_nodes);

        write_unlock(&cache->active_lock);
        _leave(" = 0");
        return 0;

        /* an old object from a previous incarnation is hogging the slot - we
         * need to wait for it to be destroyed */
wait_for_old_object:
        if (fscache_object_is_live(&xobject->fscache)) {
                pr_err("\n");
                pr_err("Error: Unexpected object collision\n");
                cachefiles_printk_object(object, xobject);
                BUG();
        }
        atomic_inc(&xobject->usage);
        write_unlock(&cache->active_lock);

        if (test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
                wait_queue_head_t *wq;

                signed long timeout = 60 * HZ;
                wait_queue_t wait;
                bool requeue;

                /* if the object we're waiting for is queued for processing,
                 * then just put ourselves on the queue behind it */
                if (work_pending(&xobject->fscache.work)) {
                        _debug("queue OBJ%x behind OBJ%x immediately",
                               object->fscache.debug_id,
                               xobject->fscache.debug_id);
                        goto requeue;
                }

                /* otherwise we sleep until either the object we're waiting for
                 * is done, or the fscache_object is congested */
                wq = bit_waitqueue(&xobject->flags, CACHEFILES_OBJECT_ACTIVE);
                init_wait(&wait);
                requeue = false;
                do {
                        prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
                        if (!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags))
                                break;

                        requeue = fscache_object_sleep_till_congested(&timeout);
                } while (timeout > 0 && !requeue);
                finish_wait(wq, &wait);

                if (requeue &&
                    test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
                        _debug("queue OBJ%x behind OBJ%x after wait",
                               object->fscache.debug_id,
                               xobject->fscache.debug_id);
                        goto requeue;
                }

                if (timeout <= 0) {
                        pr_err("\n");
                        pr_err("Error: Overlong wait for old active object to go away\n");
                        cachefiles_printk_object(object, xobject);
                        goto requeue;
                }
        }

        ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags));

        cache->cache.ops->put_object(&xobject->fscache);
        goto try_again;

requeue:
        clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
        cache->cache.ops->put_object(&xobject->fscache);
        _leave(" = -ETIMEDOUT");
        return -ETIMEDOUT;
}

/*
 * Mark an object as being inactive.
 */
void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
                                     struct cachefiles_object *object)
{
        write_lock(&cache->active_lock);
        rb_erase(&object->active_node, &cache->active_nodes);
        clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
        write_unlock(&cache->active_lock);

        wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE);

        /* This object can now be culled, so we need to let the daemon know
         * that there is something it can remove if it needs to.
         */
        atomic_long_add(d_backing_inode(object->dentry)->i_blocks,
                        &cache->b_released);
        if (atomic_inc_return(&cache->f_released))
                cachefiles_state_changed(cache);
}

/*
 * delete an object representation from the cache
 * - file backed objects are unlinked
 * - directory backed objects are stuffed into the graveyard for userspace to
 *   delete
 * - unlocks the directory mutex
 */
static int cachefiles_bury_object(struct cachefiles_cache *cache,
                                  struct dentry *dir,
                                  struct dentry *rep,
                                  bool preemptive,
                                  enum fscache_why_object_killed why)
{
        struct dentry *grave, *trap;
        struct path path, path_to_graveyard;
        char nbuffer[8 + 8 + 1];
        int ret;

        _enter(",'%pd','%pd'", dir, rep);

        _debug("remove %p from %p", rep, dir);

        /* non-directories can just be unlinked */
        if (!d_is_dir(rep)) {
                _debug("unlink stale object");

                path.mnt = cache->mnt;
                path.dentry = dir;
                ret = security_path_unlink(&path, rep);
                if (ret < 0) {
                        cachefiles_io_error(cache, "Unlink security error");
                } else {
                        ret = vfs_unlink(d_inode(dir), rep, NULL);

                        if (preemptive)
                                cachefiles_mark_object_buried(cache, rep, why);
                }

                inode_unlock(d_inode(dir));

                if (ret == -EIO)
                        cachefiles_io_error(cache, "Unlink failed");

                _leave(" = %d", ret);
                return ret;
        }

        /* directories have to be moved to the graveyard */
        _debug("move stale object to graveyard");
        inode_unlock(d_inode(dir));

try_again:
        /* first step is to make up a grave dentry in the graveyard */
        sprintf(nbuffer, "%08x%08x",
                (uint32_t) get_seconds(),
                (uint32_t) atomic_inc_return(&cache->gravecounter));

        /* do the multiway lock magic */
        trap = lock_rename(cache->graveyard, dir);

        /* do some checks before getting the grave dentry */
        if (rep->d_parent != dir) {
                /* the entry was probably culled when we dropped the parent dir
                 * lock */
                unlock_rename(cache->graveyard, dir);
                _leave(" = 0 [culled?]");
                return 0;
        }

        if (!d_can_lookup(cache->graveyard)) {
                unlock_rename(cache->graveyard, dir);
                cachefiles_io_error(cache, "Graveyard no longer a directory");
                return -EIO;
        }

        if (trap == rep) {
                unlock_rename(cache->graveyard, dir);
                cachefiles_io_error(cache, "May not make directory loop");
                return -EIO;
        }

        if (d_mountpoint(rep)) {
                unlock_rename(cache->graveyard, dir);
                cachefiles_io_error(cache, "Mountpoint in cache");
                return -EIO;
        }

        grave = lookup_one_len(nbuffer, cache->graveyard, strlen(nbuffer));
        if (IS_ERR(grave)) {
                unlock_rename(cache->graveyard, dir);

                if (PTR_ERR(grave) == -ENOMEM) {
                        _leave(" = -ENOMEM");
                        return -ENOMEM;
                }

                cachefiles_io_error(cache, "Lookup error %ld",
                                    PTR_ERR(grave));
                return -EIO;
        }

        if (d_is_positive(grave)) {
                unlock_rename(cache->graveyard, dir);
                dput(grave);
                grave = NULL;
                cond_resched();
                goto try_again;
        }

        if (d_mountpoint(grave)) {
                unlock_rename(cache->graveyard, dir);
                dput(grave);
                cachefiles_io_error(cache, "Mountpoint in graveyard");
                return -EIO;
        }

        /* target should not be an ancestor of source */
        if (trap == grave) {
                unlock_rename(cache->graveyard, dir);
                dput(grave);
                cachefiles_io_error(cache, "May not make directory loop");
                return -EIO;
        }

        /* attempt the rename */
        path.mnt = cache->mnt;
        path.dentry = dir;
        path_to_graveyard.mnt = cache->mnt;
        path_to_graveyard.dentry = cache->graveyard;
        ret = security_path_rename(&path, rep, &path_to_graveyard, grave, 0);
        if (ret < 0) {
                cachefiles_io_error(cache, "Rename security error %d", ret);
        } else {
                ret = vfs_rename(d_inode(dir), rep,
                                 d_inode(cache->graveyard), grave, NULL, 0);
                if (ret != 0 && ret != -ENOMEM)
                        cachefiles_io_error(cache,
                                            "Rename failed with error %d", ret);

                if (preemptive)
                        cachefiles_mark_object_buried(cache, rep, why);
        }

        unlock_rename(cache->graveyard, dir);
        dput(grave);
        _leave(" = 0");
        return 0;
}

/*
 * delete an object representation from the cache
 */
int cachefiles_delete_object(struct cachefiles_cache *cache,
                             struct cachefiles_object *object)
{
        struct dentry *dir;
        int ret;

        _enter(",OBJ%x{%p}", object->fscache.debug_id, object->dentry);

        ASSERT(object->dentry);
        ASSERT(d_backing_inode(object->dentry));
        ASSERT(object->dentry->d_parent);

        dir = dget_parent(object->dentry);

        inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

        if (test_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->fscache.flags)) {
                /* object allocation for the same key preemptively deleted this
                 * object's file so that it could create its own file */
                _debug("object preemptively buried");
                inode_unlock(d_inode(dir));
                ret = 0;
        } else {
                /* we need to check that our parent is _still_ our parent - it
                 * may have been renamed */
                if (dir == object->dentry->d_parent) {
                        ret = cachefiles_bury_object(cache, dir,
                                                     object->dentry, false,
                                                     FSCACHE_OBJECT_WAS_RETIRED);
                } else {
                        /* it got moved, presumably by cachefilesd culling it,
                         * so it's no longer in the key path and we can ignore
                         * it */
                        inode_unlock(d_inode(dir));
                        ret = 0;
                }
        }

        dput(dir);
        _leave(" = %d", ret);
        return ret;
}

/*
 * walk from the parent object to the child object through the backing
 * filesystem, creating directories as we go
 */
int cachefiles_walk_to_object(struct cachefiles_object *parent,
                              struct cachefiles_object *object,
                              const char *key,
                              struct cachefiles_xattr *auxdata)
{
        struct cachefiles_cache *cache;
        struct dentry *dir, *next = NULL;
        struct path path;
        unsigned long start;
        const char *name;
        int ret, nlen;

        _enter("OBJ%x{%p},OBJ%x,%s,",
               parent->fscache.debug_id, parent->dentry,
               object->fscache.debug_id, key);

        cache = container_of(parent->fscache.cache,
                             struct cachefiles_cache, cache);
        path.mnt = cache->mnt;

        ASSERT(parent->dentry);
        ASSERT(d_backing_inode(parent->dentry));

        if (!(d_is_dir(parent->dentry))) {
                // TODO: convert file to dir
                _leave("looking up in none directory");
                return -ENOBUFS;
        }

        dir = dget(parent->dentry);

advance:
        /* attempt to transit the first directory component */
        name = key;
        nlen = strlen(key);

        /* key ends in a double NUL */
        key = key + nlen + 1;
        if (!*key)
                key = NULL;

lookup_again:
        /* search the current directory for the element name */
        _debug("lookup '%s'", name);

        inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

        start = jiffies;
        next = lookup_one_len(name, dir, nlen);
        cachefiles_hist(cachefiles_lookup_histogram, start);
        if (IS_ERR(next))
                goto lookup_error;

        _debug("next -> %p %s", next, d_backing_inode(next) ?
               "positive" : "negative");

        if (!key)
                object->new = !d_backing_inode(next);

        /* if this element of the path doesn't exist, then the lookup phase
         * failed, and we can release any readers in the certain knowledge that
         * there's nothing for them to actually read */
        if (d_is_negative(next))
                fscache_object_lookup_negative(&object->fscache);

        /* we need to create the object if it's negative */
        if (key || object->type == FSCACHE_COOKIE_TYPE_INDEX) {
                /* index objects and intervening tree levels must be subdirs */
                if (d_is_negative(next)) {
                        ret = cachefiles_has_space(cache, 1, 0);
                        if (ret < 0)
                                goto no_space_error;

                        path.dentry = dir;
                        ret = security_path_mkdir(&path, next, 0);
                        if (ret < 0)
                                goto create_error;
                        start = jiffies;
                        ret = vfs_mkdir(d_inode(dir), next, 0);
                        cachefiles_hist(cachefiles_mkdir_histogram, start);
                        if (ret < 0)
                                goto create_error;

                        ASSERT(d_backing_inode(next));

                        _debug("mkdir -> %p{%p{ino=%lu}}",
                               next, d_backing_inode(next),
                               d_backing_inode(next)->i_ino);

                } else if (!d_can_lookup(next)) {
                        pr_err("inode %lu is not a directory\n",
                               d_backing_inode(next)->i_ino);
                        ret = -ENOBUFS;
                        goto error;
                }

        } else {
                /* non-index objects start out life as files */
                if (d_is_negative(next)) {
                        ret = cachefiles_has_space(cache, 1, 0);
                        if (ret < 0)
                                goto no_space_error;

                        path.dentry = dir;
                        ret = security_path_mknod(&path, next, S_IFREG, 0);
                        if (ret < 0)
                                goto create_error;
                        start = jiffies;
                        ret = vfs_create(d_inode(dir), next, S_IFREG, true);
                        cachefiles_hist(cachefiles_create_histogram, start);
                        if (ret < 0)
                                goto create_error;

                        ASSERT(d_backing_inode(next));

                        _debug("create -> %p{%p{ino=%lu}}",
                               next, d_backing_inode(next),
                               d_backing_inode(next)->i_ino);

                } else if (!d_can_lookup(next) &&
                           !d_is_reg(next)) {
                        pr_err("inode %lu is not a file or directory\n",
                               d_backing_inode(next)->i_ino);
                        ret = -ENOBUFS;
                        goto error;
                }
        }

        /* process the next component */
        if (key) {
                _debug("advance");
                inode_unlock(d_inode(dir));
                dput(dir);
                dir = next;
                next = NULL;
                goto advance;
        }

        /* we've found the object we were looking for */
        object->dentry = next;

        /* if we've found that the terminal object exists, then we need to
         * check its attributes and delete it if it's out of date */
        if (!object->new) {
                _debug("validate '%pd'", next);

                ret = cachefiles_check_object_xattr(object, auxdata);
                if (ret == -ESTALE) {
                        /* delete the object (the deleter drops the directory
                         * mutex) */
                        object->dentry = NULL;

                        ret = cachefiles_bury_object(cache, dir, next, true,
                                                     FSCACHE_OBJECT_IS_STALE);
                        dput(next);
                        next = NULL;

                        if (ret < 0)
                                goto delete_error;

                        _debug("redo lookup");
                        fscache_object_retrying_stale(&object->fscache);
                        goto lookup_again;
                }
        }

        /* note that we're now using this object */
        ret = cachefiles_mark_object_active(cache, object);

        inode_unlock(d_inode(dir));
        dput(dir);
        dir = NULL;

        if (ret == -ETIMEDOUT)
                goto mark_active_timed_out;

        _debug("=== OBTAINED_OBJECT ===");

        if (object->new) {
                /* attach data to a newly constructed terminal object */
                ret = cachefiles_set_object_xattr(object, auxdata);
                if (ret < 0)
                        goto check_error;
        } else {
                /* always update the atime on an object we've just looked up
                 * (this is used to keep track of culling, and atimes are only
                 * updated by read, write and readdir but not lookup or
                 * open) */
                path.dentry = object->dentry;
                touch_atime(&path);
        }

        /* open a file interface onto a data file */
        if (object->type != FSCACHE_COOKIE_TYPE_INDEX) {
                if (d_is_reg(object->dentry)) {
                        const struct address_space_operations *aops;

                        ret = -EPERM;
                        aops = d_backing_inode(object->dentry)->i_mapping->a_ops;
                        if (!aops->bmap)
                                goto check_error;
                        if (object->dentry->d_sb->s_blocksize > PAGE_SIZE)
                                goto check_error;

                        object->backer = object->dentry;
                } else {
                        BUG(); // TODO: open file in data-class subdir
                }
        }

        object->new = 0;
        fscache_obtained_object(&object->fscache);

        _leave(" = 0 [%lu]", d_backing_inode(object->dentry)->i_ino);
        return 0;

no_space_error:
        fscache_object_mark_killed(&object->fscache, FSCACHE_OBJECT_NO_SPACE);
create_error:
        _debug("create error %d", ret);
        if (ret == -EIO)
                cachefiles_io_error(cache, "Create/mkdir failed");
        goto error;

mark_active_timed_out:
        _debug("mark active timed out");
        goto release_dentry;

check_error:
        _debug("check error %d", ret);
        cachefiles_mark_object_inactive(cache, object);
release_dentry:
        dput(object->dentry);
        object->dentry = NULL;
        goto error_out;

delete_error:
        _debug("delete error %d", ret);
        goto error_out2;

lookup_error:
        _debug("lookup error %ld", PTR_ERR(next));
        ret = PTR_ERR(next);
        if (ret == -EIO)
                cachefiles_io_error(cache, "Lookup failed");
        next = NULL;
error:
        inode_unlock(d_inode(dir));
        dput(next);
error_out2:
        dput(dir);
error_out:
        _leave(" = error %d", -ret);
        return ret;
}

/*
 * get a subdirectory
 */
struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
                                        struct dentry *dir,
                                        const char *dirname)
{
        struct dentry *subdir;
        unsigned long start;
        struct path path;
        int ret;

        _enter(",,%s", dirname);

        /* search the current directory for the element name */
        inode_lock(d_inode(dir));

        start = jiffies;
        subdir = lookup_one_len(dirname, dir, strlen(dirname));
        cachefiles_hist(cachefiles_lookup_histogram, start);
        if (IS_ERR(subdir)) {
                if (PTR_ERR(subdir) == -ENOMEM)
                        goto nomem_d_alloc;
                goto lookup_error;
        }

        _debug("subdir -> %p %s",
               subdir, d_backing_inode(subdir) ? "positive" : "negative");

        /* we need to create the subdir if it doesn't exist yet */
        if (d_is_negative(subdir)) {
                ret = cachefiles_has_space(cache, 1, 0);
                if (ret < 0)
                        goto mkdir_error;

                _debug("attempt mkdir");

                path.mnt = cache->mnt;
                path.dentry = dir;
                ret = security_path_mkdir(&path, subdir, 0700);
                if (ret < 0)
                        goto mkdir_error;
                ret = vfs_mkdir(d_inode(dir), subdir, 0700);
                if (ret < 0)
                        goto mkdir_error;

                ASSERT(d_backing_inode(subdir));

                _debug("mkdir -> %p{%p{ino=%lu}}",
                       subdir,
                       d_backing_inode(subdir),
                       d_backing_inode(subdir)->i_ino);
        }

        inode_unlock(d_inode(dir));

        /* we need to make sure the subdir is a directory */
        ASSERT(d_backing_inode(subdir));

        if (!d_can_lookup(subdir)) {
                pr_err("%s is not a directory\n", dirname);
                ret = -EIO;
                goto check_error;
        }

        ret = -EPERM;
        if (!d_backing_inode(subdir)->i_op->setxattr ||
            !d_backing_inode(subdir)->i_op->getxattr ||
            !d_backing_inode(subdir)->i_op->lookup ||
            !d_backing_inode(subdir)->i_op->mkdir ||
            !d_backing_inode(subdir)->i_op->create ||
            (!d_backing_inode(subdir)->i_op->rename &&
             !d_backing_inode(subdir)->i_op->rename2) ||
            !d_backing_inode(subdir)->i_op->rmdir ||
            !d_backing_inode(subdir)->i_op->unlink)
                goto check_error;

        _leave(" = [%lu]", d_backing_inode(subdir)->i_ino);
        return subdir;

check_error:
        dput(subdir);
        _leave(" = %d [check]", ret);
        return ERR_PTR(ret);

mkdir_error:
        inode_unlock(d_inode(dir));
        dput(subdir);
        pr_err("mkdir %s failed with error %d\n", dirname, ret);
        return ERR_PTR(ret);

lookup_error:
        inode_unlock(d_inode(dir));
        ret = PTR_ERR(subdir);
        pr_err("Lookup %s failed with error %d\n", dirname, ret);
        return ERR_PTR(ret);

nomem_d_alloc:
        inode_unlock(d_inode(dir));
        _leave(" = -ENOMEM");
        return ERR_PTR(-ENOMEM);
}

/*
 * find out if an object is in use or not
 * - if finds object and it's not in use:
 *   - returns a pointer to the object and a reference on it
 *   - returns with the directory locked
 */
static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache,
                                              struct dentry *dir,
                                              char *filename)
{
        struct cachefiles_object *object;
        struct rb_node *_n;
        struct dentry *victim;
        unsigned long start;
        int ret;

        //_enter(",%pd/,%s",
        //       dir, filename);

        /* look up the victim */
        inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

        start = jiffies;
        victim = lookup_one_len(filename, dir, strlen(filename));
        cachefiles_hist(cachefiles_lookup_histogram, start);
        if (IS_ERR(victim))
                goto lookup_error;

        //_debug("victim -> %p %s",
        //       victim, d_backing_inode(victim) ? "positive" : "negative");

        /* if the object is no longer there then we probably retired the object
         * at the netfs's request whilst the cull was in progress
         */
        if (d_is_negative(victim)) {
                inode_unlock(d_inode(dir));
                dput(victim);
                _leave(" = -ENOENT [absent]");
                return ERR_PTR(-ENOENT);
        }

        /* check to see if we're using this object */
        read_lock(&cache->active_lock);

        _n = cache->active_nodes.rb_node;

        while (_n) {
                object = rb_entry(_n, struct cachefiles_object, active_node);

                if (object->dentry > victim)
                        _n = _n->rb_left;
                else if (object->dentry < victim)
                        _n = _n->rb_right;
                else
                        goto object_in_use;
        }

        read_unlock(&cache->active_lock);

        //_leave(" = %p", victim);
        return victim;

object_in_use:
        read_unlock(&cache->active_lock);
        inode_unlock(d_inode(dir));
        dput(victim);
        //_leave(" = -EBUSY [in use]");
        return ERR_PTR(-EBUSY);

lookup_error:
        inode_unlock(d_inode(dir));
        ret = PTR_ERR(victim);
        if (ret == -ENOENT) {
                /* file or dir now absent - probably retired by netfs */
                _leave(" = -ESTALE [absent]");
                return ERR_PTR(-ESTALE);
        }

        if (ret == -EIO) {
                cachefiles_io_error(cache, "Lookup failed");
        } else if (ret != -ENOMEM) {
                pr_err("Internal error: %d\n", ret);
                ret = -EIO;
        }

        _leave(" = %d", ret);
        return ERR_PTR(ret);
}

/*
 * cull an object if it's not in use
 * - called only by cache manager daemon
 */
int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
                    char *filename)
{
        struct dentry *victim;
        int ret;

        _enter(",%pd/,%s", dir, filename);

        victim = cachefiles_check_active(cache, dir, filename);
        if (IS_ERR(victim))
                return PTR_ERR(victim);

        _debug("victim -> %p %s",
               victim, d_backing_inode(victim) ? "positive" : "negative");

        /* okay... the victim is not being used so we can cull it
         * - start by marking it as stale
         */
        _debug("victim is cullable");

        ret = cachefiles_remove_object_xattr(cache, victim);
        if (ret < 0)
                goto error_unlock;

        /* actually remove the victim (drops the dir mutex) */
        _debug("bury");

        ret = cachefiles_bury_object(cache, dir, victim, false,
                                     FSCACHE_OBJECT_WAS_CULLED);
        if (ret < 0)
                goto error;

        dput(victim);
        _leave(" = 0");
        return 0;

error_unlock:
        inode_unlock(d_inode(dir));
error:
        dput(victim);
        if (ret == -ENOENT) {
                /* file or dir now absent - probably retired by netfs */
                _leave(" = -ESTALE [absent]");
                return -ESTALE;
        }

        if (ret != -ENOMEM) {
                pr_err("Internal error: %d\n", ret);
                ret = -EIO;
        }

        _leave(" = %d", ret);
        return ret;
}

/*
 * find out if an object is in use or not
 * - called only by cache manager daemon
 * - returns -EBUSY or 0 to indicate whether an object is in use or not
 */
int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir,
                            char *filename)
{
        struct dentry *victim;

        //_enter(",%pd/,%s",
        //       dir, filename);

        victim = cachefiles_check_active(cache, dir, filename);
        if (IS_ERR(victim))
                return PTR_ERR(victim);

        inode_unlock(d_inode(dir));
        dput(victim);
        //_leave(" = 0");
        return 0;
}