/* CacheFiles path walking and related routines
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/quotaops.h>
#include <linux/xattr.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/slab.h>
#include "internal.h"

#define CACHEFILES_KEYBUF_SIZE 512
/*
 * dump debugging info about an object
 */
static noinline
void __cachefiles_printk_object(struct cachefiles_object *object,
                                const char *prefix,
                                u8 *keybuf)
{
        struct fscache_cookie *cookie;
        unsigned keylen, loop;
39 pr_err("%sobject: OBJ%x\n", prefix
, object
->fscache
.debug_id
);
40 pr_err("%sobjstate=%s fl=%lx wbusy=%x ev=%lx[%lx]\n",
41 prefix
, object
->fscache
.state
->name
,
42 object
->fscache
.flags
, work_busy(&object
->fscache
.work
),
43 object
->fscache
.events
, object
->fscache
.event_mask
);
44 pr_err("%sops=%u inp=%u exc=%u\n",
45 prefix
, object
->fscache
.n_ops
, object
->fscache
.n_in_progress
,
46 object
->fscache
.n_exclusive
);
47 pr_err("%sparent=%p\n",
48 prefix
, object
->fscache
.parent
);

        spin_lock(&object->fscache.lock);
        cookie = object->fscache.cookie;
        if (cookie) {
                pr_err("%scookie=%p [pr=%p nd=%p fl=%lx]\n",
                       prefix,
                       object->fscache.cookie,
                       object->fscache.cookie->parent,
                       object->fscache.cookie->netfs_data,
                       object->fscache.cookie->flags);
                if (keybuf && cookie->def)
                        keylen = cookie->def->get_key(cookie->netfs_data, keybuf,
                                                      CACHEFILES_KEYBUF_SIZE);
                else
                        keylen = 0;
        } else {
                pr_err("%scookie=NULL\n", prefix);
                keylen = 0;
        }
        spin_unlock(&object->fscache.lock);

        if (keylen) {
                pr_err("%skey=[%u] '", prefix, keylen);
                for (loop = 0; loop < keylen; loop++)
                        pr_cont("%02x", keybuf[loop]);
                pr_cont("'\n");
        }
}

/*
 * dump debugging info about a pair of objects
 */
static noinline void cachefiles_printk_object(struct cachefiles_object *object,
                                              struct cachefiles_object *xobject)
{
        u8 *keybuf;

        keybuf = kmalloc(CACHEFILES_KEYBUF_SIZE, GFP_NOIO);
        if (object)
                __cachefiles_printk_object(object, "", keybuf);
        if (xobject)
                __cachefiles_printk_object(xobject, "x", keybuf);
        kfree(keybuf);
}

/*
 * mark the owner of a dentry, if there is one, to indicate that that dentry
 * has been preemptively deleted
 * - the caller must hold the i_mutex on the dentry's parent as required to
 *   call vfs_unlink(), vfs_rmdir() or vfs_rename()
 */
static void cachefiles_mark_object_buried(struct cachefiles_cache *cache,
                                          struct dentry *dentry,
                                          enum fscache_why_object_killed why)
{
        struct cachefiles_object *object;
        struct rb_node *p;

        _enter(",'%pd'", dentry);
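
        /* search the tree of active objects, which is keyed by backing
         * dentry, to see whether a live object currently owns this dentry */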
        write_lock(&cache->active_lock);

        p = cache->active_nodes.rb_node;
        while (p) {
                object = rb_entry(p, struct cachefiles_object, active_node);
                if (object->dentry > dentry)
                        p = p->rb_left;
                else if (object->dentry < dentry)
                        p = p->rb_right;
                else
                        goto found_dentry;
        }

        write_unlock(&cache->active_lock);
        _leave(" [no owner]");
        return;

        /* found the dentry for  */
found_dentry:
128 kdebug("preemptive burial: OBJ%x [%s] %p",
129 object
->fscache
.debug_id
,
130 object
->fscache
.state
->name
,
133 if (fscache_object_is_live(&object
->fscache
)) {
135 pr_err("Error: Can't preemptively bury live object\n");
136 cachefiles_printk_object(object
, NULL
);
138 if (why
!= FSCACHE_OBJECT_IS_STALE
)
139 fscache_object_mark_killed(&object
->fscache
, why
);
142 write_unlock(&cache
->active_lock
);
143 _leave(" [owner marked]");

/*
 * record the fact that an object is now active
 */
static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
                                         struct cachefiles_object *object)
{
        struct cachefiles_object *xobject;
        struct rb_node **_p, *_parent = NULL;
        struct dentry *dentry;

        _enter(",%p", object);
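
        /* objects in use are kept in an rbtree indexed by backing dentry, so
         * that a collision with a dying object from a previous incarnation of
         * the same cache file can be detected and waited out */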

try_again:
        write_lock(&cache->active_lock);

        if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)) {
                pr_err("Error: Object already active\n");
                cachefiles_printk_object(object, NULL);
                BUG();
        }

        dentry = object->dentry;
        _p = &cache->active_nodes.rb_node;
        while (*_p) {
                _parent = *_p;
                xobject = rb_entry(_parent,
                                   struct cachefiles_object, active_node);

                ASSERT(xobject != object);

                if (xobject->dentry > dentry)
                        _p = &(*_p)->rb_left;
                else if (xobject->dentry < dentry)
                        _p = &(*_p)->rb_right;
                else
                        goto wait_for_old_object;
        }

        rb_link_node(&object->active_node, _parent, _p);
        rb_insert_color(&object->active_node, &cache->active_nodes);

        write_unlock(&cache->active_lock);
        _leave(" = 0");
        return 0;

        /* an old object from a previous incarnation is hogging the slot - we
         * need to wait for it to be destroyed */
wait_for_old_object:
        if (fscache_object_is_live(&xobject->fscache)) {
                pr_err("\n");
                pr_err("Error: Unexpected object collision\n");
                cachefiles_printk_object(object, xobject);
                BUG();
        }
        atomic_inc(&xobject->usage);
        write_unlock(&cache->active_lock);

        if (test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
                wait_queue_head_t *wq;

                signed long timeout = 60 * HZ;
                wait_queue_t wait;
                bool requeue;

                /* if the object we're waiting for is queued for processing,
                 * then just put ourselves on the queue behind it */
                if (work_pending(&xobject->fscache.work)) {
                        _debug("queue OBJ%x behind OBJ%x immediately",
                               object->fscache.debug_id,
                               xobject->fscache.debug_id);
                        goto requeue;
                }

                /* otherwise we sleep until either the object we're waiting for
                 * is done, or the fscache_object is congested */
                wq = bit_waitqueue(&xobject->flags, CACHEFILES_OBJECT_ACTIVE);
                init_wait(&wait);
                requeue = false;
                do {
                        prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
                        if (!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags))
                                break;

                        requeue = fscache_object_sleep_till_congested(&timeout);
                } while (timeout > 0 && !requeue);
                finish_wait(wq, &wait);

                if (requeue &&
                    test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
                        _debug("queue OBJ%x behind OBJ%x after wait",
                               object->fscache.debug_id,
                               xobject->fscache.debug_id);
                        goto requeue;
                }

                if (timeout <= 0) {
                        pr_err("\n");
                        pr_err("Error: Overlong wait for old active object to go away\n");
                        cachefiles_printk_object(object, xobject);
                        goto requeue;
                }
        }

        ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags));

        cache->cache.ops->put_object(&xobject->fscache);
        goto try_again;

requeue:
        clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
        cache->cache.ops->put_object(&xobject->fscache);
        _leave(" = -ETIMEDOUT");
        return -ETIMEDOUT;
}

/*
 * Mark an object as being inactive.
 */
void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
                                     struct cachefiles_object *object,
                                     blkcnt_t i_blocks)
{
        write_lock(&cache->active_lock);
        rb_erase(&object->active_node, &cache->active_nodes);
        clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
        write_unlock(&cache->active_lock);

        wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE);

        /* This object can now be culled, so we need to let the daemon know
         * that there is something it can remove if it needs to.
         */
        atomic_long_add(i_blocks, &cache->b_released);
        if (atomic_inc_return(&cache->f_released))
                cachefiles_state_changed(cache);
}

/*
 * delete an object representation from the cache
 * - file backed objects are unlinked
 * - directory backed objects are stuffed into the graveyard for userspace to
 *   delete
 * - unlocks the directory mutex
 */
static int cachefiles_bury_object(struct cachefiles_cache *cache,
                                  struct dentry *dir,
                                  struct dentry *rep,
                                  bool preemptive,
                                  enum fscache_why_object_killed why)
{
        struct dentry *grave, *trap;
        struct path path, path_to_graveyard;
        char nbuffer[8 + 8 + 1];
        int ret;

        _enter(",'%pd','%pd'", dir, rep);

        _debug("remove %p from %p", rep, dir);

        /* non-directories can just be unlinked */
        if (!d_is_dir(rep)) {
                _debug("unlink stale object");

                path.mnt = cache->mnt;
                path.dentry = dir;
                ret = security_path_unlink(&path, rep);
                if (ret < 0) {
                        cachefiles_io_error(cache, "Unlink security error");
                } else {
                        ret = vfs_unlink(d_inode(dir), rep, NULL);

                        if (preemptive)
                                cachefiles_mark_object_buried(cache, rep, why);
                }

                inode_unlock(d_inode(dir));

                if (ret == -EIO)
                        cachefiles_io_error(cache, "Unlink failed");

                _leave(" = %d", ret);
                return ret;
        }

        /* directories have to be moved to the graveyard */
        _debug("move stale object to graveyard");
        inode_unlock(d_inode(dir));

try_again:
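        /* the grave name is the burial time and a per-cache counter rendered
         * as two 8-digit hex numbers, which keeps names within the graveyard
         * unique */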
        /* first step is to make up a grave dentry in the graveyard */
        sprintf(nbuffer, "%08x%08x",
                (uint32_t) get_seconds(),
                (uint32_t) atomic_inc_return(&cache->gravecounter));
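
        /* lock_rename() locks both directories and, if one contains the
         * other, returns the dentry of the ancestor; the checks below refuse
         * the move if the object being buried or the grave slot is that
         * ancestor, since the rename would then create a directory loop */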
        /* do the multiway lock magic */
        trap = lock_rename(cache->graveyard, dir);

        /* do some checks before getting the grave dentry */
        if (rep->d_parent != dir) {
                /* the entry was probably culled when we dropped the parent dir
                 * lock */
                unlock_rename(cache->graveyard, dir);
                _leave(" = 0 [culled?]");
                return 0;
        }

        if (!d_can_lookup(cache->graveyard)) {
                unlock_rename(cache->graveyard, dir);
                cachefiles_io_error(cache, "Graveyard no longer a directory");
                return -EIO;
        }

        if (trap == rep) {
                unlock_rename(cache->graveyard, dir);
                cachefiles_io_error(cache, "May not make directory loop");
                return -EIO;
        }

        if (d_mountpoint(rep)) {
                unlock_rename(cache->graveyard, dir);
                cachefiles_io_error(cache, "Mountpoint in cache");
                return -EIO;
        }

        grave = lookup_one_len(nbuffer, cache->graveyard, strlen(nbuffer));
        if (IS_ERR(grave)) {
                unlock_rename(cache->graveyard, dir);

                if (PTR_ERR(grave) == -ENOMEM) {
                        _leave(" = -ENOMEM");
                        return -ENOMEM;
                }

                cachefiles_io_error(cache, "Lookup error %ld",
                                    PTR_ERR(grave));
                return -EIO;
        }

        if (d_is_positive(grave)) {
                unlock_rename(cache->graveyard, dir);
                dput(grave);
                grave = NULL;
                cond_resched();
                goto try_again;
        }

        if (d_mountpoint(grave)) {
                unlock_rename(cache->graveyard, dir);
                dput(grave);
                cachefiles_io_error(cache, "Mountpoint in graveyard");
                return -EIO;
        }

        /* target should not be an ancestor of source */
        if (trap == grave) {
                unlock_rename(cache->graveyard, dir);
                dput(grave);
                cachefiles_io_error(cache, "May not make directory loop");
                return -EIO;
        }

        /* attempt the rename */
        path.mnt = cache->mnt;
        path.dentry = dir;
        path_to_graveyard.mnt = cache->mnt;
        path_to_graveyard.dentry = cache->graveyard;
        ret = security_path_rename(&path, rep, &path_to_graveyard, grave, 0);
        if (ret < 0) {
                cachefiles_io_error(cache, "Rename security error %d", ret);
        } else {
                ret = vfs_rename(d_inode(dir), rep,
                                 d_inode(cache->graveyard), grave, NULL, 0);
                if (ret != 0 && ret != -ENOMEM)
                        cachefiles_io_error(cache,
                                            "Rename failed with error %d", ret);

                if (preemptive)
                        cachefiles_mark_object_buried(cache, rep, why);
        }

        unlock_rename(cache->graveyard, dir);
        dput(grave);
        _leave(" = 0");
        return 0;
}

/*
 * delete an object representation from the cache
 */
int cachefiles_delete_object(struct cachefiles_cache *cache,
                             struct cachefiles_object *object)
{
        struct dentry *dir;
        int ret;

        _enter(",OBJ%x{%p}", object->fscache.debug_id, object->dentry);

        ASSERT(object->dentry);
        ASSERT(d_backing_inode(object->dentry));
        ASSERT(object->dentry->d_parent);

        dir = dget_parent(object->dentry);

        inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

        if (test_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->fscache.flags)) {
                /* object allocation for the same key preemptively deleted this
                 * object's file so that it could create its own file */
                _debug("object preemptively buried");
                inode_unlock(d_inode(dir));
                ret = 0;
        } else {
                /* we need to check that our parent is _still_ our parent - it
                 * may have been renamed */
                if (dir == object->dentry->d_parent) {
                        ret = cachefiles_bury_object(cache, dir,
                                                     object->dentry, false,
                                                     FSCACHE_OBJECT_WAS_RETIRED);
                } else {
                        /* it got moved, presumably by cachefilesd culling it,
                         * so it's no longer in the key path and we can ignore
                         * it */
                        inode_unlock(d_inode(dir));
                        ret = 0;
                }
        }

        dput(dir);
        _leave(" = %d", ret);
        return ret;
}

/*
 * walk from the parent object to the child object through the backing
 * filesystem, creating directories as we go
 */
int cachefiles_walk_to_object(struct cachefiles_object *parent,
                              struct cachefiles_object *object,
                              const char *key,
                              struct cachefiles_xattr *auxdata)
{
        struct cachefiles_cache *cache;
        struct dentry *dir, *next = NULL;
        struct path path;
        unsigned long start;
        const char *name;
        int ret, nlen;

        _enter("OBJ%x{%p},OBJ%x,%s,",
               parent->fscache.debug_id, parent->dentry,
               object->fscache.debug_id, key);

        cache = container_of(parent->fscache.cache,
                             struct cachefiles_cache, cache);
        path.mnt = cache->mnt;

        ASSERT(parent->dentry);
        ASSERT(d_backing_inode(parent->dentry));

        if (!(d_is_dir(parent->dentry))) {
                // TODO: convert file to dir
                _leave("looking up in none directory");
                return -ENOBUFS;
        }

        dir = dget(parent->dentry);

advance:
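        /* the lookup key is a sequence of NUL-separated path elements ending
         * in a double NUL; each pass below consumes one element, creating
         * intermediate directories as needed */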
        /* attempt to transit the first directory component */
        name = key;
        nlen = strlen(key);

        /* key ends in a double NUL */
        key = key + nlen + 1;
        if (!*key)
                key = NULL;

lookup_again:
        /* search the current directory for the element name */
        _debug("lookup '%s'", name);

        inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

        start = jiffies;
        next = lookup_one_len(name, dir, nlen);
        cachefiles_hist(cachefiles_lookup_histogram, start);
        if (IS_ERR(next))
                goto lookup_error;

        _debug("next -> %p %s", next, d_backing_inode(next) ? "positive" : "negative");

        if (!key)
                object->new = !d_backing_inode(next);

        /* if this element of the path doesn't exist, then the lookup phase
         * failed, and we can release any readers in the certain knowledge that
         * there's nothing for them to actually read */
        if (d_is_negative(next))
                fscache_object_lookup_negative(&object->fscache);

        /* we need to create the object if it's negative */
        if (key || object->type == FSCACHE_COOKIE_TYPE_INDEX) {
                /* index objects and intervening tree levels must be subdirs */
                if (d_is_negative(next)) {
                        ret = cachefiles_has_space(cache, 1, 0);
                        if (ret < 0)
                                goto no_space_error;

                        path.dentry = dir;
                        ret = security_path_mkdir(&path, next, 0);
                        if (ret < 0)
                                goto create_error;
                        start = jiffies;
                        ret = vfs_mkdir(d_inode(dir), next, 0);
                        cachefiles_hist(cachefiles_mkdir_histogram, start);
                        if (ret < 0)
                                goto create_error;

                        ASSERT(d_backing_inode(next));

                        _debug("mkdir -> %p{%p{ino=%lu}}",
                               next, d_backing_inode(next), d_backing_inode(next)->i_ino);

                } else if (!d_can_lookup(next)) {
                        pr_err("inode %lu is not a directory\n",
                               d_backing_inode(next)->i_ino);
                        ret = -ENOBUFS;
                        goto error;
                }

        } else {
                /* non-index objects start out life as files */
                if (d_is_negative(next)) {
                        ret = cachefiles_has_space(cache, 1, 0);
                        if (ret < 0)
                                goto no_space_error;

                        path.dentry = dir;
                        ret = security_path_mknod(&path, next, S_IFREG, 0);
                        if (ret < 0)
                                goto create_error;
                        start = jiffies;
                        ret = vfs_create(d_inode(dir), next, S_IFREG, true);
                        cachefiles_hist(cachefiles_create_histogram, start);
                        if (ret < 0)
                                goto create_error;

                        ASSERT(d_backing_inode(next));

                        _debug("create -> %p{%p{ino=%lu}}",
                               next, d_backing_inode(next), d_backing_inode(next)->i_ino);

                } else if (!d_can_lookup(next) &&
                           !d_is_reg(next)) {
                        pr_err("inode %lu is not a file or directory\n",
                               d_backing_inode(next)->i_ino);
                        ret = -ENOBUFS;
                        goto error;
                }
        }

        /* process the next component */
        if (key) {
                _debug("advance");
                inode_unlock(d_inode(dir));
                dput(dir);
                dir = next;
                next = NULL;
                goto advance;
        }

        /* we've found the object we were looking for */
        object->dentry = next;

        /* if we've found that the terminal object exists, then we need to
         * check its attributes and delete it if it's out of date */
        if (!object->new) {
                _debug("validate '%pd'", next);

                ret = cachefiles_check_object_xattr(object, auxdata);
                if (ret == -ESTALE) {
                        /* delete the object (the deleter drops the directory
                         * mutex) */
                        object->dentry = NULL;

                        ret = cachefiles_bury_object(cache, dir, next, true,
                                                     FSCACHE_OBJECT_IS_STALE);
                        dput(next);
                        next = NULL;

                        if (ret < 0)
                                goto delete_error;

                        _debug("redo lookup");
                        fscache_object_retrying_stale(&object->fscache);
                        goto lookup_again;
                }
        }

        /* note that we're now using this object */
        ret = cachefiles_mark_object_active(cache, object);

        inode_unlock(d_inode(dir));
        dput(dir);
        dir = NULL;

        if (ret == -ETIMEDOUT)
                goto mark_active_timed_out;

        _debug("=== OBTAINED_OBJECT ===");

        if (object->new) {
                /* attach data to a newly constructed terminal object */
                ret = cachefiles_set_object_xattr(object, auxdata);
                if (ret < 0)
                        goto check_error;
        } else {
                /* always update the atime on an object we've just looked up
                 * (this is used to keep track of culling, and atimes are only
                 * updated by read, write and readdir but not lookup or
                 * open) */
                path.dentry = object->dentry;
                touch_atime(&path);
        }
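
        /* only a regular file on a filesystem that provides bmap() and whose
         * block size does not exceed a page can back a data object; bmap() is
         * what cachefiles later uses to probe which blocks are present */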
        /* open a file interface onto a data file */
        if (object->type != FSCACHE_COOKIE_TYPE_INDEX) {
                if (d_is_reg(object->dentry)) {
                        const struct address_space_operations *aops;

                        ret = -EPERM;
                        aops = d_backing_inode(object->dentry)->i_mapping->a_ops;
                        if (!aops->bmap)
                                goto check_error;
                        if (object->dentry->d_sb->s_blocksize > PAGE_SIZE)
                                goto check_error;

                        object->backer = object->dentry;
                } else {
                        BUG(); // TODO: open file in data-class subdir
                }
        }

        object->new = 0;
        fscache_obtained_object(&object->fscache);

        _leave(" = 0 [%lu]", d_backing_inode(object->dentry)->i_ino);
        return 0;

no_space_error:
        fscache_object_mark_killed(&object->fscache, FSCACHE_OBJECT_NO_SPACE);
create_error:
        _debug("create error %d", ret);
        if (ret == -EIO)
                cachefiles_io_error(cache, "Create/mkdir failed");
        goto error;

mark_active_timed_out:
        _debug("mark active timed out");
        goto release_dentry;

check_error:
        _debug("check error %d", ret);
        cachefiles_mark_object_inactive(
                cache, object, d_backing_inode(object->dentry)->i_blocks);
release_dentry:
        dput(object->dentry);
        object->dentry = NULL;
        goto error_out;

delete_error:
        _debug("delete error %d", ret);
        goto error_out2;

lookup_error:
        _debug("lookup error %ld", PTR_ERR(next));
        ret = PTR_ERR(next);
        if (ret == -EIO)
                cachefiles_io_error(cache, "Lookup failed");
        next = NULL;
error:
        inode_unlock(d_inode(dir));
        dput(next);
error_out2:
        dput(dir);
error_out:
        _leave(" = error %d", -ret);
        return ret;
}

/*
 * get a subdirectory
 */
struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
                                        struct dentry *dir,
                                        const char *dirname)
{
        struct dentry *subdir;
        unsigned long start;
        struct path path;
        int ret;

        _enter(",,%s", dirname);

        /* search the current directory for the element name */
        inode_lock(d_inode(dir));

        start = jiffies;
        subdir = lookup_one_len(dirname, dir, strlen(dirname));
        cachefiles_hist(cachefiles_lookup_histogram, start);
        if (IS_ERR(subdir)) {
                if (PTR_ERR(subdir) == -ENOMEM)
                        goto nomem_d_alloc;
                goto lookup_error;
        }

        _debug("subdir -> %p %s",
               subdir, d_backing_inode(subdir) ? "positive" : "negative");

        /* we need to create the subdir if it doesn't exist yet */
        if (d_is_negative(subdir)) {
                ret = cachefiles_has_space(cache, 1, 0);
                if (ret < 0)
                        goto mkdir_error;

                _debug("attempt mkdir");

                path.mnt = cache->mnt;
                path.dentry = dir;
                ret = security_path_mkdir(&path, subdir, 0700);
                if (ret < 0)
                        goto mkdir_error;
                ret = vfs_mkdir(d_inode(dir), subdir, 0700);
                if (ret < 0)
                        goto mkdir_error;

                ASSERT(d_backing_inode(subdir));

                _debug("mkdir -> %p{%p{ino=%lu}}",
                       subdir,
                       d_backing_inode(subdir),
                       d_backing_inode(subdir)->i_ino);
        }

        inode_unlock(d_inode(dir));

        /* we need to make sure the subdir is a directory */
        ASSERT(d_backing_inode(subdir));

        if (!d_can_lookup(subdir)) {
                pr_err("%s is not a directory\n", dirname);
                ret = -EIO;
                goto check_error;
        }
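
        /* the backing filesystem must support xattrs (used for coherency
         * data) and the full set of directory operations that cachefiles
         * relies on */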
        ret = -EPERM;
        if (!(d_backing_inode(subdir)->i_opflags & IOP_XATTR) ||
            !d_backing_inode(subdir)->i_op->lookup ||
            !d_backing_inode(subdir)->i_op->mkdir ||
            !d_backing_inode(subdir)->i_op->create ||
            !d_backing_inode(subdir)->i_op->rename ||
            !d_backing_inode(subdir)->i_op->rmdir ||
            !d_backing_inode(subdir)->i_op->unlink)
                goto check_error;

        _leave(" = [%lu]", d_backing_inode(subdir)->i_ino);
        return subdir;
817 _leave(" = %d [check]", ret
);
821 inode_unlock(d_inode(dir
));
823 pr_err("mkdir %s failed with error %d\n", dirname
, ret
);
827 inode_unlock(d_inode(dir
));
828 ret
= PTR_ERR(subdir
);
829 pr_err("Lookup %s failed with error %d\n", dirname
, ret
);
833 inode_unlock(d_inode(dir
));
834 _leave(" = -ENOMEM");
835 return ERR_PTR(-ENOMEM
);

/*
 * find out if an object is in use or not
 * - if finds object and it's not in use:
 *   - returns a pointer to the object and a reference on it
 *   - returns with the directory locked
 */
static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache,
                                              struct dentry *dir,
                                              char *filename)
{
        struct cachefiles_object *object;
        struct rb_node *_n;
        struct dentry *victim;
        unsigned long start;
        int ret;

        //_enter(",%pd/,%s",
        //       dir, filename);

        /* look up the victim */
        inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

        start = jiffies;
        victim = lookup_one_len(filename, dir, strlen(filename));
        cachefiles_hist(cachefiles_lookup_histogram, start);
        if (IS_ERR(victim))
                goto lookup_error;
866 //_debug("victim -> %p %s",
867 // victim, d_backing_inode(victim) ? "positive" : "negative");
869 /* if the object is no longer there then we probably retired the object
870 * at the netfs's request whilst the cull was in progress
872 if (d_is_negative(victim
)) {
873 inode_unlock(d_inode(dir
));
875 _leave(" = -ENOENT [absent]");
876 return ERR_PTR(-ENOENT
);

        /* check to see if we're using this object */
        read_lock(&cache->active_lock);

        _n = cache->active_nodes.rb_node;

        while (_n) {
                object = rb_entry(_n, struct cachefiles_object, active_node);

                if (object->dentry > victim)
                        _n = _n->rb_left;
                else if (object->dentry < victim)
                        _n = _n->rb_right;
                else
                        goto object_in_use;
        }

        read_unlock(&cache->active_lock);

        //_leave(" = %p", victim);
        return victim;

object_in_use:
        read_unlock(&cache->active_lock);
        inode_unlock(d_inode(dir));
        dput(victim);

        //_leave(" = -EBUSY [in use]");
        return ERR_PTR(-EBUSY);

lookup_error:
        inode_unlock(d_inode(dir));
        ret = PTR_ERR(victim);
        if (ret == -ENOENT) {
                /* file or dir now absent - probably retired by netfs */
                _leave(" = -ESTALE [absent]");
                return ERR_PTR(-ESTALE);
        }

        if (ret == -EIO) {
                cachefiles_io_error(cache, "Lookup failed");
        } else if (ret != -ENOMEM) {
                pr_err("Internal error: %d\n", ret);
                ret = -EIO;
        }

        _leave(" = %d", ret);
        return ERR_PTR(ret);
}

/*
 * cull an object if it's not in use
 * - called only by cache manager daemon
 */
int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
                    char *filename)
{
        struct dentry *victim;
        int ret;

        _enter(",%pd/,%s", dir, filename);

        victim = cachefiles_check_active(cache, dir, filename);
        if (IS_ERR(victim))
                return PTR_ERR(victim);

        _debug("victim -> %p %s",
               victim, d_backing_inode(victim) ? "positive" : "negative");

        /* okay... the victim is not being used so we can cull it
         * - start by marking it as stale
         */
        _debug("victim is cullable");

        ret = cachefiles_remove_object_xattr(cache, victim);
        if (ret < 0)
                goto error_unlock;

        /* actually remove the victim (drops the dir mutex) */
        _debug("bury");

        ret = cachefiles_bury_object(cache, dir, victim, false,
                                     FSCACHE_OBJECT_WAS_CULLED);
        if (ret < 0)
                goto error;

        dput(victim);
        _leave(" = 0");
        return 0;

error_unlock:
        inode_unlock(d_inode(dir));
error:
        dput(victim);
        if (ret == -ENOENT) {
                /* file or dir now absent - probably retired by netfs */
                _leave(" = -ESTALE [absent]");
                return -ESTALE;
        }

        if (ret != -ENOMEM) {
                pr_err("Internal error: %d\n", ret);
                ret = -EIO;
        }

        _leave(" = %d", ret);
        return ret;
}

/*
 * find out if an object is in use or not
 * - called only by cache manager daemon
 * - returns -EBUSY or 0 to indicate whether an object is in use or not
 */
int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir,
                            char *filename)
{
        struct dentry *victim;

        //_enter(",%pd/,%s",
        //       dir, filename);

        victim = cachefiles_check_active(cache, dir, filename);
        if (IS_ERR(victim))
                return PTR_ERR(victim);

        inode_unlock(d_inode(dir));