/* netfs cookie management
 *
 * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/filesystems/caching/netfs-api.txt for more information on
 * the netfs API.
 */

#define FSCACHE_DEBUG_LEVEL COOKIE
#include <linux/module.h>
#include <linux/slab.h>
#include "internal.h"
struct kmem_cache *fscache_cookie_jar;

static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);

static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
static int fscache_alloc_object(struct fscache_cache *cache,
                                struct fscache_cookie *cookie);
static int fscache_attach_object(struct fscache_cookie *cookie,
                                 struct fscache_object *object);
/*
 * initialise a cookie jar slab element prior to any use
 */
void fscache_cookie_init_once(void *_cookie)
{
        struct fscache_cookie *cookie = _cookie;

        memset(cookie, 0, sizeof(*cookie));
        spin_lock_init(&cookie->lock);
        spin_lock_init(&cookie->stores_lock);
        INIT_HLIST_HEAD(&cookie->backing_objects);
}
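
/*
 * Illustrative sketch (an assumption, not part of this file): the cookie jar
 * slab is expected to be created during FS-Cache initialisation with the
 * routine above installed as its constructor, along these lines:
 *
 *	fscache_cookie_jar = kmem_cache_create("fscache_cookie_jar",
 *					       sizeof(struct fscache_cookie),
 *					       0, 0, fscache_cookie_init_once);
 *	if (!fscache_cookie_jar)
 *		return -ENOMEM;
 */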
/*
 * request a cookie to represent an object (index, datafile, xattr, etc)
 * - parent specifies the parent object
 *   - the top level index cookie for each netfs is stored in the fscache_netfs
 *     struct upon registration
 * - def points to the definition
 * - the netfs_data will be passed to the functions pointed to in *def
 * - all attached caches will be searched to see if they contain this object
 * - index objects aren't stored on disk until there's a dependent file that
 *   needs storing
 * - other objects are stored in a selected cache immediately, and all the
 *   indices forming the path to it are instantiated if necessary
 * - we never let on to the netfs about errors
 *   - we may set a negative cookie pointer, but that's okay
 */
struct fscache_cookie *__fscache_acquire_cookie(
        struct fscache_cookie *parent,
        const struct fscache_cookie_def *def,
        void *netfs_data)
{
        struct fscache_cookie *cookie;

        BUG_ON(!def);

        _enter("{%s},{%s},%p",
               parent ? (char *) parent->def->name : "<no-parent>",
               def->name, netfs_data);

        fscache_stat(&fscache_n_acquires);

        /* if there's no parent cookie, then we don't create one here either */
        if (!parent) {
                fscache_stat(&fscache_n_acquires_null);
                _leave(" [no parent]");
                return NULL;
        }

        /* validate the definition */
        BUG_ON(!def->get_key);
        BUG_ON(!def->name[0]);

        BUG_ON(def->type == FSCACHE_COOKIE_TYPE_INDEX &&
               parent->def->type != FSCACHE_COOKIE_TYPE_INDEX);

        /* allocate and initialise a cookie */
        cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
        if (!cookie) {
                fscache_stat(&fscache_n_acquires_oom);
                _leave(" [ENOMEM]");
                return NULL;
        }

        atomic_set(&cookie->usage, 1);
        atomic_set(&cookie->n_children, 0);

        /* We keep the active count elevated until relinquishment to prevent an
         * attempt to wake up every time the object operations queue quiesces.
         */
        atomic_set(&cookie->n_active, 1);

        atomic_inc(&parent->usage);
        atomic_inc(&parent->n_children);

        cookie->def             = def;
        cookie->parent          = parent;
        cookie->netfs_data      = netfs_data;
        cookie->flags           = 0;

        /* radix tree insertion won't use the preallocation pool unless it's
         * told it may not wait */
        INIT_RADIX_TREE(&cookie->stores, GFP_NOFS & ~__GFP_WAIT);

        switch (cookie->def->type) {
        case FSCACHE_COOKIE_TYPE_INDEX:
                fscache_stat(&fscache_n_cookie_index);
                break;
        case FSCACHE_COOKIE_TYPE_DATAFILE:
                fscache_stat(&fscache_n_cookie_data);
                break;
        default:
                fscache_stat(&fscache_n_cookie_special);
                break;
        }

        /* if the object is an index then we need do nothing more here - we
         * create indices on disk when we need them as an index may exist in
         * multiple caches */
        if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) {
                if (fscache_acquire_non_index_cookie(cookie) < 0) {
                        atomic_dec(&parent->n_children);
                        __fscache_cookie_put(cookie);
                        fscache_stat(&fscache_n_acquires_nobufs);
                        _leave(" = NULL");
                        return NULL;
                }
        }

        fscache_stat(&fscache_n_acquires_ok);
        _leave(" = %p", cookie);
        return cookie;
}
EXPORT_SYMBOL(__fscache_acquire_cookie);
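
/*
 * Illustrative usage sketch (an assumption, not part of this file): a netfs
 * would normally call the fscache_acquire_cookie() wrapper from
 * <linux/fscache.h> rather than this function directly, for example when
 * setting up one of its inodes. The vnode and volume names here are
 * hypothetical:
 *
 *	vnode->cache = fscache_acquire_cookie(volume_cookie,
 *					      &my_inode_cache_def,
 *					      vnode);
 *
 * A NULL return is tolerated by design: subsequent fscache calls made on a
 * NULL cookie simply become no-ops.
 */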
/*
 * acquire a non-index cookie
 * - this must make sure the index chain is instantiated and instantiate the
 *   object representation too
 */
static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
{
        struct fscache_object *object;
        struct fscache_cache *cache;
        uint64_t i_size;
        int ret;

        _enter("");

        cookie->flags = 1 << FSCACHE_COOKIE_UNAVAILABLE;

        /* now we need to see whether the backing objects for this cookie yet
         * exist; if not, there'll be nothing to search */
        down_read(&fscache_addremove_sem);

        if (list_empty(&fscache_cache_list)) {
                up_read(&fscache_addremove_sem);
                _leave(" = 0 [no caches]");
                return 0;
        }

        /* select a cache in which to store the object */
        cache = fscache_select_cache_for_object(cookie->parent);
        if (!cache) {
                up_read(&fscache_addremove_sem);
                fscache_stat(&fscache_n_acquires_no_cache);
                _leave(" = -ENOMEDIUM [no cache]");
                return -ENOMEDIUM;
        }

        _debug("cache %s", cache->tag->name);

        cookie->flags =
                (1 << FSCACHE_COOKIE_LOOKING_UP) |
                (1 << FSCACHE_COOKIE_NO_DATA_YET);

        /* ask the cache to allocate objects for this cookie and its parent
         * chain */
        ret = fscache_alloc_object(cache, cookie);
        if (ret < 0) {
                up_read(&fscache_addremove_sem);
                _leave(" = %d", ret);
                return ret;
        }

        /* pass on how big the object we're caching is supposed to be */
        cookie->def->get_attr(cookie->netfs_data, &i_size);

        spin_lock(&cookie->lock);
        if (hlist_empty(&cookie->backing_objects)) {
                spin_unlock(&cookie->lock);
                goto unavailable;
        }

        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        fscache_set_store_limit(object, i_size);

        /* initiate the process of looking up all the objects in the chain
         * (done by fscache_initialise_object()) */
        fscache_raise_event(object, FSCACHE_OBJECT_EV_NEW_CHILD);

        spin_unlock(&cookie->lock);

        /* we may be required to wait for lookup to complete at this point */
        if (!fscache_defer_lookup) {
                _debug("non-deferred lookup %p", &cookie->flags);
                wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
                            fscache_wait_bit, TASK_UNINTERRUPTIBLE);
                _debug("complete");
                if (test_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags))
                        goto unavailable;
        }

        up_read(&fscache_addremove_sem);
        _leave(" = 0 [deferred]");
        return 0;

unavailable:
        up_read(&fscache_addremove_sem);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
/*
 * recursively allocate cache object records for a cookie/cache combination
 * - caller must be holding the addremove sem
 */
static int fscache_alloc_object(struct fscache_cache *cache,
                                struct fscache_cookie *cookie)
{
        struct fscache_object *object;
        int ret;

        _enter("%p,%p{%s}", cache, cookie, cookie->def->name);

        spin_lock(&cookie->lock);
        hlist_for_each_entry(object, &cookie->backing_objects,
                             cookie_link) {
                if (object->cache == cache)
                        goto object_already_extant;
        }
        spin_unlock(&cookie->lock);

        /* ask the cache to allocate an object (we may end up with duplicate
         * objects at this stage, but we sort that out later) */
        fscache_stat(&fscache_n_cop_alloc_object);
        object = cache->ops->alloc_object(cache, cookie);
        fscache_stat_d(&fscache_n_cop_alloc_object);
        if (IS_ERR(object)) {
                fscache_stat(&fscache_n_object_no_alloc);
                ret = PTR_ERR(object);
                goto error;
        }

        fscache_stat(&fscache_n_object_alloc);

        object->debug_id = atomic_inc_return(&fscache_object_debug_id);

        _debug("ALLOC OBJ%x: %s {%lx}",
               object->debug_id, cookie->def->name, object->events);

        ret = fscache_alloc_object(cache, cookie->parent);
        if (ret < 0)
                goto error_put;

        /* only attach if we managed to allocate all we needed, otherwise
         * discard the object we just allocated and instead use the one
         * attached to the cookie */
        if (fscache_attach_object(cookie, object) < 0) {
                fscache_stat(&fscache_n_cop_put_object);
                cache->ops->put_object(object);
                fscache_stat_d(&fscache_n_cop_put_object);
        }

        _leave(" = 0");
        return 0;

object_already_extant:
        ret = -ENOBUFS;
        if (fscache_object_is_dead(object)) {
                spin_unlock(&cookie->lock);
                goto error;
        }
        spin_unlock(&cookie->lock);
        _leave(" = 0 [found]");
        return 0;

error_put:
        fscache_stat(&fscache_n_cop_put_object);
        cache->ops->put_object(object);
        fscache_stat_d(&fscache_n_cop_put_object);
error:
        _leave(" = %d", ret);
        return ret;
}
/*
 * attach a cache object to a cookie
 */
static int fscache_attach_object(struct fscache_cookie *cookie,
                                 struct fscache_object *object)
{
        struct fscache_object *p;
        struct fscache_cache *cache = object->cache;
        int ret;

        _enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id);

        spin_lock(&cookie->lock);

        /* there may be multiple initial creations of this object, but we only
         * want one */
        ret = -EEXIST;
        hlist_for_each_entry(p, &cookie->backing_objects, cookie_link) {
                if (p->cache == object->cache) {
                        if (fscache_object_is_dying(p))
                                ret = -ENOBUFS;
                        goto cant_attach_object;
                }
        }

        /* pin the parent object */
        spin_lock_nested(&cookie->parent->lock, 1);
        hlist_for_each_entry(p, &cookie->parent->backing_objects,
                             cookie_link) {
                if (p->cache == object->cache) {
                        if (fscache_object_is_dying(p)) {
                                ret = -ENOBUFS;
                                spin_unlock(&cookie->parent->lock);
                                goto cant_attach_object;
                        }
                        object->parent = p;
                        spin_lock(&p->lock);
                        p->n_children++;
                        spin_unlock(&p->lock);
                        break;
                }
        }
        spin_unlock(&cookie->parent->lock);

        /* attach to the cache's object list */
        if (list_empty(&object->cache_link)) {
                spin_lock(&cache->object_list_lock);
                list_add(&object->cache_link, &cache->object_list);
                spin_unlock(&cache->object_list_lock);
        }

        /* attach to the cookie */
        object->cookie = cookie;
        atomic_inc(&cookie->usage);
        hlist_add_head(&object->cookie_link, &cookie->backing_objects);

        fscache_objlist_add(object);
        ret = 0;

cant_attach_object:
        spin_unlock(&cookie->lock);
        _leave(" = %d", ret);
        return ret;
}
/*
 * Invalidate an object. Callable with spinlocks held.
 */
void __fscache_invalidate(struct fscache_cookie *cookie)
{
        struct fscache_object *object;

        _enter("{%s}", cookie->def->name);

        fscache_stat(&fscache_n_invalidates);

        /* Only permit invalidation of data files. Invalidating an index will
         * require the caller to release all its attachments to the tree rooted
         * there, and if it's doing that, it may as well just retire the
         * cookie.
         */
        ASSERTCMP(cookie->def->type, ==, FSCACHE_COOKIE_TYPE_DATAFILE);

        /* We will be updating the cookie too. */
        BUG_ON(!cookie->def->get_aux);

        /* If there's an object, we tell the object state machine to handle the
         * invalidation on our behalf, otherwise there's nothing to do.
         */
        if (!hlist_empty(&cookie->backing_objects)) {
                spin_lock(&cookie->lock);

                if (!hlist_empty(&cookie->backing_objects) &&
                    !test_and_set_bit(FSCACHE_COOKIE_INVALIDATING,
                                      &cookie->flags)) {
                        object = hlist_entry(cookie->backing_objects.first,
                                             struct fscache_object,
                                             cookie_link);
                        if (fscache_object_is_live(object))
                                fscache_raise_event(
                                        object, FSCACHE_OBJECT_EV_INVALIDATE);
                }

                spin_unlock(&cookie->lock);
        }

        _leave("");
}
EXPORT_SYMBOL(__fscache_invalidate);
/*
 * Wait for object invalidation to complete.
 */
void __fscache_wait_on_invalidate(struct fscache_cookie *cookie)
{
        _enter("%p", cookie);

        wait_on_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING,
                    fscache_wait_bit_interruptible,
                    TASK_UNINTERRUPTIBLE);

        _leave("");
}
EXPORT_SYMBOL(__fscache_wait_on_invalidate);
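
/*
 * Illustrative usage sketch (an assumption, not part of this file): a netfs
 * that finds its copy of a file has been superseded on the server would go
 * through the fscache_invalidate() and fscache_wait_on_invalidate() wrappers
 * from <linux/fscache.h>. The helper and vnode names here are hypothetical:
 *
 *	if (data_changed_on_server(vnode)) {
 *		fscache_invalidate(vnode->cache);
 *		fscache_wait_on_invalidate(vnode->cache);
 *	}
 */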
/*
 * update the index entries backing a cookie
 */
void __fscache_update_cookie(struct fscache_cookie *cookie)
{
        struct fscache_object *object;

        fscache_stat(&fscache_n_updates);

        if (!cookie) {
                fscache_stat(&fscache_n_updates_null);
                _leave(" [no cookie]");
                return;
        }

        _enter("{%s}", cookie->def->name);

        BUG_ON(!cookie->def->get_aux);

        spin_lock(&cookie->lock);

        /* update the index entry on disk in each cache backing this cookie */
        hlist_for_each_entry(object,
                             &cookie->backing_objects, cookie_link) {
                fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE);
        }

        spin_unlock(&cookie->lock);
        _leave("");
}
EXPORT_SYMBOL(__fscache_update_cookie);
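
/*
 * Illustrative usage sketch (an assumption, not part of this file): after the
 * netfs changes whatever its get_aux() callback reports (a change attribute,
 * say), it would nudge the backing caches through the fscache_update_cookie()
 * wrapper from <linux/fscache.h>. The vnode fields here are hypothetical:
 *
 *	vnode->change_attr = new_change_attr;
 *	fscache_update_cookie(vnode->cache);
 */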
/*
 * release a cookie back to the cache
 * - the object will be marked as recyclable on disk if retire is true
 * - all dependents of this cookie must have already been unregistered
 *   (indices/files/pages)
 */
void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
{
        struct fscache_object *object;

        fscache_stat(&fscache_n_relinquishes);
        if (retire)
                fscache_stat(&fscache_n_relinquishes_retire);

        if (!cookie) {
                fscache_stat(&fscache_n_relinquishes_null);
                _leave(" [no cookie]");
                return;
        }

        _enter("%p{%s,%p,%d},%d",
               cookie, cookie->def->name, cookie->netfs_data,
               atomic_read(&cookie->n_active), retire);

        ASSERTCMP(atomic_read(&cookie->n_active), >, 0);

        if (atomic_read(&cookie->n_children) != 0) {
                printk(KERN_ERR "FS-Cache: Cookie '%s' still has children\n",
                       cookie->def->name);
                BUG();
        }

        /* No further netfs-accessing operations on this cookie permitted */
        set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags);
        if (retire)
                set_bit(FSCACHE_COOKIE_RETIRED, &cookie->flags);

        spin_lock(&cookie->lock);
        hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
                fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
        }
        spin_unlock(&cookie->lock);

        /* Wait for cessation of activity requiring access to the netfs (when
         * n_active reaches 0).
         */
        if (!atomic_dec_and_test(&cookie->n_active))
                wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t,
                                 TASK_UNINTERRUPTIBLE);

        /* Clear pointers back to the netfs */
        cookie->netfs_data      = NULL;
        cookie->def             = NULL;
        BUG_ON(cookie->stores.rnode);

        if (cookie->parent) {
                ASSERTCMP(atomic_read(&cookie->parent->usage), >, 0);
                ASSERTCMP(atomic_read(&cookie->parent->n_children), >, 0);
                atomic_dec(&cookie->parent->n_children);
        }

        /* Dispose of the netfs's link to the cookie */
        ASSERTCMP(atomic_read(&cookie->usage), >, 0);
        fscache_cookie_put(cookie);

        _leave("");
}
EXPORT_SYMBOL(__fscache_relinquish_cookie);
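
/*
 * Illustrative usage sketch (an assumption, not part of this file): when a
 * netfs evicts one of its inodes it would drop the cookie through the
 * fscache_relinquish_cookie() wrapper from <linux/fscache.h>, retiring the
 * on-disk object if the inode was deleted. The vnode names are hypothetical:
 *
 *	fscache_relinquish_cookie(vnode->cache, inode_was_deleted);
 *	vnode->cache = NULL;
 */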
/*
 * destroy a cookie
 */
void __fscache_cookie_put(struct fscache_cookie *cookie)
{
        struct fscache_cookie *parent;

        _enter("%p", cookie);

        for (;;) {
                _debug("FREE COOKIE %p", cookie);
                parent = cookie->parent;
                BUG_ON(!hlist_empty(&cookie->backing_objects));
                kmem_cache_free(fscache_cookie_jar, cookie);

                if (!parent)
                        break;

                cookie = parent;
                BUG_ON(atomic_read(&cookie->usage) <= 0);
                if (!atomic_dec_and_test(&cookie->usage))
                        break;
        }

        _leave("");
}
/*
 * check the consistency between the netfs inode and the backing cache
 *
 * NOTE: it only serves non-index (data file) cookies
 */
int __fscache_check_consistency(struct fscache_cookie *cookie)
{
        struct fscache_operation *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,", cookie);

        ASSERTCMP(cookie->def->type, ==, FSCACHE_COOKIE_TYPE_DATAFILE);

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        if (hlist_empty(&cookie->backing_objects))
                return 0;

        op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
        if (!op)
                return -ENOMEM;

        fscache_operation_init(op, NULL, NULL);
        op->flags = FSCACHE_OP_MYTHREAD |
                (1 << FSCACHE_OP_WAITING) |
                (1 << FSCACHE_OP_UNUSE_COOKIE);

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto inconsistent;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
        if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
                goto inconsistent;

        op->debug_id = atomic_inc_return(&fscache_op_debug_id);

        atomic_inc(&cookie->n_active);
        if (fscache_submit_op(object, op) < 0)
                goto submit_failed;

        /* the work queue now carries its own ref on the object */
        spin_unlock(&cookie->lock);

        ret = fscache_wait_for_operation_activation(object, op,
                                                    NULL, NULL, NULL);
        if (ret == 0) {
                /* ask the cache to honour the operation */
                ret = object->cache->ops->check_consistency(op);
                fscache_op_complete(op, false);
        } else if (ret == -ENOBUFS) {
                ret = 0;
        }

        fscache_put_operation(op);
        _leave(" = %d", ret);
        return ret;

submit_failed:
        atomic_dec(&cookie->n_active);
inconsistent:
        spin_unlock(&cookie->lock);
        kfree(op);
        _leave(" = -ESTALE");
        return -ESTALE;
}
EXPORT_SYMBOL(__fscache_check_consistency);
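
/*
 * Illustrative usage sketch (an assumption, not part of this file): a netfs
 * can call the fscache_check_consistency() wrapper from <linux/fscache.h>,
 * e.g. when a file is opened, and invalidate the cache object if the stored
 * auxiliary data no longer matches. The vnode name is hypothetical:
 *
 *	if (fscache_check_consistency(vnode->cache) != 0)
 *		fscache_invalidate(vnode->cache);
 */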