/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "internal.h"
/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
	void *val;

	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	rcu_read_unlock();
	trace_fscache_check_page(cookie, page, val, 0);

	return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);
/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
	wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

	trace_fscache_page(cookie, page, fscache_page_write_wait);

	wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);
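
/*
 * Example: a netfs that must not let a page be modified or reclaimed while
 * the cache is still writing it out can wait on the page first, via the
 * fscache_wait_on_page_write() wrapper. A minimal sketch, assuming a
 * hypothetical netfs inode "ni" carrying the cookie:
 *
 *	if (PageFsCache(page))
 *		fscache_wait_on_page_write(ni->fscache, page);
 */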
/*
 * wait for a page to finish being written to the cache. Put a timeout here
 * since we might be called recursively via parent fs.
 */
static
bool release_page_wait_timeout(struct fscache_cookie *cookie, struct page *page)
{
	wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

	return wait_event_timeout(*wq, !__fscache_check_page_write(cookie, page),
				  HZ);
}
/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_DIRECT_RECLAIM is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
				  struct page *page,
				  gfp_t gfp)
{
	struct page *xpage;
	void *val;

	_enter("%p,%p,%x", cookie, page, gfp);

	trace_fscache_page(cookie, page, fscache_page_maybe_release);

try_again:
	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	if (!val) {
		rcu_read_unlock();
		fscache_stat(&fscache_n_store_vmscan_not_storing);
		__fscache_uncache_page(cookie, page);
		return true;
	}

	/* see if the page is actually undergoing storage - if so we can't get
	 * rid of it till the cache has finished with it */
	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		rcu_read_unlock();
		goto page_busy;
	}

	/* the page is pending storage, so we attempt to cancel the store and
	 * discard the store request so that the page can be reclaimed */
	spin_lock(&cookie->stores_lock);
	rcu_read_unlock();

	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		/* the page started to undergo storage whilst we were looking,
		 * so now we can only wait or return */
		spin_unlock(&cookie->stores_lock);
		goto page_busy;
	}

	xpage = radix_tree_delete(&cookie->stores, page->index);
	trace_fscache_page(cookie, page, fscache_page_radix_delete);
	spin_unlock(&cookie->stores_lock);

	if (xpage) {
		fscache_stat(&fscache_n_store_vmscan_cancelled);
		fscache_stat(&fscache_n_store_radix_deletes);
		ASSERTCMP(xpage, ==, page);
	} else {
		fscache_stat(&fscache_n_store_vmscan_gone);
	}

	wake_up_bit(&cookie->flags, 0);
	trace_fscache_wake_cookie(cookie);
	if (xpage)
		put_page(xpage);
	__fscache_uncache_page(cookie, page);
	_leave(" = true");
	return true;

page_busy:
	/* We will wait here if we're allowed to, but that could deadlock the
	 * allocator as the work threads writing to the cache may all end up
	 * sleeping on memory allocation, so we may need to impose a timeout
	 * too. */
	if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS)) {
		fscache_stat(&fscache_n_store_vmscan_busy);
		return false;
	}

	fscache_stat(&fscache_n_store_vmscan_wait);
	if (!release_page_wait_timeout(cookie, page))
		_debug("fscache writeout timeout page: %p{%lx}",
		       page, page->index);

	gfp &= ~__GFP_DIRECT_RECLAIM;
	goto try_again;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);
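
/*
 * Example: a netfs normally reaches this via the fscache_maybe_release_page()
 * wrapper from its ->releasepage() address_space operation. A minimal sketch
 * with hypothetical netfs names (mynetfs_releasepage, MYNETFS_I):
 *
 *	static int mynetfs_releasepage(struct page *page, gfp_t gfp)
 *	{
 *		struct mynetfs_inode *ni = MYNETFS_I(page->mapping->host);
 *
 *		if (PageFsCache(page) &&
 *		    !fscache_maybe_release_page(ni->fscache, page, gfp))
 *			return 0;	// cache is still using the page
 *		return 1;		// page may be released
 *	}
 */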
/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
				   struct page *page)
{
	struct fscache_cookie *cookie;
	struct page *xpage = NULL, *val;

	spin_lock(&object->lock);
	cookie = object->cookie;
	if (cookie) {
		/* delete the page from the tree if it is now no longer
		 * pending storage */
		spin_lock(&cookie->stores_lock);
		radix_tree_tag_clear(&cookie->stores, page->index,
				     FSCACHE_COOKIE_STORING_TAG);
		trace_fscache_page(cookie, page, fscache_page_radix_clear_store);
		if (!radix_tree_tag_get(&cookie->stores, page->index,
					FSCACHE_COOKIE_PENDING_TAG)) {
			fscache_stat(&fscache_n_store_radix_deletes);
			xpage = radix_tree_delete(&cookie->stores, page->index);
			trace_fscache_page(cookie, page, fscache_page_radix_delete);
			trace_fscache_page(cookie, page, fscache_page_write_end);

			val = radix_tree_lookup(&cookie->stores, page->index);
			trace_fscache_check_page(cookie, page, val, 1);
		} else {
			trace_fscache_page(cookie, page, fscache_page_write_end_pend);
		}
		spin_unlock(&cookie->stores_lock);
		wake_up_bit(&cookie->flags, 0);
		trace_fscache_wake_cookie(cookie);
	} else {
		trace_fscache_page(cookie, page, fscache_page_write_end_noc);
	}
	spin_unlock(&object->lock);
	if (xpage)
		put_page(xpage);
}
/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
	struct fscache_object *object = op->object;
	int ret;

	_enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

	fscache_stat(&fscache_n_attr_changed_calls);

	if (fscache_object_is_active(object)) {
		fscache_stat(&fscache_n_cop_attr_changed);
		ret = object->cache->ops->attr_changed(object);
		fscache_stat_d(&fscache_n_cop_attr_changed);
		if (ret < 0)
			fscache_abort_object(object);
		fscache_op_complete(op, ret < 0);
	} else {
		fscache_op_complete(op, true);
	}

	_leave("");
}
/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	bool wake_cookie = false;

	_enter("%p", cookie);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

	fscache_stat(&fscache_n_attr_changed);

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op) {
		fscache_stat(&fscache_n_attr_changed_nomem);
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	fscache_operation_init(cookie, op, fscache_attr_changed_op, NULL, NULL);
	trace_fscache_page_op(cookie, NULL, op, fscache_page_op_attr_changed);
	op->flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_EXCLUSIVE) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	if (fscache_submit_exclusive_op(object, op) < 0)
		goto nobufs_dec;
	spin_unlock(&cookie->lock);
	fscache_stat(&fscache_n_attr_changed_ok);
	fscache_put_operation(op);
	_leave(" = 0");
	return 0;

nobufs_dec:
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs:
	spin_unlock(&cookie->lock);
	fscache_put_operation(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_stat(&fscache_n_attr_changed_nobufs);
	_leave(" = %d", -ENOBUFS);
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);
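
/*
 * Example: a netfs typically calls the fscache_attr_changed() wrapper after
 * an attribute change (notably a change of file size) has been committed to
 * the server, so the cache can resize the backing object to match. A sketch,
 * assuming a hypothetical netfs inode "ni":
 *
 *	if (fscache_attr_changed(ni->fscache) < 0)
 *		pr_debug("cache could not be updated\n");
 */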
/*
 * Handle cancellation of a pending retrieval op
 */
static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	atomic_set(&op->n_pages, 0);
}
/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	_enter("{OP%x}", op->op.debug_id);

	ASSERTIFCMP(op->op.state != FSCACHE_OP_ST_INITIALISED,
		    atomic_read(&op->n_pages), ==, 0);

	fscache_hist(fscache_retrieval_histogram, op->start_time);
	if (op->context)
		fscache_put_context(op->cookie, op->context);

	_leave("");
}
/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
	struct fscache_cookie *cookie,
	struct address_space *mapping,
	fscache_rw_complete_t end_io_func,
	void *context)
{
	struct fscache_retrieval *op;

	/* allocate a retrieval operation and attempt to submit it */
	op = kzalloc(sizeof(*op), GFP_NOIO);
	if (!op) {
		fscache_stat(&fscache_n_retrievals_nomem);
		return NULL;
	}

	fscache_operation_init(cookie, &op->op, NULL,
			       fscache_do_cancel_retrieval,
			       fscache_release_retrieval_op);
	op->op.flags	= FSCACHE_OP_MYTHREAD |
		(1UL << FSCACHE_OP_WAITING) |
		(1UL << FSCACHE_OP_UNUSE_COOKIE);
	op->cookie	= cookie;
	op->mapping	= mapping;
	op->end_io_func	= end_io_func;
	op->context	= context;
	op->start_time	= jiffies;
	INIT_LIST_HEAD(&op->to_do);

	/* Pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure.
	 */
	if (context)
		fscache_get_context(op->cookie, context);
	return op;
}
/*
 * wait for a deferred lookup to complete
 */
int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
	unsigned long jif;

	_enter("");

	if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
		_leave(" = 0 [imm]");
		return 0;
	}

	fscache_stat(&fscache_n_retrievals_wait);

	jif = jiffies;
	if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
			TASK_INTERRUPTIBLE) != 0) {
		fscache_stat(&fscache_n_retrievals_intr);
		_leave(" = -ERESTARTSYS");
		return -ERESTARTSYS;
	}

	ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

	smp_rmb();
	fscache_hist(fscache_retrieval_delay_histogram, jif);
	_leave(" = 0 [dly]");
	return 0;
}
/*
 * wait for an object to become active (or dead)
 */
int fscache_wait_for_operation_activation(struct fscache_object *object,
					  struct fscache_operation *op,
					  atomic_t *stat_op_waits,
					  atomic_t *stat_object_dead)
{
	int ret;

	if (!test_bit(FSCACHE_OP_WAITING, &op->flags))
		goto check_if_dead;

	_debug(">>> WT");
	if (stat_op_waits)
		fscache_stat(stat_op_waits);
	if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
			TASK_INTERRUPTIBLE) != 0) {
		trace_fscache_op(object->cookie, op, fscache_op_signal);
		ret = fscache_cancel_op(op, false);
		if (ret == 0)
			return -ERESTARTSYS;

		/* it's been removed from the pending queue by another party,
		 * so we should get to run shortly */
		wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
			    TASK_UNINTERRUPTIBLE);
	}
	_debug("<<< GO");

check_if_dead:
	if (op->state == FSCACHE_OP_ST_CANCELLED) {
		if (stat_object_dead)
			fscache_stat(stat_object_dead);
		_leave(" = -ENOBUFS [cancelled]");
		return -ENOBUFS;
	}
	if (unlikely(fscache_object_is_dying(object) ||
		     fscache_cache_is_broken(object))) {
		enum fscache_operation_state state = op->state;
		trace_fscache_op(object->cookie, op, fscache_op_signal);
		fscache_cancel_op(op, true);
		if (stat_object_dead)
			fscache_stat(stat_object_dead);
		_leave(" = -ENOBUFS [obj dead %d]", state);
		return -ENOBUFS;
	}
	return 0;
}
/*
 * read a page from the cache or allocate a block in which to store it
 * returns:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   -ENODATA	- no data available in the backing object for this block
 *   0		- dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
				 struct page *page,
				 fscache_rw_complete_t end_io_func,
				 void *context,
				 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, page->mapping,
				     end_io_func, context);
	if (!op) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}
	atomic_set(&op->n_pages, 1);
	trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_retr_one);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags));

	__fscache_use_cookie(cookie);
	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_page);
		ret = object->cache->ops->allocate_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_page);
		if (ret == 0)
			ret = -ENODATA;
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_page);
		ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_put_retrieval(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);
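
/*
 * Example: called from a netfs ->readpage() via the
 * fscache_read_or_alloc_page() wrapper. A sketch, assuming a hypothetical
 * completion handler mynetfs_readpage_from_fscache_complete() and a netfs
 * inode "ni" carrying the cookie:
 *
 *	ret = fscache_read_or_alloc_page(ni->fscache, page,
 *					 mynetfs_readpage_from_fscache_complete,
 *					 NULL, GFP_KERNEL);
 *	switch (ret) {
 *	case 0:		// read dispatched; end_io_func runs on completion
 *		return 0;
 *	case -ENOBUFS:	// no cache space or object available
 *	case -ENODATA:	// cache miss: fall back to the server
 *	default:
 *		return mynetfs_readpage_from_server(page);
 *	}
 */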
/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * returns:
 *   -ENOMEM	- out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS	- no backing object or space available in which to cache any
 *                pages not being read
 *   -ENODATA	- no data available in the backing object for some or all of
 *                the pages
 *   0		- dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages,
				  fscache_rw_complete_t end_io_func,
				  void *context,
				  gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,,%d,,,", cookie, *nr_pages);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(*nr_pages, >, 0);
	ASSERT(!list_empty(pages));

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context);
	if (!op)
		return -ENOMEM;
	atomic_set(&op->n_pages, *nr_pages);
	trace_fscache_page_op(cookie, NULL, &op->op, fscache_page_op_retr_multi);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_pages);
		ret = object->cache->ops->allocate_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_pages);
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_pages);
		ret = object->cache->ops->read_or_alloc_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	fscache_put_retrieval(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
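
/*
 * Example: called from a netfs ->readpages() via the
 * fscache_read_or_alloc_pages() wrapper. Pages the cache takes on are removed
 * from the list and *nr_pages is decremented, so whatever remains must be
 * fetched from the server. A sketch with hypothetical netfs names:
 *
 *	ret = fscache_read_or_alloc_pages(ni->fscache, mapping, pages, &nr_pages,
 *					  mynetfs_readpage_from_fscache_complete,
 *					  NULL, mapping_gfp_mask(mapping));
 *	if (ret == 0 && nr_pages == 0)
 *		return 0;	// the cache is reading every page
 *	return mynetfs_readpages_from_server(mapping, pages, nr_pages);
 */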
/*
 * allocate a block in the cache on which to store a page
 * returns:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   0		- block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_allocs);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL);
	if (!op)
		return -ENOMEM;
	atomic_set(&op->n_pages, 1);
	trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_alloc_one);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_alloc_ops);

	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_alloc_op_waits),
		__fscache_stat(&fscache_n_allocs_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	fscache_stat(&fscache_n_cop_allocate_page);
	ret = object->cache->ops->allocate_page(op, page, gfp);
	fscache_stat_d(&fscache_n_cop_allocate_page);

error:
	if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_allocs_intr);
	else if (ret < 0)
		fscache_stat(&fscache_n_allocs_nobufs);
	else
		fscache_stat(&fscache_n_allocs_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	fscache_put_retrieval(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
nobufs:
	fscache_stat(&fscache_n_allocs_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);
/*
 * Unmark pages allocated in the readahead code path (via:
 * fscache_readpages_or_alloc) after delegating to the base filesystem
 */
void __fscache_readpages_cancel(struct fscache_cookie *cookie,
				struct list_head *pages)
{
	struct page *page;

	list_for_each_entry(page, pages, lru) {
		if (PageFsCache(page))
			__fscache_uncache_page(cookie, page);
	}
}
EXPORT_SYMBOL(__fscache_readpages_cancel);
/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
	_enter("{OP%x}", _op->debug_id);
}
/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
	struct fscache_storage *op =
		container_of(_op, struct fscache_storage, op);
	struct fscache_object *object = op->op.object;
	struct fscache_cookie *cookie;
	struct page *page;
	unsigned n;
	void *results[1];
	int ret;

	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

again:
	spin_lock(&object->lock);
	cookie = object->cookie;

	if (!fscache_object_is_active(object)) {
		/* If we get here, then the on-disk cache object likely no
		 * longer exists, so we should just cancel this write
		 * operation.
		 */
		spin_unlock(&object->lock);
		fscache_op_complete(&op->op, true);
		_leave(" [inactive]");
		return;
	}

	if (!cookie) {
		/* If we get here, then the cookie belonging to the object was
		 * detached, probably by the cookie being withdrawn due to
		 * memory pressure, which means that the pages we might write
		 * to the cache from no longer exist - therefore, we can just
		 * cancel this write operation.
		 */
		spin_unlock(&object->lock);
		fscache_op_complete(&op->op, true);
		_leave(" [cancel] op{f=%lx s=%u} obj{s=%s f=%lx}",
		       _op->flags, _op->state, object->state->short_name,
		       object->flags);
		return;
	}

	spin_lock(&cookie->stores_lock);

	fscache_stat(&fscache_n_store_calls);

	/* find a page to store */
	results[0] = NULL;
	page = NULL;
	n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
				       FSCACHE_COOKIE_PENDING_TAG);
	trace_fscache_gang_lookup(cookie, &op->op, results, n, op->store_limit);
	if (n != 1)
		goto superseded;
	page = results[0];
	_debug("gang %d [%lx]", n, page->index);

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_STORING_TAG);
	radix_tree_tag_clear(&cookie->stores, page->index,
			     FSCACHE_COOKIE_PENDING_TAG);
	trace_fscache_page(cookie, page, fscache_page_radix_pend2store);

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	if (page->index >= op->store_limit)
		goto discard_page;

	fscache_stat(&fscache_n_store_pages);
	fscache_stat(&fscache_n_cop_write_page);
	ret = object->cache->ops->write_page(op, page);
	fscache_stat_d(&fscache_n_cop_write_page);
	trace_fscache_wrote_page(cookie, page, &op->op, ret);
	fscache_end_page_write(object, page);
	if (ret < 0) {
		fscache_abort_object(object);
		fscache_op_complete(&op->op, true);
	} else {
		fscache_enqueue_operation(&op->op);
	}

	_leave("");
	return;

discard_page:
	fscache_stat(&fscache_n_store_pages_over_limit);
	trace_fscache_wrote_page(cookie, page, &op->op, -ENOBUFS);
	fscache_end_page_write(object, page);
	goto again;

superseded:
	/* this writer is going away and there aren't any more things to
	 * write */
	_debug("cease");
	spin_unlock(&cookie->stores_lock);
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	spin_unlock(&object->lock);
	fscache_op_complete(&op->op, false);
	_leave("");
}
/*
 * Clear the pages pending writing for invalidation
 */
void fscache_invalidate_writes(struct fscache_cookie *cookie)
{
	struct page *page;
	void *results[16];
	int n, i;

	_enter("");

	for (;;) {
		spin_lock(&cookie->stores_lock);
		n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
					       ARRAY_SIZE(results),
					       FSCACHE_COOKIE_PENDING_TAG);
		if (n == 0) {
			spin_unlock(&cookie->stores_lock);
			break;
		}

		for (i = n - 1; i >= 0; i--) {
			page = results[i];
			radix_tree_delete(&cookie->stores, page->index);
			trace_fscache_page(cookie, page, fscache_page_radix_delete);
			trace_fscache_page(cookie, page, fscache_page_inval);
		}

		spin_unlock(&cookie->stores_lock);

		for (i = n - 1; i >= 0; i--)
			put_page(results[i]);
	}

	wake_up_bit(&cookie->flags, 0);
	trace_fscache_wake_cookie(cookie);

	_leave("");
}
/*
 * request a page be stored in the cache
 * returns:
 *   -ENOMEM	- out of memory, nothing done
 *   -ENOBUFS	- no backing object available in which to cache the page
 *   0		- dispatched a write - it'll call end_io_func() when finished
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *	(a) no writes yet
 *
 *	(b) writes deferred till post-creation (mark page for writing and
 *	    return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *
 *	(a) fill point not yet reached this page (mark page for writing and
 *	    return)
 *
 *	(b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
			 struct page *page,
			 loff_t object_size,
			 gfp_t gfp)
{
	struct fscache_storage *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%x,", cookie, (u32) page->flags);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERT(PageFsCache(page));

	fscache_stat(&fscache_n_stores);

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
	if (!op)
		goto nomem;

	fscache_operation_init(cookie, &op->op, fscache_write_op, NULL,
			       fscache_release_write_op);
	op->op.flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_WAITING) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);

	ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
	if (ret < 0)
		goto nomem_free;

	trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_write_one);

	ret = -ENOBUFS;
	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);
	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
		goto nobufs;

	trace_fscache_page(cookie, page, fscache_page_write);

	/* add the page to the pending-storage radix tree on the backing
	 * object */
	spin_lock(&object->lock);

	if (object->store_limit_l != object_size)
		fscache_set_store_limit(object, object_size);

	spin_lock(&cookie->stores_lock);

	_debug("store limit %llx", (unsigned long long) object->store_limit);

	ret = radix_tree_insert(&cookie->stores, page->index, page);
	if (ret < 0) {
		if (ret == -EEXIST)
			goto already_queued;
		_debug("insert failed %d", ret);
		goto nobufs_unlock_obj;
	}

	trace_fscache_page(cookie, page, fscache_page_radix_insert);
	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_PENDING_TAG);
	trace_fscache_page(cookie, page, fscache_page_radix_set_pend);
	get_page(page);

	/* we only want one writer at a time, but we do need to queue new
	 * writers after exclusive ops */
	if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
		goto already_pending;

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	op->op.debug_id	= atomic_inc_return(&fscache_op_debug_id);
	op->store_limit = object->store_limit;

	__fscache_use_cookie(cookie);
	if (fscache_submit_op(object, &op->op) < 0)
		goto submit_failed;

	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_stat(&fscache_n_store_ops);
	fscache_stat(&fscache_n_stores_ok);

	/* the work queue now carries its own ref on the object */
	fscache_put_operation(&op->op);
	_leave(" = 0");
	return 0;

already_queued:
	fscache_stat(&fscache_n_stores_again);
already_pending:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_put_operation(&op->op);
	fscache_stat(&fscache_n_stores_ok);
	_leave(" = 0");
	return 0;

submit_failed:
	spin_lock(&cookie->stores_lock);
	radix_tree_delete(&cookie->stores, page->index);
	trace_fscache_page(cookie, page, fscache_page_radix_delete);
	spin_unlock(&cookie->stores_lock);
	wake_cookie = __fscache_unuse_cookie(cookie);
	put_page(page);
	ret = -ENOBUFS;
	goto nobufs;

nobufs_unlock_obj:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
nobufs:
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_put_operation(&op->op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_stat(&fscache_n_stores_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;

nomem_free:
	fscache_put_operation(&op->op);
nomem:
	fscache_stat(&fscache_n_stores_oom);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);
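
/*
 * Example: a netfs usually calls the fscache_write_page() wrapper once a page
 * has been written to the server, and must uncache the page itself if the
 * store cannot be queued. A sketch with hypothetical netfs names:
 *
 *	if (fscache_write_page(ni->fscache, page, i_size_read(inode),
 *			       GFP_KERNEL) != 0) {
 *		fscache_uncache_page(ni->fscache, page);
 *		BUG_ON(PageFsCache(page));
 *	}
 */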
/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
	struct fscache_object *object;

	_enter(",%p", page);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	fscache_stat(&fscache_n_uncaches);

	/* cache withdrawal may beat us to it */
	if (!PageFsCache(page))
		goto done;

	trace_fscache_page(cookie, page, fscache_page_uncache);

	/* get the object */
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects)) {
		ClearPageFsCache(page);
		goto done_unlock;
	}

	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	/* there might now be stuff on disk we could read */
	clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* only invoke the cache backend if we managed to mark the page
	 * uncached here; this deals with synchronisation vs withdrawal */
	if (TestClearPageFsCache(page) &&
	    object->cache->ops->uncache_page) {
		/* the cache backend releases the cookie lock */
		fscache_stat(&fscache_n_cop_uncache_page);
		object->cache->ops->uncache_page(object, page);
		fscache_stat_d(&fscache_n_cop_uncache_page);
		goto done;
	}

done_unlock:
	spin_unlock(&cookie->lock);
done:
	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);
/**
 * fscache_mark_page_cached - Mark a page as being cached
 * @op: The retrieval op pages are being marked for
 * @page: The page to be marked
 *
 * Mark a netfs page as being cached. After this is called, the netfs
 * must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
{
	struct fscache_cookie *cookie = op->op.object->cookie;

#ifdef CONFIG_FSCACHE_STATS
	atomic_inc(&fscache_n_marks);
#endif

	trace_fscache_page(cookie, page, fscache_page_cached);

	_debug("- mark %p{%lx}", page, page->index);
	if (TestSetPageFsCache(page)) {
		static bool once_only;
		if (!once_only) {
			once_only = true;
			pr_warn("Cookie type %s marked page %lx multiple times\n",
				cookie->def->name, page->index);
		}
	}

	if (cookie->def->mark_page_cached)
		cookie->def->mark_page_cached(cookie->netfs_data,
					      op->mapping, page);
}
EXPORT_SYMBOL(fscache_mark_page_cached);
/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached. After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
			       struct pagevec *pagevec)
{
	unsigned long loop;

	for (loop = 0; loop < pagevec->nr; loop++)
		fscache_mark_page_cached(op, pagevec->pages[loop]);

	pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);
/*
 * Uncache all the pages in an inode that are marked PG_fscache, assuming them
 * to be associated with the given cookie.
 */
void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
				       struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	pgoff_t next;
	int i;

	_enter("%p,%p", cookie, inode);

	if (!mapping || mapping->nrpages == 0) {
		_leave(" [no pages]");
		return;
	}

	pagevec_init(&pvec);
	next = 0;
	do {
		if (!pagevec_lookup(&pvec, mapping, &next))
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			if (PageFsCache(page)) {
				__fscache_wait_on_page_write(cookie, page);
				__fscache_uncache_page(cookie, page);
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	} while (next);

	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);
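
/*
 * Example: typically called (via the fscache_uncache_all_inode_pages()
 * wrapper) when a netfs inode is being evicted, just before its cookie is
 * relinquished. A sketch with hypothetical netfs names:
 *
 *	fscache_uncache_all_inode_pages(ni->fscache, &ni->vfs_inode);
 *	fscache_relinquish_cookie(ni->fscache, NULL, false);
 */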