/* Storage object read/write
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "internal.h"

/*
 * detect wake up events generated by the unlocking of pages in which we're
 * interested
 * - we use this to detect read completion of backing pages
 * - the caller holds the waitqueue lock
 */
static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
				  int sync, void *_key)
{
	struct cachefiles_one_read *monitor =
		container_of(wait, struct cachefiles_one_read, monitor);
	struct cachefiles_object *object;
	struct fscache_retrieval *op = monitor->op;
	struct wait_bit_key *key = _key;
	struct page *page = wait->private;

	ASSERT(key);

	_enter("{%lu},%u,%d,{%p,%u}",
	       monitor->netfs_page->index, mode, sync,
	       key->flags, key->bit_nr);

	if (key->flags != &page->flags ||
	    key->bit_nr != PG_locked)
		return 0;

	_debug("--- monitor %p %lx ---", page, page->flags);

	if (!PageUptodate(page) && !PageError(page)) {
		/* unlocked, not uptodate and not erroneous? */
		_debug("page probably truncated");
	}

	/* remove from the waitqueue */
	list_del(&wait->entry);

	/* move onto the action list and queue for FS-Cache thread pool */
	ASSERT(op);

	/* We need to temporarily bump the usage count as we don't own a ref
	 * here otherwise cachefiles_read_copier() may free the op between the
	 * monitor being enqueued on the op->to_do list and the op getting
	 * enqueued on the work queue.
	 */
	fscache_get_retrieval(op);

	object = container_of(op->op.object, struct cachefiles_object, fscache);
	spin_lock(&object->work_lock);
	list_add_tail(&monitor->op_link, &op->to_do);
	spin_unlock(&object->work_lock);

	fscache_enqueue_retrieval(op);
	fscache_put_retrieval(op);
	return 0;
}

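/*
 * Note on the monitor mechanism: each outstanding backing-page read is
 * tracked by a struct cachefiles_one_read ("monitor").  Its
 * wait_queue_entry is hooked onto the backing page's PG_locked wait queue
 * with add_page_wait_queue(), so cachefiles_read_waiter() fires when the
 * backing read unlocks the page; the waiter then moves the monitor onto
 * op->to_do and kicks the retrieval op, and cachefiles_read_copier() below
 * drains that list on the FS-Cache thread pool.
 */
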
/*
 * handle a probably truncated page
 * - check to see if the page is still relevant and reissue the read if
 *   possible
 * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
 *   must wait again and 0 if successful
 */
static int cachefiles_read_reissue(struct cachefiles_object *object,
				   struct cachefiles_one_read *monitor)
{
	struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
	struct page *backpage = monitor->back_page, *backpage2;
	int ret;

	_enter("{ino=%lx},{%lx,%lx}",
	       d_backing_inode(object->backer)->i_ino,
	       backpage->index, backpage->flags);

	/* skip if the page was truncated away completely */
	if (backpage->mapping != bmapping) {
		_leave(" = -ENODATA [mapping]");
		return -ENODATA;
	}

	backpage2 = find_get_page(bmapping, backpage->index);
	if (!backpage2) {
		_leave(" = -ENODATA [gone]");
		return -ENODATA;
	}

	if (backpage != backpage2) {
		put_page(backpage2);
		_leave(" = -ENODATA [different]");
		return -ENODATA;
	}

	/* the page is still there and we already have a ref on it, so we don't
	 * need a second */
	put_page(backpage2);

	INIT_LIST_HEAD(&monitor->op_link);
	add_page_wait_queue(backpage, &monitor->monitor);

	if (trylock_page(backpage)) {
		ret = -EIO;
		if (PageError(backpage))
			goto unlock_discard;
		ret = 0;
		if (PageUptodate(backpage))
			goto unlock_discard;

		_debug("reissue read");
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto unlock_discard;
	}

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}

	/* it'll reappear on the todo list */
	_leave(" = -EINPROGRESS");
	return -EINPROGRESS;

unlock_discard:
	unlock_page(backpage);
	spin_lock_irq(&object->work_lock);
	list_del(&monitor->op_link);
	spin_unlock_irq(&object->work_lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * copy data from backing pages to netfs pages to complete a read operation
 * - driven by FS-Cache's thread pool
 */
static void cachefiles_read_copier(struct fscache_operation *_op)
{
	struct cachefiles_one_read *monitor;
	struct cachefiles_object *object;
	struct fscache_retrieval *op;
	int error, max;

	op = container_of(_op, struct fscache_retrieval, op);
	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("{ino=%lu}", d_backing_inode(object->backer)->i_ino);

	max = 8;
	spin_lock_irq(&object->work_lock);

	while (!list_empty(&op->to_do)) {
		monitor = list_entry(op->to_do.next,
				     struct cachefiles_one_read, op_link);
		list_del(&monitor->op_link);

		spin_unlock_irq(&object->work_lock);

		_debug("- copy {%lu}", monitor->back_page->index);

	recheck:
		if (test_bit(FSCACHE_COOKIE_INVALIDATING,
			     &object->fscache.cookie->flags)) {
			error = -ESTALE;
		} else if (PageUptodate(monitor->back_page)) {
			copy_highpage(monitor->netfs_page, monitor->back_page);
			fscache_mark_page_cached(monitor->op,
						 monitor->netfs_page);
			error = 0;
		} else if (!PageError(monitor->back_page)) {
			/* the page has probably been truncated */
			error = cachefiles_read_reissue(object, monitor);
			if (error == -EINPROGRESS)
				goto next;
			goto recheck;
		} else {
			cachefiles_io_error_obj(
				object,
				"Readpage failed on backing file %lx",
				(unsigned long) monitor->back_page->flags);
			error = -EIO;
		}

		put_page(monitor->back_page);

		fscache_end_io(op, monitor->netfs_page, error);
		put_page(monitor->netfs_page);
		fscache_retrieval_complete(op, 1);
		fscache_put_retrieval(op);
		kfree(monitor);

	next:
		/* let the thread pool have some air occasionally */
		max--;
		if (max < 0 || need_resched()) {
			if (!list_empty(&op->to_do))
				fscache_enqueue_retrieval(op);
			_leave(" [maxed out]");
			return;
		}

		spin_lock_irq(&object->work_lock);
	}

	spin_unlock_irq(&object->work_lock);
	_leave("");
}

/*
 * read the corresponding page to the given set from the backing file
 * - an uncertain page is simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
					    struct fscache_retrieval *op,
					    struct page *netpage)
{
	struct cachefiles_one_read *monitor;
	struct address_space *bmapping;
	struct page *newpage, *backpage;
	int ret;

	_enter("");

	_debug("read back %p{%lu,%d}",
	       netpage, netpage->index, page_count(netpage));

	monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
	if (!monitor)
		goto nomem;

	monitor->netfs_page = netpage;
	monitor->op = fscache_get_retrieval(op);

	init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);

	/* attempt to get hold of the backing page */
	bmapping = d_backing_inode(object->backer)->i_mapping;
	newpage = NULL;

	for (;;) {
		backpage = find_get_page(bmapping, netpage->index);
		if (backpage)
			goto backing_page_already_present;

		if (!newpage) {
			newpage = __page_cache_alloc(cachefiles_gfp);
			if (!newpage)
				goto nomem_monitor;
		}

		ret = add_to_page_cache_lru(newpage, bmapping,
					    netpage->index, cachefiles_gfp);
		if (ret == 0)
			goto installed_new_backing_page;
		if (ret != -EEXIST)
			goto nomem_page;
	}

	/* we've installed a new backing page, so now we need to start
	 * it reading */
installed_new_backing_page:
	_debug("- new %p", newpage);

	backpage = newpage;
	newpage = NULL;

read_backing_page:
	ret = bmapping->a_ops->readpage(NULL, backpage);
	if (ret < 0)
		goto read_error;

	/* set the monitor to transfer the data across */
monitor_backing_page:
	_debug("- monitor add");

	/* install the monitor */
	get_page(monitor->netfs_page);
	get_page(backpage);
	monitor->back_page = backpage;
	monitor->monitor.private = backpage;
	add_page_wait_queue(backpage, &monitor->monitor);
	monitor = NULL;

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}
	goto success;

	/* if the backing page is already present, it can be in one of
	 * three states: read in progress, read failed or read okay */
backing_page_already_present:
	_debug("- present");

	if (newpage) {
		put_page(newpage);
		newpage = NULL;
	}

	if (PageError(backpage))
		goto io_error;

	if (PageUptodate(backpage))
		goto backing_page_already_uptodate;

	if (!trylock_page(backpage))
		goto monitor_backing_page;
	_debug("read %p {%lx}", backpage, backpage->flags);
	goto read_backing_page;

	/* the backing page is already up to date, attach the netfs
	 * page to the pagecache and LRU and copy the data across */
backing_page_already_uptodate:
	_debug("- uptodate");

	fscache_mark_page_cached(op, netpage);

	copy_highpage(netpage, backpage);
	fscache_end_io(op, netpage, 0);
	fscache_retrieval_complete(op, 1);

success:
	_debug("success");
	ret = 0;

out:
	if (backpage)
		put_page(backpage);
	if (monitor) {
		fscache_put_retrieval(monitor->op);
		kfree(monitor);
	}
	_leave(" = %d", ret);
	return ret;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM) {
		fscache_retrieval_complete(op, 1);
		goto out;
	}
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	fscache_retrieval_complete(op, 1);
	ret = -ENOBUFS;
	goto out;

nomem_page:
	put_page(newpage);
nomem_monitor:
	fscache_put_retrieval(monitor->op);
	kfree(monitor);
nomem:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - if the page is backed by a block in the cache:
 *   - a read will be started which will call the callback on completion
 *   - 0 will be returned
 * - else if the page is unbacked:
 *   - the metadata will be retained
 *   - -ENODATA will be returned
 */
int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
				  struct page *page,
				  gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct inode *inode;
	sector_t block0, block;
	unsigned shift;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{%p},{%lx},,,", object, page->index);

	if (!object->backer)
		goto enobufs;

	inode = d_backing_inode(object->backer);
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->bmap);
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	/* we assume the absence or presence of the first block is a good
	 * enough indication for the page as a whole
	 * - TODO: don't use bmap() for this as it is _not_ actually good
	 *   enough for this as it doesn't indicate errors, but it's all we've
	 *   got for the moment
	 */
	block0 = page->index;
	block0 <<= shift;

	block = inode->i_mapping->a_ops->bmap(inode->i_mapping, block0);
	_debug("%llx -> %llx",
	       (unsigned long long) block0,
	       (unsigned long long) block);

	if (block) {
		/* submit the apparently valid page to the backing fs to be
		 * read from disk */
		ret = cachefiles_read_backing_file_one(object, op, page);
	} else if (cachefiles_has_space(cache, 0, 1) == 0) {
		/* there's space in the cache we can use */
		fscache_mark_page_cached(op, page);
		fscache_retrieval_complete(op, 1);
		ret = -ENODATA;
	} else {
		goto enobufs;
	}

	_leave(" = %d", ret);
	return ret;

enobufs:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}

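/*
 * Worked example of the bmap probe above (illustrative figures only): with
 * PAGE_SHIFT == 12 and a backing filesystem using 1KiB blocks
 * (s_blocksize_bits == 10), shift == 2, so netfs page index 3 is probed as
 * backing block 3 << 2 == 12.  A non-zero result from ->bmap() means the
 * first block of the page has been allocated and the page is read back;
 * otherwise the hole is reported as -ENODATA (if there is space) or
 * -ENOBUFS.
 */
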
/*
 * read the corresponding pages to the given set from the backing file
 * - any uncertain pages are simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file(struct cachefiles_object *object,
					struct fscache_retrieval *op,
					struct list_head *list)
{
	struct cachefiles_one_read *monitor = NULL;
	struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
	struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
	int ret = 0;

	_enter("");

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);

		_debug("read back %p{%lu,%d}",
		       netpage, netpage->index, page_count(netpage));

		if (!monitor) {
			monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
			if (!monitor)
				goto nomem;

			monitor->op = fscache_get_retrieval(op);
			init_waitqueue_func_entry(&monitor->monitor,
						  cachefiles_read_waiter);
		}

		for (;;) {
			backpage = find_get_page(bmapping, netpage->index);
			if (backpage)
				goto backing_page_already_present;

			if (!newpage) {
				newpage = __page_cache_alloc(cachefiles_gfp);
				if (!newpage)
					goto nomem;
			}

			ret = add_to_page_cache_lru(newpage, bmapping,
						    netpage->index,
						    cachefiles_gfp);
			if (ret == 0)
				goto installed_new_backing_page;
			if (ret != -EEXIST)
				goto nomem;
		}

		/* we've installed a new backing page, so now we need
		 * to start it reading */
	installed_new_backing_page:
		_debug("- new %p", newpage);

		backpage = newpage;
		newpage = NULL;

	reread_backing_page:
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto read_error;

		/* add the netfs page to the pagecache and LRU, and set the
		 * monitor to transfer the data across */
	monitor_backing_page:
		_debug("- monitor add");

		ret = add_to_page_cache_lru(netpage, op->mapping,
					    netpage->index, cachefiles_gfp);
		if (ret < 0) {
			if (ret == -EEXIST) {
				put_page(backpage);
				backpage = NULL;
				put_page(netpage);
				netpage = NULL;
				fscache_retrieval_complete(op, 1);
				continue;
			}
			goto nomem;
		}

		/* install a monitor */
		get_page(netpage);
		monitor->netfs_page = netpage;

		get_page(backpage);
		monitor->back_page = backpage;
		monitor->monitor.private = backpage;
		add_page_wait_queue(backpage, &monitor->monitor);
		monitor = NULL;

		/* but the page may have been read before the monitor was
		 * installed, so the monitor may miss the event - so we have to
		 * ensure that we do get one in such a case */
		if (trylock_page(backpage)) {
			_debug("2unlock %p {%lx}", backpage, backpage->flags);
			unlock_page(backpage);
		}

		put_page(backpage);
		backpage = NULL;

		put_page(netpage);
		netpage = NULL;
		continue;

		/* if the backing page is already present, it can be in one of
		 * three states: read in progress, read failed or read okay */
	backing_page_already_present:
		_debug("- present %p", backpage);

		if (PageError(backpage))
			goto io_error;

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate;

		_debug("- not ready %p{%lx}", backpage, backpage->flags);

		if (!trylock_page(backpage))
			goto monitor_backing_page;

		if (PageError(backpage)) {
			_debug("error %lx", backpage->flags);
			unlock_page(backpage);
			goto io_error;
		}

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate_unlock;

		/* we've locked a page that's neither up to date nor erroneous,
		 * so we need to attempt to read it again */
		goto reread_backing_page;

		/* the backing page is already up to date, attach the netfs
		 * page to the pagecache and LRU and copy the data across */
	backing_page_already_uptodate_unlock:
		_debug("uptodate %lx", backpage->flags);
		unlock_page(backpage);
	backing_page_already_uptodate:
		_debug("- uptodate");

		ret = add_to_page_cache_lru(netpage, op->mapping,
					    netpage->index, cachefiles_gfp);
		if (ret < 0) {
			if (ret == -EEXIST) {
				put_page(backpage);
				backpage = NULL;
				put_page(netpage);
				netpage = NULL;
				fscache_retrieval_complete(op, 1);
				continue;
			}
			goto nomem;
		}

		copy_highpage(netpage, backpage);

		put_page(backpage);
		backpage = NULL;

		fscache_mark_page_cached(op, netpage);

		/* the netpage is unlocked and marked up to date here */
		fscache_end_io(op, netpage, 0);
		put_page(netpage);
		netpage = NULL;
		fscache_retrieval_complete(op, 1);
		continue;
	}

	netpage = NULL;

	_debug("out");

out:
	/* tidy up */
	if (newpage)
		put_page(newpage);
	if (netpage)
		put_page(netpage);
	if (backpage)
		put_page(backpage);
	if (monitor) {
		fscache_put_retrieval(op);
		kfree(monitor);
	}

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);
		put_page(netpage);
		fscache_retrieval_complete(op, 1);
	}

	_leave(" = %d", ret);
	return ret;

nomem:
	_debug("nomem");
	ret = -ENOMEM;
	goto record_page_complete;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM)
		goto record_page_complete;
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	ret = -ENOBUFS;
record_page_complete:
	fscache_retrieval_complete(op, 1);
	goto out;
}

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 */
int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
				   struct list_head *pages,
				   unsigned *nr_pages,
				   gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct list_head backpages;
	struct pagevec pagevec;
	struct inode *inode;
	struct page *page, *_n;
	unsigned shift, nrbackpages;
	int ret, ret2, space;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{OBJ%x,%d},,%d,,",
	       object->fscache.debug_id, atomic_read(&op->op.usage),
	       *nr_pages);

	if (!object->backer)
		goto all_enobufs;

	space = 1;
	if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
		space = 0;

	inode = d_backing_inode(object->backer);
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->bmap);
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

	pagevec_init(&pagevec);

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	INIT_LIST_HEAD(&backpages);
	nrbackpages = 0;

	ret = space ? -ENODATA : -ENOBUFS;
	list_for_each_entry_safe(page, _n, pages, lru) {
		sector_t block0, block;

		/* we assume the absence or presence of the first block is a
		 * good enough indication for the page as a whole
		 * - TODO: don't use bmap() for this as it is _not_ actually
		 *   good enough for this as it doesn't indicate errors, but
		 *   it's all we've got for the moment
		 */
		block0 = page->index;
		block0 <<= shift;

		block = inode->i_mapping->a_ops->bmap(inode->i_mapping,
						      block0);
		_debug("%llx -> %llx",
		       (unsigned long long) block0,
		       (unsigned long long) block);

		if (block) {
			/* we have data - add it to the list to give to the
			 * backing fs */
			list_move(&page->lru, &backpages);
			(*nr_pages)--;
			nrbackpages++;
		} else if (space && pagevec_add(&pagevec, page) == 0) {
			fscache_mark_pages_cached(op, &pagevec);
			fscache_retrieval_complete(op, 1);
			ret = -ENODATA;
		} else {
			fscache_retrieval_complete(op, 1);
		}
	}

	if (pagevec_count(&pagevec) > 0)
		fscache_mark_pages_cached(op, &pagevec);

	if (list_empty(pages))
		ret = 0;

	/* submit the apparently valid pages to the backing fs to be read from
	 * disk */
	if (nrbackpages > 0) {
		ret2 = cachefiles_read_backing_file(object, op, &backpages);
		if (ret2 == -ENOMEM || ret2 == -EINTR)
			ret = ret2;
	}

	_leave(" = %d [nr=%u%s]",
	       ret, *nr_pages, list_empty(pages) ? " empty" : "");
	return ret;

all_enobufs:
	fscache_retrieval_complete(op, *nr_pages);
	return -ENOBUFS;
}

/*
 * allocate a block in the cache in which to store a page
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - otherwise:
 *   - the metadata will be retained
 *   - 0 will be returned
 */
int cachefiles_allocate_page(struct fscache_retrieval *op,
			     struct page *page,
			     gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,{%lx},", object, page->index);

	ret = cachefiles_has_space(cache, 0, 1);
	if (ret == 0)
		fscache_mark_page_cached(op, page);
	else
		ret = -ENOBUFS;

	fscache_retrieval_complete(op, 1);
	_leave(" = %d", ret);
	return ret;
}

/*
 * allocate blocks in the cache in which to store a set of pages
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if ran out of memory
 * - returns -ENOBUFS if some buffers couldn't be made available
 * - returns -ENOBUFS if some pages are beyond EOF
 * - otherwise:
 *   - -ENODATA will be returned
 *   - metadata will be retained for any page marked
 */
int cachefiles_allocate_pages(struct fscache_retrieval *op,
			      struct list_head *pages,
			      unsigned *nr_pages,
			      gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct pagevec pagevec;
	struct page *page;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,,,%d,", object, *nr_pages);

	ret = cachefiles_has_space(cache, 0, *nr_pages);
	if (ret == 0) {
		pagevec_init(&pagevec);

		list_for_each_entry(page, pages, lru) {
			if (pagevec_add(&pagevec, page) == 0)
				fscache_mark_pages_cached(op, &pagevec);
		}

		if (pagevec_count(&pagevec) > 0)
			fscache_mark_pages_cached(op, &pagevec);
		ret = -ENODATA;
	} else {
		ret = -ENOBUFS;
	}

	fscache_retrieval_complete(op, *nr_pages);
	_leave(" = %d", ret);
	return ret;
}

/*
 * request a page be stored in the cache
 * - cache withdrawal is prevented by the caller
 * - this request may be ignored if there's no cache block available, in which
 *   case -ENOBUFS will be returned
 * - if the op is in progress, 0 will be returned
 */
int cachefiles_write_page(struct fscache_storage *op, struct page *page)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct file *file;
	struct path path;
	loff_t pos, eof;
	size_t len;
	void *data;
	int ret = -ENOBUFS;

	ASSERT(op != NULL);
	ASSERT(page != NULL);

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("%p,%p{%lx},,,", object, page, page->index);

	if (!object->backer) {
		_leave(" = -ENOBUFS");
		return -ENOBUFS;
	}

	ASSERT(d_is_reg(object->backer));

	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	pos = (loff_t)page->index << PAGE_SHIFT;

	/* We mustn't write more data than we have, so we have to beware of a
	 * partial page at EOF.
	 */
	eof = object->fscache.store_limit_l;
	if (pos >= eof)
		goto error;

	/* write the page to the backing filesystem and let it store it in its
	 * own time */
	path.mnt = cache->mnt;
	path.dentry = object->backer;
	file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto error_2;
	}

	len = PAGE_SIZE;
	if (eof & ~PAGE_MASK) {
		if (eof - pos < PAGE_SIZE) {
			_debug("cut short %llx to %llx",
			       pos, eof);
			len = eof - pos;
			ASSERTCMP(pos + len, ==, eof);
		}
	}

	data = kmap(page);
	ret = __kernel_write(file, data, len, &pos);
	kunmap(page);
	fput(file);
	if (ret != len)
		goto error_eio;

	_leave(" = 0");
	return 0;

error_eio:
	ret = -EIO;
error_2:
	if (ret == -EIO)
		cachefiles_io_error_obj(object,
					"Write page to backing file failed");
error:
	_leave(" = -ENOBUFS [%d]", ret);
	return -ENOBUFS;
}

/*
 * detach a backing block from a page
 * - cache withdrawal is prevented by the caller
 */
void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
	__releases(&object->fscache.cookie->lock)
{
	struct cachefiles_object *object;

	object = container_of(_object, struct cachefiles_object, fscache);

	_enter("%p,{%lu}", object, page->index);

	spin_unlock(&object->fscache.cookie->lock);
}