/* Storage object read/write
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "internal.h"
/*
 * detect wake up events generated by the unlocking of pages in which we're
 * interested
 * - we use this to detect read completion of backing pages
 * - the caller holds the waitqueue lock
 */
static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
                                  int sync, void *_key)
{
        struct cachefiles_one_read *monitor =
                container_of(wait, struct cachefiles_one_read, monitor);
        struct cachefiles_object *object;
        struct wait_bit_key *key = _key;
        struct page *page = wait->private;

        ASSERT(key);

        _enter("{%lu},%u,%d,{%p,%u}",
               monitor->netfs_page->index, mode, sync,
               key->flags, key->bit_nr);

        if (key->flags != &page->flags ||
            key->bit_nr != PG_locked)
                return 0;

        _debug("--- monitor %p %lx ---", page, page->flags);

        if (!PageUptodate(page) && !PageError(page)) {
                /* unlocked, not uptodate and not erroneous? */
                _debug("page probably truncated");
        }

        /* remove from the waitqueue */
        list_del(&wait->task_list);

        /* move onto the action list and queue for FS-Cache thread pool */
        ASSERT(monitor->op);

        object = container_of(monitor->op->op.object,
                              struct cachefiles_object, fscache);

        spin_lock(&object->work_lock);
        list_add_tail(&monitor->op_link, &monitor->op->to_do);
        spin_unlock(&object->work_lock);

        fscache_enqueue_retrieval(monitor->op);
        return 0;
}
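/*
 * Note: cachefiles_read_waiter() runs in the context of whichever task
 * unlocks the backing page, with the waitqueue lock held, so it must not
 * sleep or perform the copy itself - it just moves the monitor onto the
 * retrieval's to_do list and kicks the FS-Cache thread pool, which runs
 * cachefiles_read_copier() to do the actual transfer.
 */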
/*
 * handle a probably truncated page
 * - check to see if the page is still relevant and reissue the read if
 *   possible
 * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
 *   must wait again and 0 if successful
 */
static int cachefiles_read_reissue(struct cachefiles_object *object,
                                   struct cachefiles_one_read *monitor)
{
        struct address_space *bmapping = object->backer->d_inode->i_mapping;
        struct page *backpage = monitor->back_page, *backpage2;
        int ret;

        _enter("{ino=%lx},{%lx,%lx}",
               object->backer->d_inode->i_ino,
               backpage->index, backpage->flags);

        /* skip if the page was truncated away completely */
        if (backpage->mapping != bmapping) {
                _leave(" = -ENODATA [mapping]");
                return -ENODATA;
        }

        backpage2 = find_get_page(bmapping, backpage->index);
        if (!backpage2) {
                _leave(" = -ENODATA [gone]");
                return -ENODATA;
        }

        if (backpage != backpage2) {
                page_cache_release(backpage2);
                _leave(" = -ENODATA [different]");
                return -ENODATA;
        }

        /* the page is still there and we already have a ref on it, so we don't
         * need a second */
        page_cache_release(backpage2);

        INIT_LIST_HEAD(&monitor->op_link);
        add_page_wait_queue(backpage, &monitor->monitor);

        if (trylock_page(backpage)) {
                ret = -EIO;
                if (PageError(backpage))
                        goto unlock_discard;
                ret = 0;
                if (PageUptodate(backpage))
                        goto unlock_discard;

                _debug("reissue read");
                ret = bmapping->a_ops->readpage(NULL, backpage);
                if (ret < 0)
                        goto unlock_discard;
        }

        /* but the page may have been read before the monitor was installed, so
         * the monitor may miss the event - so we have to ensure that we do get
         * one in such a case */
        if (trylock_page(backpage)) {
                _debug("jumpstart %p {%lx}", backpage, backpage->flags);
                unlock_page(backpage);
        }

        /* it'll reappear on the todo list */
        _leave(" = -EINPROGRESS");
        return -EINPROGRESS;

unlock_discard:
        unlock_page(backpage);
        spin_lock_irq(&object->work_lock);
        list_del(&monitor->op_link);
        spin_unlock_irq(&object->work_lock);
        _leave(" = %d", ret);
        return ret;
}
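/*
 * Note on the trylock sequence above: locking the page proves no read is
 * in flight, letting us inspect its state and reissue the read ourselves;
 * the later "jumpstart" trylock/unlock pair replays the unlock event in
 * case the read completed before the monitor was armed, so the monitor
 * cannot miss its wakeup.
 */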
/*
 * copy data from backing pages to netfs pages to complete a read operation
 * - driven by FS-Cache's thread pool
 */
static void cachefiles_read_copier(struct fscache_operation *_op)
{
        struct cachefiles_one_read *monitor;
        struct cachefiles_object *object;
        struct fscache_retrieval *op;
        struct pagevec pagevec;
        int error, max;

        op = container_of(_op, struct fscache_retrieval, op);
        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);

        _enter("{ino=%lu}", object->backer->d_inode->i_ino);

        pagevec_init(&pagevec, 0);

        max = 8;
        spin_lock_irq(&object->work_lock);

        while (!list_empty(&op->to_do)) {
                monitor = list_entry(op->to_do.next,
                                     struct cachefiles_one_read, op_link);
                list_del(&monitor->op_link);

                spin_unlock_irq(&object->work_lock);

                _debug("- copy {%lu}", monitor->back_page->index);

        recheck:
                if (test_bit(FSCACHE_COOKIE_INVALIDATING,
                             &object->fscache.cookie->flags)) {
                        error = -ESTALE;
                } else if (PageUptodate(monitor->back_page)) {
                        copy_highpage(monitor->netfs_page, monitor->back_page);
                        fscache_mark_page_cached(monitor->op,
                                                 monitor->netfs_page);
                        error = 0;
                } else if (!PageError(monitor->back_page)) {
                        /* the page has probably been truncated */
                        error = cachefiles_read_reissue(object, monitor);
                        if (error == -EINPROGRESS)
                                goto next;
                        goto recheck;
                } else {
                        cachefiles_io_error_obj(
                                object,
                                "Readpage failed on backing file %lx",
                                (unsigned long) monitor->back_page->flags);
                        error = -EIO;
                }

                page_cache_release(monitor->back_page);

                fscache_end_io(op, monitor->netfs_page, error);
                page_cache_release(monitor->netfs_page);
                fscache_retrieval_complete(op, 1);
                fscache_put_retrieval(op);
                kfree(monitor);

        next:
                /* let the thread pool have some air occasionally */
                max--;
                if (max < 0 || need_resched()) {
                        if (!list_empty(&op->to_do))
                                fscache_enqueue_retrieval(op);
                        _leave(" [maxed out]");
                        return;
                }

                spin_lock_irq(&object->work_lock);
        }

        spin_unlock_irq(&object->work_lock);
        _leave("");
}
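/*
 * Note: the copier processes only a small batch of monitors per invocation
 * and requeues the retrieval if more work remains or rescheduling is
 * needed, so one large read cannot monopolise a thread-pool worker.
 */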
/*
 * read the corresponding page to the given set from the backing file
 * - an uncertain page is simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
                                            struct fscache_retrieval *op,
                                            struct page *netpage,
                                            struct pagevec *pagevec)
{
        struct cachefiles_one_read *monitor;
        struct address_space *bmapping;
        struct page *newpage, *backpage;
        int ret;

        _enter("");

        pagevec_reinit(pagevec);

        _debug("read back %p{%lu,%d}",
               netpage, netpage->index, page_count(netpage));

        monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
        if (!monitor)
                goto nomem;

        monitor->netfs_page = netpage;
        monitor->op = fscache_get_retrieval(op);

        init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);

        /* attempt to get hold of the backing page */
        bmapping = object->backer->d_inode->i_mapping;
        newpage = NULL;

        for (;;) {
                backpage = find_get_page(bmapping, netpage->index);
                if (backpage)
                        goto backing_page_already_present;

                if (!newpage) {
                        newpage = __page_cache_alloc(cachefiles_gfp |
                                                     __GFP_COLD);
                        if (!newpage)
                                goto nomem_monitor;
                }

                ret = add_to_page_cache(newpage, bmapping,
                                        netpage->index, cachefiles_gfp);
                if (ret == 0)
                        goto installed_new_backing_page;
                if (ret != -EEXIST)
                        goto nomem_page;
        }

        /* we've installed a new backing page, so now we need to add it
         * to the LRU list and start it reading */
installed_new_backing_page:
        _debug("- new %p", newpage);

        backpage = newpage;
        newpage = NULL;

        page_cache_get(backpage);
        pagevec_add(pagevec, backpage);
        __pagevec_lru_add_file(pagevec);

read_backing_page:
        ret = bmapping->a_ops->readpage(NULL, backpage);
        if (ret < 0)
                goto read_error;

        /* set the monitor to transfer the data across */
monitor_backing_page:
        _debug("- monitor add");

        /* install the monitor */
        page_cache_get(monitor->netfs_page);
        page_cache_get(backpage);
        monitor->back_page = backpage;
        monitor->monitor.private = backpage;
        add_page_wait_queue(backpage, &monitor->monitor);
        monitor = NULL;

        /* but the page may have been read before the monitor was installed, so
         * the monitor may miss the event - so we have to ensure that we do get
         * one in such a case */
        if (trylock_page(backpage)) {
                _debug("jumpstart %p {%lx}", backpage, backpage->flags);
                unlock_page(backpage);
        }
        goto success;

        /* if the backing page is already present, it can be in one of
         * three states: read in progress, read failed or read okay */
backing_page_already_present:
        _debug("- present");

        if (newpage) {
                page_cache_release(newpage);
                newpage = NULL;
        }

        if (PageError(backpage))
                goto io_error;

        if (PageUptodate(backpage))
                goto backing_page_already_uptodate;

        if (!trylock_page(backpage))
                goto monitor_backing_page;
        _debug("read %p {%lx}", backpage, backpage->flags);
        goto read_backing_page;

        /* the backing page is already up to date, attach the netfs
         * page to the pagecache and LRU and copy the data across */
backing_page_already_uptodate:
        _debug("- uptodate");

        fscache_mark_page_cached(op, netpage);

        copy_highpage(netpage, backpage);
        fscache_end_io(op, netpage, 0);
        fscache_retrieval_complete(op, 1);

success:
        _debug("success");
        ret = 0;

out:
        if (backpage)
                page_cache_release(backpage);
        if (monitor) {
                fscache_put_retrieval(monitor->op);
                kfree(monitor);
        }
        _leave(" = %d", ret);
        return ret;

read_error:
        _debug("read error %d", ret);
        if (ret == -ENOMEM) {
                fscache_retrieval_complete(op, 1);
                goto out;
        }
io_error:
        cachefiles_io_error_obj(object, "Page read error on backing file");
        fscache_retrieval_complete(op, 1);
        ret = -ENOBUFS;
        goto out;

nomem_page:
        page_cache_release(newpage);
nomem_monitor:
        fscache_put_retrieval(monitor->op);
        kfree(monitor);
nomem:
        fscache_retrieval_complete(op, 1);
        _leave(" = -ENOMEM");
        return -ENOMEM;
}
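/*
 * Note: on the success path above, ownership of the page references is
 * handed over - the monitor holds refs on both the netfs and backing
 * pages until cachefiles_read_copier() releases them - which is why
 * "monitor" and "newpage" are NULLed once installed, so the common "out"
 * cleanup only drops whatever is still owned locally.
 */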
/*
 * read a page from the cache or allocate a block in which to store it
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - if the page is backed by a block in the cache:
 *   - a read will be started which will call the callback on completion
 *   - 0 will be returned
 * - else if the page is unbacked:
 *   - the metadata will be retained
 *   - -ENODATA will be returned
 */
int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
                                  struct page *page,
                                  gfp_t gfp)
{
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
        struct pagevec pagevec;
        struct inode *inode;
        sector_t block0, block;
        unsigned shift;
        int ret;

        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);
        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);

        _enter("{%p},{%lx},,,", object, page->index);

        if (!object->backer)
                goto enobufs;

        inode = object->backer->d_inode;
        ASSERT(S_ISREG(inode->i_mode));
        ASSERT(inode->i_mapping->a_ops->bmap);
        ASSERT(inode->i_mapping->a_ops->readpages);

        /* calculate the shift required to use bmap */
        if (inode->i_sb->s_blocksize > PAGE_SIZE)
                goto enobufs;

        shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

        op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
        op->op.flags |= FSCACHE_OP_ASYNC;
        op->op.processor = cachefiles_read_copier;

        pagevec_init(&pagevec, 0);

        /* we assume the absence or presence of the first block is a good
         * enough indication for the page as a whole
         * - TODO: don't use bmap() for this as it is _not_ actually good
         *   enough for this as it doesn't indicate errors, but it's all we've
         *   got for the moment
         */
        block0 = page->index;
        block0 <<= shift;

        block = inode->i_mapping->a_ops->bmap(inode->i_mapping, block0);
        _debug("%llx -> %llx",
               (unsigned long long) block0,
               (unsigned long long) block);

        if (block) {
                /* submit the apparently valid page to the backing fs to be
                 * read from disk */
                ret = cachefiles_read_backing_file_one(object, op, page,
                                                       &pagevec);
        } else if (cachefiles_has_space(cache, 0, 1) == 0) {
                /* there's space in the cache we can use */
                fscache_mark_page_cached(op, page);
                fscache_retrieval_complete(op, 1);
                ret = -ENODATA;
        } else {
                goto enobufs;
        }

        _leave(" = %d", ret);
        return ret;

enobufs:
        fscache_retrieval_complete(op, 1);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
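/*
 * Note on the bmap probe above: the page index is converted to a backing
 * filesystem block number by shifting it by (PAGE_SHIFT -
 * s_blocksize_bits), which is also why filesystems with blocks larger
 * than a page are rejected with -ENOBUFS - the shift would be negative.
 */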
/*
 * read the corresponding pages to the given set from the backing file
 * - any uncertain pages are simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file(struct cachefiles_object *object,
                                        struct fscache_retrieval *op,
                                        struct list_head *list)
{
        struct cachefiles_one_read *monitor = NULL;
        struct address_space *bmapping = object->backer->d_inode->i_mapping;
        struct pagevec lru_pvec;
        struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
        int ret = 0;

        _enter("");

        pagevec_init(&lru_pvec, 0);

        list_for_each_entry_safe(netpage, _n, list, lru) {
                list_del(&netpage->lru);

                _debug("read back %p{%lu,%d}",
                       netpage, netpage->index, page_count(netpage));

                if (!monitor) {
                        monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
                        if (!monitor)
                                goto nomem;

                        monitor->op = fscache_get_retrieval(op);
                        init_waitqueue_func_entry(&monitor->monitor,
                                                  cachefiles_read_waiter);
                }

                for (;;) {
                        backpage = find_get_page(bmapping, netpage->index);
                        if (backpage)
                                goto backing_page_already_present;

                        if (!newpage) {
                                newpage = __page_cache_alloc(cachefiles_gfp |
                                                             __GFP_COLD);
                                if (!newpage)
                                        goto nomem;
                        }

                        ret = add_to_page_cache(newpage, bmapping,
                                                netpage->index, cachefiles_gfp);
                        if (ret == 0)
                                goto installed_new_backing_page;
                        if (ret != -EEXIST)
                                goto nomem;
                }

                /* we've installed a new backing page, so now we need to add it
                 * to the LRU list and start it reading */
        installed_new_backing_page:
                _debug("- new %p", newpage);

                backpage = newpage;
                newpage = NULL;

                page_cache_get(backpage);
                if (!pagevec_add(&lru_pvec, backpage))
                        __pagevec_lru_add_file(&lru_pvec);

        reread_backing_page:
                ret = bmapping->a_ops->readpage(NULL, backpage);
                if (ret < 0)
                        goto read_error;

                /* add the netfs page to the pagecache and LRU, and set the
                 * monitor to transfer the data across */
        monitor_backing_page:
                _debug("- monitor add");

                ret = add_to_page_cache(netpage, op->mapping, netpage->index,
                                        cachefiles_gfp);
                if (ret < 0) {
                        if (ret == -EEXIST) {
                                page_cache_release(netpage);
                                fscache_retrieval_complete(op, 1);
                                continue;
                        }
                        goto nomem;
                }

                page_cache_get(netpage);
                if (!pagevec_add(&lru_pvec, netpage))
                        __pagevec_lru_add_file(&lru_pvec);

                /* install a monitor */
                page_cache_get(netpage);
                monitor->netfs_page = netpage;

                page_cache_get(backpage);
                monitor->back_page = backpage;
                monitor->monitor.private = backpage;
                add_page_wait_queue(backpage, &monitor->monitor);
                monitor = NULL;

                /* but the page may have been read before the monitor was
                 * installed, so the monitor may miss the event - so we have to
                 * ensure that we do get one in such a case */
                if (trylock_page(backpage)) {
                        _debug("2unlock %p {%lx}", backpage, backpage->flags);
                        unlock_page(backpage);
                }

                page_cache_release(backpage);
                backpage = NULL;

                page_cache_release(netpage);
                netpage = NULL;
                continue;

                /* if the backing page is already present, it can be in one of
                 * three states: read in progress, read failed or read okay */
        backing_page_already_present:
                _debug("- present %p", backpage);

                if (PageError(backpage))
                        goto io_error;

                if (PageUptodate(backpage))
                        goto backing_page_already_uptodate;

                _debug("- not ready %p{%lx}", backpage, backpage->flags);

                if (!trylock_page(backpage))
                        goto monitor_backing_page;

                if (PageError(backpage)) {
                        _debug("error %lx", backpage->flags);
                        unlock_page(backpage);
                        goto io_error;
                }

                if (PageUptodate(backpage))
                        goto backing_page_already_uptodate_unlock;

                /* we've locked a page that's neither up to date nor erroneous,
                 * so we need to attempt to read it again */
                goto reread_backing_page;

                /* the backing page is already up to date, attach the netfs
                 * page to the pagecache and LRU and copy the data across */
        backing_page_already_uptodate_unlock:
                _debug("uptodate %lx", backpage->flags);
                unlock_page(backpage);
        backing_page_already_uptodate:
                _debug("- uptodate");

                ret = add_to_page_cache(netpage, op->mapping, netpage->index,
                                        cachefiles_gfp);
                if (ret < 0) {
                        if (ret == -EEXIST) {
                                page_cache_release(netpage);
                                fscache_retrieval_complete(op, 1);
                                continue;
                        }
                        goto nomem;
                }

                copy_highpage(netpage, backpage);

                page_cache_release(backpage);
                backpage = NULL;

                fscache_mark_page_cached(op, netpage);

                page_cache_get(netpage);
                if (!pagevec_add(&lru_pvec, netpage))
                        __pagevec_lru_add_file(&lru_pvec);

                /* the netpage is unlocked and marked up to date here */
                fscache_end_io(op, netpage, 0);
                page_cache_release(netpage);
                netpage = NULL;
                fscache_retrieval_complete(op, 1);
                continue;
        }

        netpage = NULL;

        _debug("out");

out:
        /* tidy up */
        pagevec_lru_add_file(&lru_pvec);

        if (newpage)
                page_cache_release(newpage);
        if (netpage)
                page_cache_release(netpage);
        if (backpage)
                page_cache_release(backpage);
        if (monitor) {
                fscache_put_retrieval(op);
                kfree(monitor);
        }

        list_for_each_entry_safe(netpage, _n, list, lru) {
                list_del(&netpage->lru);
                page_cache_release(netpage);
                fscache_retrieval_complete(op, 1);
        }

        _leave(" = %d", ret);
        return ret;

nomem:
        _debug("nomem");
        ret = -ENOMEM;
        goto record_page_complete;

read_error:
        _debug("read error %d", ret);
        if (ret == -ENOMEM)
                goto record_page_complete;
io_error:
        cachefiles_io_error_obj(object, "Page read error on backing file");
        ret = -ENOBUFS;
record_page_complete:
        fscache_retrieval_complete(op, 1);
        goto out;
}
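/*
 * Note: cachefiles_read_backing_file() is the multi-page counterpart of
 * cachefiles_read_backing_file_one() - it walks the list handed over by
 * cachefiles_read_or_alloc_pages(), reusing one monitor allocation across
 * iterations where possible and batching LRU insertions through lru_pvec.
 */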
/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 */
int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
                                   struct list_head *pages,
                                   unsigned *nr_pages,
                                   gfp_t gfp)
{
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
        struct list_head backpages;
        struct pagevec pagevec;
        struct inode *inode;
        struct page *page, *_n;
        unsigned shift, nrbackpages;
        int ret, ret2, space;

        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);
        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);

        _enter("{OBJ%x,%d},,%d,,",
               object->fscache.debug_id, atomic_read(&op->op.usage),
               *nr_pages);

        if (!object->backer)
                goto all_enobufs;

        space = 1;
        if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
                space = 0;

        inode = object->backer->d_inode;
        ASSERT(S_ISREG(inode->i_mode));
        ASSERT(inode->i_mapping->a_ops->bmap);
        ASSERT(inode->i_mapping->a_ops->readpages);

        /* calculate the shift required to use bmap */
        if (inode->i_sb->s_blocksize > PAGE_SIZE)
                goto all_enobufs;

        shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

        pagevec_init(&pagevec, 0);

        op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
        op->op.flags |= FSCACHE_OP_ASYNC;
        op->op.processor = cachefiles_read_copier;

        INIT_LIST_HEAD(&backpages);
        nrbackpages = 0;

        ret = space ? -ENODATA : -ENOBUFS;
        list_for_each_entry_safe(page, _n, pages, lru) {
                sector_t block0, block;

                /* we assume the absence or presence of the first block is a
                 * good enough indication for the page as a whole
                 * - TODO: don't use bmap() for this as it is _not_ actually
                 *   good enough for this as it doesn't indicate errors, but
                 *   it's all we've got for the moment
                 */
                block0 = page->index;
                block0 <<= shift;

                block = inode->i_mapping->a_ops->bmap(inode->i_mapping,
                                                      block0);
                _debug("%llx -> %llx",
                       (unsigned long long) block0,
                       (unsigned long long) block);

                if (block) {
                        /* we have data - add it to the list to give to the
                         * backing fs */
                        list_move(&page->lru, &backpages);
                        (*nr_pages)--;
                        nrbackpages++;
                } else if (space && pagevec_add(&pagevec, page) == 0) {
                        fscache_mark_pages_cached(op, &pagevec);
                        fscache_retrieval_complete(op, 1);
                        ret = -ENODATA;
                } else {
                        fscache_retrieval_complete(op, 1);
                }
        }

        if (pagevec_count(&pagevec) > 0)
                fscache_mark_pages_cached(op, &pagevec);

        if (list_empty(pages))
                ret = 0;

        /* submit the apparently valid pages to the backing fs to be read from
         * disk */
        if (nrbackpages > 0) {
                ret2 = cachefiles_read_backing_file(object, op, &backpages);
                if (ret2 == -ENOMEM || ret2 == -EINTR)
                        ret = ret2;
        }

        _leave(" = %d [nr=%u%s]",
               ret, *nr_pages, list_empty(pages) ? " empty" : "");
        return ret;

all_enobufs:
        fscache_retrieval_complete(op, *nr_pages);
        return -ENOBUFS;
}
/*
 * allocate a block in the cache in which to store a page
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - otherwise:
 *   - the metadata will be retained
 *   - 0 will be returned
 */
int cachefiles_allocate_page(struct fscache_retrieval *op,
                             struct page *page,
                             gfp_t gfp)
{
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
        int ret;

        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);
        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);

        _enter("%p,{%lx},", object, page->index);

        ret = cachefiles_has_space(cache, 0, 1);
        if (ret == 0)
                fscache_mark_page_cached(op, page);
        else
                ret = -ENOBUFS;

        fscache_retrieval_complete(op, 1);
        _leave(" = %d", ret);
        return ret;
}
/*
 * allocate blocks in the cache in which to store a set of pages
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if ran out of memory
 * - returns -ENOBUFS if some buffers couldn't be made available
 * - returns -ENOBUFS if some pages are beyond EOF
 * - otherwise:
 *   - -ENODATA will be returned
 * - metadata will be retained for any page marked
 */
int cachefiles_allocate_pages(struct fscache_retrieval *op,
                              struct list_head *pages,
                              unsigned *nr_pages,
                              gfp_t gfp)
{
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
        struct pagevec pagevec;
        struct page *page;
        int ret;

        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);
        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);

        _enter("%p,,,%d,", object, *nr_pages);

        ret = cachefiles_has_space(cache, 0, *nr_pages);
        if (ret == 0) {
                pagevec_init(&pagevec, 0);

                list_for_each_entry(page, pages, lru) {
                        if (pagevec_add(&pagevec, page) == 0)
                                fscache_mark_pages_cached(op, &pagevec);
                }

                if (pagevec_count(&pagevec) > 0)
                        fscache_mark_pages_cached(op, &pagevec);
                ret = -ENODATA;
        } else {
                ret = -ENOBUFS;
        }

        fscache_retrieval_complete(op, *nr_pages);
        _leave(" = %d", ret);
        return ret;
}
/*
 * request a page be stored in the cache
 * - cache withdrawal is prevented by the caller
 * - this request may be ignored if there's no cache block available, in which
 *   case -ENOBUFS will be returned
 * - if the op is in progress, 0 will be returned
 */
int cachefiles_write_page(struct fscache_storage *op, struct page *page)
{
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
        mm_segment_t old_fs;
        struct file *file;
        struct path path;
        loff_t pos, eof;
        size_t len;
        void *data;
        int ret;

        ASSERT(op != NULL);
        ASSERT(page != NULL);

        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);

        _enter("%p,%p{%lx},,,", object, page, page->index);

        if (!object->backer) {
                _leave(" = -ENOBUFS");
                return -ENOBUFS;
        }

        ASSERT(S_ISREG(object->backer->d_inode->i_mode));

        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);

        /* write the page to the backing filesystem and let it store it in its
         * own time */
        path.mnt = cache->mnt;
        path.dentry = object->backer;
        file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred);
        if (IS_ERR(file)) {
                ret = PTR_ERR(file);
        } else {
                ret = -EIO;
                if (file->f_op->write) {
                        pos = (loff_t) page->index << PAGE_SHIFT;

                        /* we mustn't write more data than we have, so we have
                         * to beware of a partial page at EOF */
                        eof = object->fscache.store_limit_l;
                        len = PAGE_SIZE;
                        if (eof & ~PAGE_MASK) {
                                ASSERTCMP(pos, <, eof);
                                if (eof - pos < PAGE_SIZE) {
                                        _debug("cut short %llx to %llx",
                                               pos, eof);
                                        len = eof - pos;
                                        ASSERTCMP(pos + len, ==, eof);
                                }
                        }

                        data = kmap(page);
                        old_fs = get_fs();
                        set_fs(KERNEL_DS);
                        ret = file->f_op->write(
                                file, (const void __user *) data, len, &pos);
                        set_fs(old_fs);
                        kunmap(page);
                        if (ret != len)
                                ret = -EIO;
                }
                fput(file);
        }

        if (ret < 0) {
                if (ret == -EIO)
                        cachefiles_io_error_obj(
                                object, "Write page to backing file failed");
                ret = -ENOBUFS;
        }

        _leave(" = %d", ret);
        return ret;
}
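/*
 * Note: the write above goes through file->f_op->write() with the page
 * kmap()'d and the address limit raised to KERNEL_DS, since the data
 * being "written" lives in kernel space rather than in a userspace
 * buffer; the store limit check trims the final, possibly partial,
 * page at EOF.
 */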
/*
 * detach a backing block from a page
 * - cache withdrawal is prevented by the caller
 */
void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
{
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;

        object = container_of(_object, struct cachefiles_object, fscache);
        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);

        _enter("%p,{%lu}", object, page->index);

        spin_unlock(&object->fscache.cookie->lock);
}