/* Storage object read/write
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "internal.h"

/*
 * detect wake up events generated by the unlocking of pages in which we're
 * interested
 * - we use this to detect read completion of backing pages
 * - the caller holds the waitqueue lock
 */
static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
				  int sync, void *_key)
{
	struct cachefiles_one_read *monitor =
		container_of(wait, struct cachefiles_one_read, monitor);
	struct cachefiles_object *object;
	struct wait_bit_key *key = _key;
	struct page *page = wait->private;

	ASSERT(key);

	_enter("{%lu},%u,%d,{%p,%u}",
	       monitor->netfs_page->index, mode, sync,
	       key->flags, key->bit_nr);

	if (key->flags != &page->flags ||
	    key->bit_nr != PG_locked)
		return 0;

	_debug("--- monitor %p %lx ---", page, page->flags);

	if (!PageUptodate(page) && !PageError(page)) {
		/* unlocked, not uptodate and not erroneous? */
		_debug("page probably truncated");
	}

	/* remove from the waitqueue */
	list_del(&wait->task_list);

	/* move onto the action list and queue for FS-Cache thread pool */
	ASSERT(monitor->op);

	object = container_of(monitor->op->op.object,
			      struct cachefiles_object, fscache);

	spin_lock(&object->work_lock);
	list_add_tail(&monitor->op_link, &monitor->op->to_do);
	spin_unlock(&object->work_lock);

	fscache_enqueue_retrieval(monitor->op);
	return 0;
}
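
/*
 * For orientation: the monitor record threaded through the waiter above is
 * struct cachefiles_one_read (defined in internal.h), which roughly carries:
 *
 *	monitor		- wait_queue_t hooked onto the backing page's waitqueue
 *	back_page	- the backing file page being waited on
 *	netfs_page	- the netfs page that will be filled from it
 *	op		- the fscache_retrieval this read belongs to
 *	op_link		- link in op->to_do
 *
 * cachefiles_read_waiter() fires when the backing page is unlocked; it takes
 * itself off the waitqueue and queues the monitor on op->to_do for
 * cachefiles_read_copier() to process.
 */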

/*
 * handle a probably truncated page
 * - check to see if the page is still relevant and reissue the read if
 *   possible
 * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
 *   must wait again and 0 if successful
 */
static int cachefiles_read_reissue(struct cachefiles_object *object,
				   struct cachefiles_one_read *monitor)
{
	struct address_space *bmapping = object->backer->d_inode->i_mapping;
	struct page *backpage = monitor->back_page, *backpage2;
	int ret;

	_enter("{ino=%lx},{%lx,%lx}",
	       object->backer->d_inode->i_ino,
	       backpage->index, backpage->flags);

	/* skip if the page was truncated away completely */
	if (backpage->mapping != bmapping) {
		_leave(" = -ENODATA [mapping]");
		return -ENODATA;
	}

	backpage2 = find_get_page(bmapping, backpage->index);
	if (!backpage2) {
		_leave(" = -ENODATA [gone]");
		return -ENODATA;
	}

	if (backpage != backpage2) {
		put_page(backpage2);
		_leave(" = -ENODATA [different]");
		return -ENODATA;
	}

	/* the page is still there and we already have a ref on it, so we don't
	 * need a second */
	put_page(backpage2);

	INIT_LIST_HEAD(&monitor->op_link);
	add_page_wait_queue(backpage, &monitor->monitor);

	if (trylock_page(backpage)) {
		ret = -EIO;
		if (PageError(backpage))
			goto unlock_discard;
		ret = 0;
		if (PageUptodate(backpage))
			goto unlock_discard;

		_debug("reissue read");
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto unlock_discard;
	}

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}

	/* it'll reappear on the todo list */
	_leave(" = -EINPROGRESS");
	return -EINPROGRESS;

unlock_discard:
	unlock_page(backpage);
	spin_lock_irq(&object->work_lock);
	list_del(&monitor->op_link);
	spin_unlock_irq(&object->work_lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * copy data from backing pages to netfs pages to complete a read operation
 * - driven by FS-Cache's thread pool
 */
static void cachefiles_read_copier(struct fscache_operation *_op)
{
	struct cachefiles_one_read *monitor;
	struct cachefiles_object *object;
	struct fscache_retrieval *op;
	struct pagevec pagevec;
	int error, max;

	op = container_of(_op, struct fscache_retrieval, op);
	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("{ino=%lu}", object->backer->d_inode->i_ino);

	pagevec_init(&pagevec, 0);

	max = 8;
	spin_lock_irq(&object->work_lock);

	while (!list_empty(&op->to_do)) {
		monitor = list_entry(op->to_do.next,
				     struct cachefiles_one_read, op_link);
		list_del(&monitor->op_link);

		spin_unlock_irq(&object->work_lock);

		_debug("- copy {%lu}", monitor->back_page->index);

	recheck:
		if (test_bit(FSCACHE_COOKIE_INVALIDATING,
			     &object->fscache.cookie->flags)) {
			error = -ESTALE;
		} else if (PageUptodate(monitor->back_page)) {
			copy_highpage(monitor->netfs_page, monitor->back_page);
			fscache_mark_page_cached(monitor->op,
						 monitor->netfs_page);
			error = 0;
		} else if (!PageError(monitor->back_page)) {
			/* the page has probably been truncated */
			error = cachefiles_read_reissue(object, monitor);
			if (error == -EINPROGRESS)
				goto next;
			goto recheck;
		} else {
			cachefiles_io_error_obj(
				object,
				"Readpage failed on backing file %lx",
				(unsigned long) monitor->back_page->flags);
			error = -EIO;
		}

		page_cache_release(monitor->back_page);

		fscache_end_io(op, monitor->netfs_page, error);
		page_cache_release(monitor->netfs_page);
		fscache_retrieval_complete(op, 1);
		fscache_put_retrieval(op);
		kfree(monitor);

	next:
		/* let the thread pool have some air occasionally */
		max--;
		if (max < 0 || need_resched()) {
			if (!list_empty(&op->to_do))
				fscache_enqueue_retrieval(op);
			_leave(" [maxed out]");
			return;
		}

		spin_lock_irq(&object->work_lock);
	}

	spin_unlock_irq(&object->work_lock);
	_leave("");
}

/*
 * read the corresponding page to the given set from the backing file
 * - an uncertain page is simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
					    struct fscache_retrieval *op,
					    struct page *netpage)
{
	struct cachefiles_one_read *monitor;
	struct address_space *bmapping;
	struct page *newpage, *backpage;
	int ret;

	_enter("");

	_debug("read back %p{%lu,%d}",
	       netpage, netpage->index, page_count(netpage));

	monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
	if (!monitor)
		goto nomem;

	monitor->netfs_page = netpage;
	monitor->op = fscache_get_retrieval(op);

	init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);

	/* attempt to get hold of the backing page */
	bmapping = object->backer->d_inode->i_mapping;
	newpage = NULL;

	for (;;) {
		backpage = find_get_page(bmapping, netpage->index);
		if (backpage)
			goto backing_page_already_present;

		if (!newpage) {
			newpage = __page_cache_alloc(cachefiles_gfp |
						     __GFP_COLD);
			if (!newpage)
				goto nomem_monitor;
		}

		ret = add_to_page_cache(newpage, bmapping,
					netpage->index, cachefiles_gfp);
		if (ret == 0)
			goto installed_new_backing_page;
		if (ret != -EEXIST)
			goto nomem_page;
	}

	/* we've installed a new backing page, so now we need to add it
	 * to the LRU list and start it reading */
installed_new_backing_page:
	_debug("- new %p", newpage);

	backpage = newpage;
	newpage = NULL;

	lru_cache_add_file(backpage);

read_backing_page:
	ret = bmapping->a_ops->readpage(NULL, backpage);
	if (ret < 0)
		goto read_error;

	/* set the monitor to transfer the data across */
monitor_backing_page:
	_debug("- monitor add");

	/* install the monitor */
	page_cache_get(monitor->netfs_page);
	page_cache_get(backpage);
	monitor->back_page = backpage;
	monitor->monitor.private = backpage;
	add_page_wait_queue(backpage, &monitor->monitor);
	monitor = NULL;

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}
	goto success;

	/* if the backing page is already present, it can be in one of
	 * three states: read in progress, read failed or read okay */
backing_page_already_present:
	_debug("- present");

	if (newpage) {
		page_cache_release(newpage);
		newpage = NULL;
	}

	if (PageError(backpage))
		goto io_error;

	if (PageUptodate(backpage))
		goto backing_page_already_uptodate;

	if (!trylock_page(backpage))
		goto monitor_backing_page;
	_debug("read %p {%lx}", backpage, backpage->flags);
	goto read_backing_page;

	/* the backing page is already up to date, attach the netfs
	 * page to the pagecache and LRU and copy the data across */
backing_page_already_uptodate:
	_debug("- uptodate");

	fscache_mark_page_cached(op, netpage);

	copy_highpage(netpage, backpage);
	fscache_end_io(op, netpage, 0);
	fscache_retrieval_complete(op, 1);

success:
	_debug("success");
	ret = 0;

out:
	if (backpage)
		page_cache_release(backpage);
	if (monitor) {
		fscache_put_retrieval(monitor->op);
		kfree(monitor);
	}
	_leave(" = %d", ret);
	return ret;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM) {
		fscache_retrieval_complete(op, 1);
		goto out;
	}
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	fscache_retrieval_complete(op, 1);
	ret = -ENOBUFS;
	goto out;

nomem_page:
	page_cache_release(newpage);
nomem_monitor:
	fscache_put_retrieval(monitor->op);
	kfree(monitor);
nomem:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - if the page is backed by a block in the cache:
 *   - a read will be started which will call the callback on completion
 *   - 0 will be returned
 * - else if the page is unbacked:
 *   - the metadata will be retained
 *   - -ENODATA will be returned
 */
int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
				  struct page *page,
				  gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct pagevec pagevec;
	struct inode *inode;
	sector_t block0, block;
	unsigned shift;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{%p},{%lx},,,", object, page->index);

	if (!object->backer)
		goto enobufs;

	inode = object->backer->d_inode;
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->bmap);
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
	if (inode->i_sb->s_blocksize > PAGE_SIZE)
		goto enobufs;

	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	pagevec_init(&pagevec, 0);

	/* we assume the absence or presence of the first block is a good
	 * enough indication for the page as a whole
	 * - TODO: don't use bmap() for this as it is _not_ actually good
	 *   enough for this as it doesn't indicate errors, but it's all we've
	 *   got for the moment */
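
	/*
	 * Illustrative arithmetic (example numbers, not taken from this
	 * file): with 4KiB pages (PAGE_SHIFT = 12) over a backing fs using
	 * 1KiB blocks (s_blocksize_bits = 10), shift = 2, so page index N is
	 * probed as backing block N << 2; a non-zero bmap() result for that
	 * first block is taken to mean the whole page is backed.
	 */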

	block0 = page->index;
	block0 <<= shift;

	block = inode->i_mapping->a_ops->bmap(inode->i_mapping, block0);
	_debug("%llx -> %llx",
	       (unsigned long long) block0,
	       (unsigned long long) block);

	if (block) {
		/* submit the apparently valid page to the backing fs to be
		 * read from disk */
		ret = cachefiles_read_backing_file_one(object, op, page);
	} else if (cachefiles_has_space(cache, 0, 1) == 0) {
		/* there's space in the cache we can use */
		fscache_mark_page_cached(op, page);
		fscache_retrieval_complete(op, 1);
		ret = -ENODATA;
	} else {
		goto enobufs;
	}

	_leave(" = %d", ret);
	return ret;

enobufs:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}

/*
 * read the corresponding pages to the given set from the backing file
 * - any uncertain pages are simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file(struct cachefiles_object *object,
					struct fscache_retrieval *op,
					struct list_head *list)
{
	struct cachefiles_one_read *monitor = NULL;
	struct address_space *bmapping = object->backer->d_inode->i_mapping;
	struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
	int ret = 0;

	_enter("");

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);

		_debug("read back %p{%lu,%d}",
		       netpage, netpage->index, page_count(netpage));

		if (!monitor) {
			monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
			if (!monitor)
				goto nomem;

			monitor->op = fscache_get_retrieval(op);
			init_waitqueue_func_entry(&monitor->monitor,
						  cachefiles_read_waiter);
		}

		for (;;) {
			backpage = find_get_page(bmapping, netpage->index);
			if (backpage)
				goto backing_page_already_present;

			if (!newpage) {
				newpage = __page_cache_alloc(cachefiles_gfp |
							     __GFP_COLD);
				if (!newpage)
					goto nomem;
			}

			ret = add_to_page_cache(newpage, bmapping,
						netpage->index, cachefiles_gfp);
			if (ret == 0)
				goto installed_new_backing_page;
			if (ret != -EEXIST)
				goto nomem;
		}

		/* we've installed a new backing page, so now we need to add it
		 * to the LRU list and start it reading */
	installed_new_backing_page:
		_debug("- new %p", newpage);

		backpage = newpage;
		newpage = NULL;

		lru_cache_add_file(backpage);

	reread_backing_page:
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto read_error;

		/* add the netfs page to the pagecache and LRU, and set the
		 * monitor to transfer the data across */
	monitor_backing_page:
		_debug("- monitor add");

		ret = add_to_page_cache(netpage, op->mapping, netpage->index,
					cachefiles_gfp);
		if (ret < 0) {
			if (ret == -EEXIST) {
				page_cache_release(netpage);
				fscache_retrieval_complete(op, 1);
				continue;
			}
			goto nomem;
		}

		lru_cache_add_file(netpage);

		/* install a monitor */
		page_cache_get(netpage);
		monitor->netfs_page = netpage;

		page_cache_get(backpage);
		monitor->back_page = backpage;
		monitor->monitor.private = backpage;
		add_page_wait_queue(backpage, &monitor->monitor);
		monitor = NULL;

		/* but the page may have been read before the monitor was
		 * installed, so the monitor may miss the event - so we have to
		 * ensure that we do get one in such a case */
		if (trylock_page(backpage)) {
			_debug("2unlock %p {%lx}", backpage, backpage->flags);
			unlock_page(backpage);
		}

		page_cache_release(backpage);
		backpage = NULL;

		page_cache_release(netpage);
		netpage = NULL;
		continue;

		/* if the backing page is already present, it can be in one of
		 * three states: read in progress, read failed or read okay */
	backing_page_already_present:
		_debug("- present %p", backpage);

		if (PageError(backpage))
			goto io_error;

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate;

		_debug("- not ready %p{%lx}", backpage, backpage->flags);

		if (!trylock_page(backpage))
			goto monitor_backing_page;

		if (PageError(backpage)) {
			_debug("error %lx", backpage->flags);
			unlock_page(backpage);
			goto io_error;
		}

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate_unlock;

		/* we've locked a page that's neither up to date nor erroneous,
		 * so we need to attempt to read it again */
		goto reread_backing_page;

		/* the backing page is already up to date, attach the netfs
		 * page to the pagecache and LRU and copy the data across */
	backing_page_already_uptodate_unlock:
		_debug("uptodate %lx", backpage->flags);
		unlock_page(backpage);
	backing_page_already_uptodate:
		_debug("- uptodate");

		ret = add_to_page_cache(netpage, op->mapping, netpage->index,
					cachefiles_gfp);
		if (ret < 0) {
			if (ret == -EEXIST) {
				page_cache_release(netpage);
				fscache_retrieval_complete(op, 1);
				continue;
			}
			goto nomem;
		}

		copy_highpage(netpage, backpage);

		page_cache_release(backpage);
		backpage = NULL;

		fscache_mark_page_cached(op, netpage);

		lru_cache_add_file(netpage);

		/* the netpage is unlocked and marked up to date here */
		fscache_end_io(op, netpage, 0);
		page_cache_release(netpage);
		netpage = NULL;
		fscache_retrieval_complete(op, 1);
		continue;
	}

	netpage = NULL;

	_debug("out");

out:
	/* tidy up */
	if (newpage)
		page_cache_release(newpage);
	if (netpage)
		page_cache_release(netpage);
	if (backpage)
		page_cache_release(backpage);
	if (monitor) {
		fscache_put_retrieval(op);
		kfree(monitor);
	}

	/* release any netfs pages we did not get around to processing */
	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);
		page_cache_release(netpage);
		fscache_retrieval_complete(op, 1);
	}

	_leave(" = %d", ret);
	return ret;

nomem:
	_debug("nomem");
	ret = -ENOMEM;
	goto record_page_complete;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM)
		goto record_page_complete;
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	ret = -ENOBUFS;
record_page_complete:
	fscache_retrieval_complete(op, 1);
	goto out;
}

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 */
int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
				   struct list_head *pages,
				   unsigned *nr_pages,
				   gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct list_head backpages;
	struct pagevec pagevec;
	struct inode *inode;
	struct page *page, *_n;
	unsigned shift, nrbackpages;
	int ret, ret2, space;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{OBJ%x,%d},,%d,,",
	       object->fscache.debug_id, atomic_read(&op->op.usage),
	       *nr_pages);

	if (!object->backer)
		goto all_enobufs;

	space = 1;
	if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
		space = 0;

	inode = object->backer->d_inode;
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->bmap);
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
	if (inode->i_sb->s_blocksize > PAGE_SIZE)
		goto all_enobufs;

	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

	pagevec_init(&pagevec, 0);

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	INIT_LIST_HEAD(&backpages);
	nrbackpages = 0;

	ret = space ? -ENODATA : -ENOBUFS;
	list_for_each_entry_safe(page, _n, pages, lru) {
		sector_t block0, block;

		/* we assume the absence or presence of the first block is a
		 * good enough indication for the page as a whole
		 * - TODO: don't use bmap() for this as it is _not_ actually
		 *   good enough for this as it doesn't indicate errors, but
		 *   it's all we've got for the moment */
		block0 = page->index;
		block0 <<= shift;

		block = inode->i_mapping->a_ops->bmap(inode->i_mapping,
						      block0);
		_debug("%llx -> %llx",
		       (unsigned long long) block0,
		       (unsigned long long) block);

		if (block) {
			/* we have data - add it to the list to give to the
			 * backing fs */
			list_move(&page->lru, &backpages);
			(*nr_pages)--;
			nrbackpages++;
		} else if (space && pagevec_add(&pagevec, page) == 0) {
			fscache_mark_pages_cached(op, &pagevec);
			fscache_retrieval_complete(op, 1);
			ret = -ENODATA;
		} else {
			fscache_retrieval_complete(op, 1);
		}
	}

	if (pagevec_count(&pagevec) > 0)
		fscache_mark_pages_cached(op, &pagevec);

	if (list_empty(pages))
		ret = 0;

	/* submit the apparently valid pages to the backing fs to be read from
	 * disk */
	if (nrbackpages > 0) {
		ret2 = cachefiles_read_backing_file(object, op, &backpages);
		if (ret2 == -ENOMEM || ret2 == -EINTR)
			ret = ret2;
	}

	_leave(" = %d [nr=%u%s]",
	       ret, *nr_pages, list_empty(pages) ? " empty" : "");
	return ret;

all_enobufs:
	fscache_retrieval_complete(op, *nr_pages);
	return -ENOBUFS;
}

/*
 * allocate a block in the cache in which to store a page
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - otherwise:
 *   - the metadata will be retained
 *   - 0 will be returned
 */
int cachefiles_allocate_page(struct fscache_retrieval *op,
			     struct page *page,
			     gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,{%lx},", object, page->index);

	ret = cachefiles_has_space(cache, 0, 1);
	if (ret == 0)
		fscache_mark_page_cached(op, page);
	else
		ret = -ENOBUFS;

	fscache_retrieval_complete(op, 1);
	_leave(" = %d", ret);
	return ret;
}

/*
 * allocate blocks in the cache in which to store a set of pages
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if ran out of memory
 * - returns -ENOBUFS if some buffers couldn't be made available
 * - returns -ENOBUFS if some pages are beyond EOF
 * - otherwise:
 *   - -ENODATA will be returned
 *   - metadata will be retained for any page marked
 */
int cachefiles_allocate_pages(struct fscache_retrieval *op,
			      struct list_head *pages,
			      unsigned *nr_pages,
			      gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct pagevec pagevec;
	struct page *page;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,,,%d,", object, *nr_pages);

	ret = cachefiles_has_space(cache, 0, *nr_pages);
	if (ret == 0) {
		pagevec_init(&pagevec, 0);

		list_for_each_entry(page, pages, lru) {
			if (pagevec_add(&pagevec, page) == 0)
				fscache_mark_pages_cached(op, &pagevec);
		}

		if (pagevec_count(&pagevec) > 0)
			fscache_mark_pages_cached(op, &pagevec);
		ret = -ENODATA;
	} else {
		ret = -ENOBUFS;
	}

	fscache_retrieval_complete(op, *nr_pages);
	_leave(" = %d", ret);
	return ret;
}

/*
 * request a page be stored in the cache
 * - cache withdrawal is prevented by the caller
 * - this request may be ignored if there's no cache block available, in which
 *   case -ENOBUFS will be returned
 * - if the op is in progress, 0 will be returned
 */
int cachefiles_write_page(struct fscache_storage *op, struct page *page)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	mm_segment_t old_fs;
	struct file *file;
	struct path path;
	loff_t pos, eof;
	size_t len;
	void *data;
	int ret;

	ASSERT(page != NULL);

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("%p,%p{%lx},,,", object, page, page->index);

	if (!object->backer) {
		_leave(" = -ENOBUFS");
		return -ENOBUFS;
	}

	ASSERT(S_ISREG(object->backer->d_inode->i_mode));

	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	/* write the page to the backing filesystem and let it store it in its
	 * own time */
	path.mnt = cache->mnt;
	path.dentry = object->backer;
	file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
	} else {
		ret = -EIO;
		if (file->f_op->write) {
			pos = (loff_t) page->index << PAGE_SHIFT;

			/* we mustn't write more data than we have, so we have
			 * to beware of a partial page at EOF */
			eof = object->fscache.store_limit_l;
			len = PAGE_SIZE;
			if (eof & ~PAGE_MASK) {
				ASSERTCMP(pos, <, eof);
				if (eof - pos < PAGE_SIZE) {
					_debug("cut short %llx to %llx",
					       pos, eof);
					len = eof - pos;
					ASSERTCMP(pos + len, ==, eof);
				}
			}
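
			/*
			 * Worked example (illustrative numbers): with 4KiB
			 * pages, store_limit_l = 0x2600 and page->index = 2
			 * give pos = 0x2000; eof - pos = 0x600 is less than a
			 * page, so len is trimmed from PAGE_SIZE to 0x600 and
			 * the write never runs past the object's store limit.
			 */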

			data = kmap(page);
			file_start_write(file);
			old_fs = get_fs();
			set_fs(KERNEL_DS);
			ret = file->f_op->write(
				file, (const void __user *) data, len, &pos);
			set_fs(old_fs);
			kunmap(page);
			file_end_write(file);
			if (ret != len)
				ret = -EIO;
		}
		fput(file);
	}

	if (ret < 0) {
		if (ret == -EIO)
			cachefiles_io_error_obj(
				object, "Write page to backing file failed");
		ret = -ENOBUFS;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * detach a backing block from a page
 * - cache withdrawal is prevented by the caller
 */
void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;

	object = container_of(_object, struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,{%lu}", object, page->index);

	spin_unlock(&object->fscache.cookie->lock);
}