/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_read *req;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

	req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
		      GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->pages = req->array;
	req->pages[0] = page;
	get_page(page);
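
	/* Fetch the missing region from the server; afs_put_read() drops our
	 * reference to the request whether or not the fetch succeeded.
	 */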
	ret = afs_fetch_data(vnode, key, req);
	afs_put_read(req);
	if (ret < 0) {
		if (ret == -ENOENT) {
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}
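
/*
 * Dirty-region bookkeeping: the byte range of a page that has been modified
 * is packed into page->private, with the "from" offset in the low-order bits
 * (masked by AFS_PRIV_MAX) and the "to" offset shifted up by AFS_PRIV_SHIFT.
 */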
/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **pagep, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned f, from = pos & (PAGE_SIZE - 1);
	unsigned t, to = from + len;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	_enter("{%x:%u},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

	/* We want to store information about how much of a page is altered in
	 * page->private.
	 */
	BUILD_BUG_ON(PAGE_SIZE > 32768 && sizeof(page->private) < 8);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
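
	/* The page comes back locked and with a reference held. */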
	if (!PageUptodate(page) && len != PAGE_SIZE) {
		ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

	/* page won't leak in error case: it eventually gets cleaned off LRU */
	*pagep = page;

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	t = f = 0;
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = priv & AFS_PRIV_MAX;
		t = priv >> AFS_PRIV_SHIFT;
		ASSERTCMP(f, <=, t);
	}

	if (f != t) {
		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
					     page->index, priv);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
		if (from < f)
			f = from;
		if (to > t)
			t = to;
	} else {
		f = from;
		t = to;
	}

	priv = (unsigned long)t << AFS_PRIV_SHIFT;
	priv |= f;
	trace_afs_page_dirty(vnode, tracepoint_string("begin"),
			     page->index, priv);
	SetPagePrivate(page);
	set_page_private(page, priv);
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	ret = lock_page_killable(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}
	goto try_again;
}

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct key *key = afs_file_key(file);
	loff_t i_size, maybe_i_size;
	int ret;

	_enter("{%x:%u},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	maybe_i_size = pos + copied;
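
	/* Only extend the file size here; rechecking under wb_lock prevents a
	 * racing extender from moving i_size backwards.
	 */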
	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		spin_lock(&vnode->wb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		spin_unlock(&vnode->wb_lock);
	}

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* Try and load any missing data from the server.  The
			 * unmarshalling routine will take care of clearing any
			 * bits that are beyond the EOF.
			 */
			ret = afs_fill_page(vnode, key, pos + copied,
					    len - copied, page);
			if (ret < 0)
				goto out;
		}
		SetPageUptodate(page);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	ret = copied;

out:
	unlock_page(page);
	put_page(page);
	return ret;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%x:%u},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
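		/* Work through the range in batches of at most PAGEVEC_SIZE
		 * pages, advancing "first" past each batch as it is dealt
		 * with.
		 */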
239 _debug("kill %lx-%lx", first
, last
);
241 count
= last
- first
+ 1;
242 if (count
> PAGEVEC_SIZE
)
243 count
= PAGEVEC_SIZE
;
244 pv
.nr
= find_get_pages_contig(mapping
, first
, count
, pv
.pages
);
245 ASSERTCMP(pv
.nr
, ==, count
);
247 for (loop
= 0; loop
< count
; loop
++) {
248 struct page
*page
= pv
.pages
[loop
];
249 ClearPageUptodate(page
);
251 end_page_writeback(page
);
252 if (page
->index
>= first
)
253 first
= page
->index
+ 1;
255 generic_error_remove_page(mapping
, page
);
258 __pagevec_release(&pv
);
259 } while (first
<= last
);
/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%x:%u},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
281 _debug("redirty %lx-%lx", first
, last
);
283 count
= last
- first
+ 1;
284 if (count
> PAGEVEC_SIZE
)
285 count
= PAGEVEC_SIZE
;
286 pv
.nr
= find_get_pages_contig(mapping
, first
, count
, pv
.pages
);
287 ASSERTCMP(pv
.nr
, ==, count
);
289 for (loop
= 0; loop
< count
; loop
++) {
290 struct page
*page
= pv
.pages
[loop
];
292 redirty_page_for_writepage(wbc
, page
);
293 end_page_writeback(page
);
294 if (page
->index
>= first
)
295 first
= page
->index
+ 1;
298 __pagevec_release(&pv
);
299 } while (first
<= last
);
/*
 * write to a file
 */
static int afs_store_data(struct address_space *mapping,
			  pgoff_t first, pgoff_t last,
			  unsigned offset, unsigned to)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_fs_cursor fc;
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	_enter("%s{%x:%u.%u},%lx,%lx,%x,%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       first, last, offset, to);

	spin_lock(&vnode->wb_lock);
	p = vnode->wb_keys.next;

	/* Iterate through the list looking for a valid key to use. */
try_next_key:
	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0)
			goto found_key;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	afs_put_wb_key(wbk);
	_leave(" = %d [no keys]", ret);
	return ret;
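
	/* Pin the chosen key with a reference of our own before dropping the
	 * lock so that it can't be destroyed under us.
	 */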
found_key:
	refcount_inc(&wbk->usage);
	spin_unlock(&vnode->wb_lock);

	_debug("USE WB KEY %u", key_serial(wbk->key));

	ret = -ERESTARTSYS;
	if (afs_begin_vnode_operation(&fc, vnode, wbk->key)) {
		while (afs_select_fileserver(&fc)) {
			fc.cb_break = afs_calc_vnode_cb_break(vnode);
			afs_fs_store_data(&fc, mapping, first, last, offset, to);
		}

		afs_check_for_remote_deletion(&fc, fc.vnode);
		afs_vnode_commit_status(&fc, vnode, fc.cb_break);
		ret = afs_end_vnode_operation(&fc);
	}
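
	/* Permission-type errors mean the key is no good; move on to the next
	 * cached writeback key and try again.
	 */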
	switch (ret) {
	case 0:
		afs_stat_v(vnode, n_stores);
		atomic_long_add((last * PAGE_SIZE + to) -
				(first * PAGE_SIZE + offset),
				&afs_v2net(vnode)->n_store_bytes);
		break;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		spin_lock(&vnode->wb_lock);
		p = wbk->vnode_link.next;
		afs_put_wb_key(wbk);
		goto try_next_key;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static int afs_write_back_from_locked_page(struct address_space *mapping,
					   struct writeback_control *wbc,
					   struct page *primary_page,
					   pgoff_t final_page)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct page *pages[8], *page;
	unsigned long count, priv;
	unsigned n, offset, to, f, t;
	pgoff_t start, first, last;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (test_set_page_writeback(primary_page))
		BUG();

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	start = primary_page->index;
	priv = page_private(primary_page);
	offset = priv & AFS_PRIV_MAX;
	to = priv >> AFS_PRIV_SHIFT;
	trace_afs_page_dirty(vnode, tracepoint_string("store"),
			     primary_page->index, priv);

	WARN_ON(offset == to);
	if (offset == to)
		trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
				     primary_page->index, priv);

	if (start >= final_page ||
	    (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
		goto no_more;

433 _debug("more %lx [%lx]", start
, count
);
434 n
= final_page
- start
+ 1;
435 if (n
> ARRAY_SIZE(pages
))
436 n
= ARRAY_SIZE(pages
);
437 n
= find_get_pages_contig(mapping
, start
, ARRAY_SIZE(pages
), pages
);
438 _debug("fgpc %u", n
);
441 if (pages
[0]->index
!= start
) {
443 put_page(pages
[--n
]);
		for (loop = 0; loop < n; loop++) {
			page = pages[loop];
			if (to != PAGE_SIZE &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
				break;
			if (page->index > final_page)
				break;
			if (!trylock_page(page))
				break;
			if (!PageDirty(page) || PageWriteback(page)) {
				unlock_page(page);
				break;
			}

			priv = page_private(page);
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
			if (f != 0 &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
				unlock_page(page);
				break;
			}
			to = t;

			trace_afs_page_dirty(vnode, tracepoint_string("store+"),
					     page->index, priv);

			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= final_page && count < 65536);

no_more:
	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(primary_page);

	first = primary_page->index;
	last = first + count - 1;

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

	ret = afs_store_data(mapping, first, last, offset, to);
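
	/* Decide, per error class, whether the pages should be redirtied for
	 * another attempt or discarded entirely.
	 */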
	switch (ret) {
	case 0:
		ret = count;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		/* Fall through */
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		afs_kill_pages(mapping, first, last);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	_enter("{%lx},", page->index);

	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      wbc->range_end >> PAGE_SHIFT);
	if (ret < 0) {
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		n = find_get_pages_range_tag(mapping, &index, end,
					PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

586 _debug("wback %lx", page
->index
);
589 * at this point we hold neither the i_pages lock nor the
590 * page lock: the page may be truncated or invalidated
591 * (changing page->mapping to NULL), or even swizzled
592 * back from swapper_space to tmpfs file mapping
594 ret
= lock_page_killable(page
);
597 _leave(" = %d", ret
);
601 if (page
->mapping
!= mapping
|| !PageDirty(page
)) {
607 if (PageWriteback(page
)) {
609 if (wbc
->sync_mode
!= WB_SYNC_NONE
)
610 wait_on_page_writeback(page
);
615 if (!clear_page_dirty_for_io(page
))
617 ret
= afs_write_back_from_locked_page(mapping
, wbc
, page
, end
);
		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	pgoff_t start, end, next;
	int ret;

	_enter("");
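
	/* In cyclic mode, continue from where the previous pass stopped,
	 * wrapping back to the start of the file if write quota remains.
	 */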
	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * completion of write to server
 */
void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
{
	struct pagevec pv;
	unsigned long priv;
	unsigned count, loop;
	pgoff_t first = call->first, last = call->last;

	_enter("{%x:%u},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
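		/* The server now has the data, so the dirty-region record in
		 * page->private can be cleared and writeback ended.
		 */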
684 _debug("done %lx-%lx", first
, last
);
686 count
= last
- first
+ 1;
687 if (count
> PAGEVEC_SIZE
)
688 count
= PAGEVEC_SIZE
;
689 pv
.nr
= find_get_pages_contig(vnode
->vfs_inode
.i_mapping
,
690 first
, count
, pv
.pages
);
691 ASSERTCMP(pv
.nr
, ==, count
);
693 for (loop
= 0; loop
< count
; loop
++) {
694 priv
= page_private(pv
.pages
[loop
]);
695 trace_afs_page_dirty(vnode
, tracepoint_string("clear"),
696 pv
.pages
[loop
]->index
, priv
);
697 set_page_private(pv
.pages
[loop
], 0);
698 end_page_writeback(pv
.pages
[loop
]);
701 __pagevec_release(&pv
);
702 } while (first
<= last
);
704 afs_prune_wb_keys(vnode
);
/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%x.%u},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%x:%u},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;

	_enter("{{%x:%u}},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	fscache_wait_on_page_write(vnode->cache, vmf->page);
#endif

	if (PageWriteback(vmf->page) &&
	    wait_on_page_bit_killable(vmf->page, PG_writeback) < 0) {
		/* Drop the freeze protection taken above before backing out. */
		sb_end_pagefault(inode->i_sb);
		return VM_FAULT_RETRY;
	}

	if (lock_page_killable(vmf->page) < 0) {
		sb_end_pagefault(inode->i_sb);
		return VM_FAULT_RETRY;
	}

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	wait_on_page_writeback(vmf->page);

	priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
	priv |= 0; /* From */
	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
			     vmf->page->index, priv);
	SetPagePrivate(vmf->page);
	set_page_private(vmf->page, priv);

	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;
}

/*
 * Prune the keys cached for writeback.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);
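
	/* Dispose of the doomed keys now that the lock has been dropped. */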
	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		f = 0;
		t = PAGE_SIZE;
		if (PagePrivate(page)) {
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
		}

		trace_afs_page_dirty(vnode, tracepoint_string("launder"),
				     page->index, priv);
		ret = afs_store_data(mapping, page->index, page->index, f, t);
	}

	trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
			     page->index, priv);
	set_page_private(page, 0);
	ClearPagePrivate(page);

#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page)) {
		fscache_wait_on_page_write(vnode->cache, page);
		fscache_uncache_page(vnode->cache, page);
	}
#endif
	return ret;
}
);