// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"
/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
        _enter("");
        return __set_page_dirty_nobuffers(page);
}
/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
                         loff_t pos, unsigned int len, struct page *page)
{
        struct afs_read *req;
        size_t p;
        void *data;
        int ret;

        _enter(",,%llu", (unsigned long long)pos);

        /* A write wholly beyond the current EOF needs nothing fetched from
         * the server: just zero the region of the page being prepared.
         */
        if (pos >= vnode->vfs_inode.i_size) {
                p = pos & ~PAGE_MASK;
                ASSERTCMP(p + len, <=, PAGE_SIZE);
                data = kmap(page);
                memset(data + p, 0, len);
                kunmap(page);
                return 0;
        }

        req = kzalloc(struct_size(req, array, 1), GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        refcount_set(&req->usage, 1);
        req->pos = pos;
        req->len = len;
        req->nr_pages = 1;
        req->pages = req->array;
        req->pages[0] = page;
        get_page(page);

        ret = afs_fetch_data(vnode, key, req);
        afs_put_read(req);
        if (ret < 0) {
                if (ret == -ENOENT) {
                        _debug("got NOENT from server - marking file deleted and stale");
                        set_bit(AFS_VNODE_DELETED, &vnode->flags);
                        ret = -ESTALE;
                }
        }

        _leave(" = %d", ret);
        return ret;
}
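/* Note: afs_write_begin() below relies on afs_fill_page() to bring a page up
 * to date before a partial overwrite, so that the untouched parts of the page
 * hold valid data when the whole page is eventually written back.
 */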
/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
                    loff_t pos, unsigned len, unsigned flags,
                    struct page **pagep, void **fsdata)
{
        struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
        struct page *page;
        struct key *key = afs_file_key(file);
        unsigned long priv;
        unsigned f, from = pos & (PAGE_SIZE - 1);
        unsigned t, to = from + len;
        pgoff_t index = pos >> PAGE_SHIFT;
        int ret;

        _enter("{%llx:%llu},{%lx},%u,%u",
               vnode->fid.vid, vnode->fid.vnode, index, from, to);

        /* We want to store information about how much of a page is altered in
         * page->private.
         */
        BUILD_BUG_ON(PAGE_SIZE > 32768 && sizeof(page->private) < 8);

        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
                return -ENOMEM;

        if (!PageUptodate(page) && len != PAGE_SIZE) {
                ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
                if (ret < 0) {
                        unlock_page(page);
                        put_page(page);
                        _leave(" = %d [prep]", ret);
                        return ret;
                }
                SetPageUptodate(page);
        }

        /* page won't leak in error case: it eventually gets cleaned off LRU */
        *pagep = page;

try_again:
        /* See if this page is already partially written in a way that we can
         * merge the new write with.
         */
        t = f = 0;
        if (PagePrivate(page)) {
                priv = page_private(page);
                f = priv & AFS_PRIV_MAX;
                t = priv >> AFS_PRIV_SHIFT;
                ASSERTCMP(f, <=, t);
        }

        if (f != t) {
                if (PageWriteback(page)) {
                        trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
                                             page->index, priv);
                        goto flush_conflicting_write;
                }
                /* If the file is being filled locally, allow inter-write
                 * spaces to be merged into writes.  If it's not, only write
                 * back what the user gives us.
                 */
                if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
                    (to < f || from > t))
                        goto flush_conflicting_write;
                if (from < f)
                        f = from;
                if (to > t)
                        t = to;
        } else {
                f = from;
                t = to;
        }

        priv = (unsigned long)t << AFS_PRIV_SHIFT;
        priv |= f;
        trace_afs_page_dirty(vnode, tracepoint_string("begin"),
                             page->index, priv);
        SetPagePrivate(page);
        set_page_private(page, priv);
        _leave(" = 0");
        return 0;

        /* The previous write and this write aren't adjacent or overlapping, so
         * flush the page out.
         */
flush_conflicting_write:
        _debug("flush conflict");
        ret = write_one_page(page);
        if (ret < 0) {
                _leave(" = %d", ret);
                return ret;
        }

        ret = lock_page_killable(page);
        if (ret < 0) {
                _leave(" = %d", ret);
                return ret;
        }
        goto try_again;
}
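/* For example, with the encoding used above, a write covering bytes
 * 0x100..0x300 of a page is recorded in page->private as:
 *
 *      priv = ((unsigned long)0x300 << AFS_PRIV_SHIFT) | 0x100;
 *
 * so that "from" (0x100) is recovered with priv & AFS_PRIV_MAX and "to"
 * (0x300) with priv >> AFS_PRIV_SHIFT.
 */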
/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
                  loff_t pos, unsigned len, unsigned copied,
                  struct page *page, void *fsdata)
{
        struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
        struct key *key = afs_file_key(file);
        loff_t i_size, maybe_i_size;
        int ret;

        _enter("{%llx:%llu},{%lx}",
               vnode->fid.vid, vnode->fid.vnode, page->index);

        maybe_i_size = pos + copied;

        i_size = i_size_read(&vnode->vfs_inode);
        if (maybe_i_size > i_size) {
                write_seqlock(&vnode->cb_lock);
                i_size = i_size_read(&vnode->vfs_inode);
                if (maybe_i_size > i_size)
                        i_size_write(&vnode->vfs_inode, maybe_i_size);
                write_sequnlock(&vnode->cb_lock);
        }

        if (!PageUptodate(page)) {
                if (copied < len) {
                        /* Try and load any missing data from the server.  The
                         * unmarshalling routine will take care of clearing any
                         * bits that are beyond the EOF.
                         */
                        ret = afs_fill_page(vnode, key, pos + copied,
                                            len - copied, page);
                        if (ret < 0)
                                goto out;
                }
                SetPageUptodate(page);
        }

        set_page_dirty(page);
        if (PageDirty(page))
                _debug("dirtied");
        ret = copied;

out:
        unlock_page(page);
        put_page(page);
        return ret;
}
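/* Note on the i_size update above: the size is checked once outside the
 * callback lock and then rechecked under write_seqlock(&vnode->cb_lock), so
 * that a racing extension of the file cannot be clobbered with a smaller
 * value.
 */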
/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
                           pgoff_t first, pgoff_t last)
{
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        struct pagevec pv;
        unsigned count, loop;

        _enter("{%llx:%llu},%lx-%lx",
               vnode->fid.vid, vnode->fid.vnode, first, last);

        pagevec_init(&pv);

        do {
                _debug("kill %lx-%lx", first, last);

                count = last - first + 1;
                if (count > PAGEVEC_SIZE)
                        count = PAGEVEC_SIZE;
                pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
                ASSERTCMP(pv.nr, ==, count);

                for (loop = 0; loop < count; loop++) {
                        struct page *page = pv.pages[loop];
                        ClearPageUptodate(page);
                        SetPageError(page);
                        end_page_writeback(page);
                        if (page->index >= first)
                                first = page->index + 1;
                        lock_page(page);
                        generic_error_remove_page(mapping, page);
                        unlock_page(page);
                }

                __pagevec_release(&pv);
        } while (first <= last);

        _leave("");
}
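/* Both afs_kill_pages() above and afs_redirty_pages() below walk the range
 * in batches of at most PAGEVEC_SIZE pages, advancing 'first' past the last
 * page seen in each batch until the whole range has been covered.
 */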
/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
                              struct address_space *mapping,
                              pgoff_t first, pgoff_t last)
{
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        struct pagevec pv;
        unsigned count, loop;

        _enter("{%llx:%llu},%lx-%lx",
               vnode->fid.vid, vnode->fid.vnode, first, last);

        pagevec_init(&pv);

        do {
                _debug("redirty %lx-%lx", first, last);

                count = last - first + 1;
                if (count > PAGEVEC_SIZE)
                        count = PAGEVEC_SIZE;
                pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
                ASSERTCMP(pv.nr, ==, count);

                for (loop = 0; loop < count; loop++) {
                        struct page *page = pv.pages[loop];

                        redirty_page_for_writepage(wbc, page);
                        end_page_writeback(page);
                        if (page->index >= first)
                                first = page->index + 1;
                }

                __pagevec_release(&pv);
        } while (first <= last);

        _leave("");
}
/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode,
                                   pgoff_t first, pgoff_t last)
{
        struct pagevec pv;
        unsigned long priv;
        unsigned count, loop;

        _enter("{%llx:%llu},{%lx-%lx}",
               vnode->fid.vid, vnode->fid.vnode, first, last);

        pagevec_init(&pv);

        do {
                _debug("done %lx-%lx", first, last);

                count = last - first + 1;
                if (count > PAGEVEC_SIZE)
                        count = PAGEVEC_SIZE;
                pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
                                              first, count, pv.pages);
                ASSERTCMP(pv.nr, ==, count);

                for (loop = 0; loop < count; loop++) {
                        priv = page_private(pv.pages[loop]);
                        trace_afs_page_dirty(vnode, tracepoint_string("clear"),
                                             pv.pages[loop]->index, priv);
                        set_page_private(pv.pages[loop], 0);
                        end_page_writeback(pv.pages[loop]);
                }
                first += count;

                __pagevec_release(&pv);
        } while (first <= last);

        afs_prune_wb_keys(vnode);
        _leave("");
}
static int afs_store_data(struct address_space *mapping,
                          pgoff_t first, pgoff_t last,
                          unsigned offset, unsigned to)
{
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        struct afs_fs_cursor fc;
        struct afs_status_cb *scb;
        struct afs_wb_key *wbk = NULL;
        struct list_head *p;
        int ret = -ENOKEY, ret2;

        _enter("%s{%llx:%llu.%u},%lx,%lx,%x,%x",
               vnode->volume->name,
               vnode->fid.vid,
               vnode->fid.vnode,
               vnode->fid.unique,
               first, last, offset, to);

        scb = kzalloc(sizeof(struct afs_status_cb), GFP_NOFS);
        if (!scb)
                return -ENOMEM;

        spin_lock(&vnode->wb_lock);
        p = vnode->wb_keys.next;

        /* Iterate through the list looking for a valid key to use. */
try_next_key:
        while (p != &vnode->wb_keys) {
                wbk = list_entry(p, struct afs_wb_key, vnode_link);
                _debug("wbk %u", key_serial(wbk->key));
                ret2 = key_validate(wbk->key);
                if (ret2 == 0)
                        goto found_key;
                if (ret == -ENOKEY)
                        ret = ret2;
                p = p->next;
        }

        spin_unlock(&vnode->wb_lock);
        afs_put_wb_key(wbk);
        kfree(scb);
        _leave(" = %d [no keys]", ret);
        return ret;

found_key:
        refcount_inc(&wbk->usage);
        spin_unlock(&vnode->wb_lock);

        _debug("USE WB KEY %u", key_serial(wbk->key));

        ret = -ERESTARTSYS;
        if (afs_begin_vnode_operation(&fc, vnode, wbk->key, false)) {
                afs_dataversion_t data_version = vnode->status.data_version + 1;

                while (afs_select_fileserver(&fc)) {
                        fc.cb_break = afs_calc_vnode_cb_break(vnode);
                        afs_fs_store_data(&fc, mapping, first, last, offset, to, scb);
                }

                afs_check_for_remote_deletion(&fc, vnode);
                afs_vnode_commit_status(&fc, vnode, fc.cb_break,
                                        &data_version, scb);
                if (fc.ac.error == 0)
                        afs_pages_written_back(vnode, first, last);
                ret = afs_end_vnode_operation(&fc);
        }
        switch (ret) {
        case 0:
                afs_stat_v(vnode, n_stores);
                atomic_long_add((last * PAGE_SIZE + to) -
                                (first * PAGE_SIZE + offset),
                                &afs_v2net(vnode)->n_store_bytes);
                break;
        case -EACCES:
        case -EPERM:
        case -ENOKEY:
        case -EKEYEXPIRED:
        case -EKEYREJECTED:
        case -EKEYREVOKED:
                _debug("next");
                spin_lock(&vnode->wb_lock);
                p = wbk->vnode_link.next;
                afs_put_wb_key(wbk);
                goto try_next_key;
        }

        afs_put_wb_key(wbk);
        kfree(scb);
        _leave(" = %d", ret);
        return ret;
}
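/* afs_store_data() above walks vnode->wb_keys for a key that still
 * validates; if the server rejects the store with an access or key error,
 * the search resumes at the next key in the list (the 'p =
 * wbk->vnode_link.next' step) until a key works or the list is exhausted.
 */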
/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static int afs_write_back_from_locked_page(struct address_space *mapping,
                                           struct writeback_control *wbc,
                                           struct page *primary_page,
                                           pgoff_t final_page)
{
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        struct page *pages[8], *page;
        unsigned long count, priv;
        unsigned n, offset, to, f, t;
        pgoff_t start, first, last;
        int loop, ret;

        _enter(",%lx", primary_page->index);

        count = 1;
        if (test_set_page_writeback(primary_page))
                BUG();

        /* Find all consecutive lockable dirty pages that have contiguous
         * written regions, stopping when we find a page that is not
         * immediately lockable, is not dirty or is missing, or we reach the
         * end of the range.
         */
        start = primary_page->index;
        priv = page_private(primary_page);
        offset = priv & AFS_PRIV_MAX;
        to = priv >> AFS_PRIV_SHIFT;
        trace_afs_page_dirty(vnode, tracepoint_string("store"),
                             primary_page->index, priv);

        WARN_ON(offset == to);
        if (offset == to)
                trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
                                     primary_page->index, priv);

        if (start >= final_page ||
            (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
                goto no_more;

        start++;
        do {
                _debug("more %lx [%lx]", start, count);
                n = final_page - start + 1;
                if (n > ARRAY_SIZE(pages))
                        n = ARRAY_SIZE(pages);
                n = find_get_pages_contig(mapping, start, ARRAY_SIZE(pages), pages);
                _debug("fgpc %u", n);
                if (n == 0)
                        goto no_more;
                if (pages[0]->index != start) {
                        do {
                                put_page(pages[--n]);
                        } while (n > 0);
                        goto no_more;
                }

                for (loop = 0; loop < n; loop++) {
                        page = pages[loop];
                        if (to != PAGE_SIZE &&
                            !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
                                break;
                        if (page->index > final_page)
                                break;
                        if (!trylock_page(page))
                                break;
                        if (!PageDirty(page) || PageWriteback(page)) {
                                unlock_page(page);
                                break;
                        }

                        priv = page_private(page);
                        f = priv & AFS_PRIV_MAX;
                        t = priv >> AFS_PRIV_SHIFT;
                        if (f != 0 &&
                            !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
                                unlock_page(page);
                                break;
                        }
                        to = t;

                        trace_afs_page_dirty(vnode, tracepoint_string("store+"),
                                             page->index, priv);

                        if (!clear_page_dirty_for_io(page))
                                BUG();
                        if (test_set_page_writeback(page))
                                BUG();
                        unlock_page(page);
                        put_page(page);
                }
                count += loop;
                if (loop < n) {
                        for (; loop < n; loop++)
                                put_page(pages[loop]);
                        goto no_more;
                }

                start += loop;
        } while (start <= final_page && count < 65536);

no_more:
        /* We now have a contiguous set of dirty pages, each with writeback
         * set; the first page is still locked at this point, but all the rest
         * have been unlocked.
         */
        unlock_page(primary_page);

        first = primary_page->index;
        last = first + count - 1;

        _debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);
        ret = afs_store_data(mapping, first, last, offset, to);
        switch (ret) {
        case 0:
                ret = count;
                break;

        default:
                pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
                /* Fall through */
        case -EACCES:
        case -EPERM:
        case -ENOKEY:
        case -EKEYEXPIRED:
        case -EKEYREJECTED:
        case -EKEYREVOKED:
                afs_redirty_pages(wbc, mapping, first, last);
                mapping_set_error(mapping, ret);
                break;

        case -EDQUOT:
        case -ENOSPC:
                afs_redirty_pages(wbc, mapping, first, last);
                mapping_set_error(mapping, -ENOSPC);
                break;

        case -EROFS:
        case -EIO:
        case -EREMOTEIO:
        case -EFBIG:
        case -ENOENT:
        case -ENOMEDIUM:
        case -ENXIO:
                trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
                afs_kill_pages(mapping, first, last);
                mapping_set_error(mapping, ret);
                break;
        }

        _leave(" = %d", ret);
        return ret;
}
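/* The gathering loop above collects pages in chunks of up to
 * ARRAY_SIZE(pages) (8) at a time and stops extending the run at 65536
 * pages, so each FS.StoreData call covers one contiguous dirty extent
 * starting at the locked primary page.
 */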
/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
        int ret;

        _enter("{%lx},", page->index);

        ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
                                              wbc->range_end >> PAGE_SHIFT);
        if (ret < 0) {
                _leave(" = %d", ret);
                return 0;
        }

        wbc->nr_to_write -= ret;

        _leave(" = 0");
        return 0;
}
/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
                                 struct writeback_control *wbc,
                                 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
        struct page *page;
        int ret, n;

        _enter(",,%lx,%lx,", index, end);

        do {
                n = find_get_pages_range_tag(mapping, &index, end,
                                             PAGECACHE_TAG_DIRTY, 1, &page);
                if (!n)
                        break;

                _debug("wback %lx", page->index);

                /*
                 * at this point we hold neither the i_pages lock nor the
                 * page lock: the page may be truncated or invalidated
                 * (changing page->mapping to NULL), or even swizzled
                 * back from swapper_space to tmpfs file mapping
                 */
                ret = lock_page_killable(page);
                if (ret < 0) {
                        put_page(page);
                        _leave(" = %d", ret);
                        return ret;
                }

                if (page->mapping != mapping || !PageDirty(page)) {
                        unlock_page(page);
                        put_page(page);
                        continue;
                }

                if (PageWriteback(page)) {
                        unlock_page(page);
                        if (wbc->sync_mode != WB_SYNC_NONE)
                                wait_on_page_writeback(page);
                        put_page(page);
                        continue;
                }

                if (!clear_page_dirty_for_io(page))
                        BUG();
                ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
                put_page(page);
                if (ret < 0) {
                        _leave(" = %d", ret);
                        return ret;
                }

                wbc->nr_to_write -= ret;

                cond_resched();
        } while (index < end && wbc->nr_to_write > 0);

        *_next = index;
        _leave(" = 0 [%lx]", *_next);
        return 0;
}
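/* Each dirty page found by the tag walk above is re-checked under the page
 * lock before being written back, since the page may have been truncated,
 * cleaned or moved to another mapping between the lookup and the lock.
 */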
/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
                   struct writeback_control *wbc)
{
        pgoff_t start, end, next;
        int ret;

        _enter("");

        if (wbc->range_cyclic) {
                start = mapping->writeback_index;
                end = -1;
                ret = afs_writepages_region(mapping, wbc, start, end, &next);
                if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
                        ret = afs_writepages_region(mapping, wbc, 0, start,
                                                    &next);
                mapping->writeback_index = next;
        } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
                end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
                ret = afs_writepages_region(mapping, wbc, 0, end, &next);
                if (wbc->nr_to_write > 0)
                        mapping->writeback_index = next;
        } else {
                start = wbc->range_start >> PAGE_SHIFT;
                end = wbc->range_end >> PAGE_SHIFT;
                ret = afs_writepages_region(mapping, wbc, start, end, &next);
        }

        _leave(" = %d", ret);
        return ret;
}
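/* In the range_cyclic case above, writeback starts at the remembered
 * writeback_index, runs to the end of the mapping, then wraps around to
 * cover the pages before the starting point, so successive calls cycle
 * through the whole file.
 */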
/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
        struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
        ssize_t result;
        size_t count = iov_iter_count(from);

        _enter("{%llx:%llu},{%zu},",
               vnode->fid.vid, vnode->fid.vnode, count);

        if (IS_SWAPFILE(&vnode->vfs_inode)) {
                printk(KERN_INFO
                       "AFS: Attempt to write to active swap file!\n");
                return -EBUSY;
        }

        if (!count)
                return 0;

        result = generic_file_write_iter(iocb, from);

        _leave(" = %zd", result);
        return result;
}
/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct inode *inode = file_inode(file);
        struct afs_vnode *vnode = AFS_FS_I(inode);

        _enter("{%llx:%llu},{n=%pD},%d",
               vnode->fid.vid, vnode->fid.vnode, file,
               datasync);

        return file_write_and_wait_range(file, start, end);
}
/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
        struct file *file = vmf->vma->vm_file;
        struct inode *inode = file_inode(file);
        struct afs_vnode *vnode = AFS_FS_I(inode);
        unsigned long priv;

        _enter("{{%llx:%llu}},{%lx}",
               vnode->fid.vid, vnode->fid.vnode, vmf->page->index);

        sb_start_pagefault(inode->i_sb);

        /* Wait for the page to be written to the cache before we allow it to
         * be modified.  We then assume the entire page will need writing back.
         */
#ifdef CONFIG_AFS_FSCACHE
        fscache_wait_on_page_write(vnode->cache, vmf->page);
#endif

        if (PageWriteback(vmf->page) &&
            wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
                return VM_FAULT_RETRY;

        if (lock_page_killable(vmf->page) < 0)
                return VM_FAULT_RETRY;

        /* We mustn't change page->private until writeback is complete as that
         * details the portion of the page we need to write back and we might
         * need to redirty the page if there's a problem.
         */
        wait_on_page_writeback(vmf->page);

        priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
        priv |= 0; /* From */
        trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
                             vmf->page->index, priv);
        SetPagePrivate(vmf->page);
        set_page_private(vmf->page, priv);
        file_update_time(file);

        sb_end_pagefault(inode->i_sb);
        return VM_FAULT_LOCKED;
}
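/* Note that page_mkwrite marks the whole page as needing writeback by
 * encoding the range [0, PAGE_SIZE) in page->private, rather than trying to
 * track which bytes the faulting write will actually touch.
 */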
/*
 * Prune the keys cached for writeback.  The caller must hold vnode->wb_lock.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
        LIST_HEAD(graveyard);
        struct afs_wb_key *wbk, *tmp;

        /* Discard unused keys */
        spin_lock(&vnode->wb_lock);

        if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
            !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
                list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
                        if (refcount_read(&wbk->usage) == 1)
                                list_move(&wbk->vnode_link, &graveyard);
                }
        }

        spin_unlock(&vnode->wb_lock);

        while (!list_empty(&graveyard)) {
                wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
                list_del(&wbk->vnode_link);
                afs_put_wb_key(wbk);
        }
}
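/* Unused keys are moved onto the local 'graveyard' list while wb_lock is
 * held and are only put after the lock has been dropped, keeping the
 * critical section short.
 */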
/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        unsigned long priv;
        unsigned int f, t;
        int ret = 0;

        _enter("{%lx}", page->index);

        priv = page_private(page);
        if (clear_page_dirty_for_io(page)) {
                f = 0;
                t = PAGE_SIZE;
                if (PagePrivate(page)) {
                        f = priv & AFS_PRIV_MAX;
                        t = priv >> AFS_PRIV_SHIFT;
                }

                trace_afs_page_dirty(vnode, tracepoint_string("launder"),
                                     page->index, priv);
                ret = afs_store_data(mapping, page->index, page->index, t, f);
        }

        trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
                             page->index, priv);
        set_page_private(page, 0);
        ClearPagePrivate(page);

#ifdef CONFIG_AFS_FSCACHE
        if (PageFsCache(page)) {
                fscache_wait_on_page_write(vnode->cache, page);
                fscache_uncache_page(vnode->cache, page);
        }
#endif
        return ret;
}