/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/aio.h>
#include "internal.h"

static int afs_write_back_from_locked_page(struct afs_writeback *wb,
					   struct page *page);
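/*
 * Note: struct afs_writeback is defined in internal.h; the following is only
 * a rough sketch of the fields this file relies on, inferred from how they
 * are used below (see internal.h for the authoritative layout):
 *
 *	struct afs_writeback {
 *		struct list_head link;		// on vnode->writebacks
 *		struct afs_vnode *vnode;
 *		struct key *key;		// key the writes were made under
 *		wait_queue_head_t waitq;	// woken when the record completes
 *		pgoff_t first, last;		// page range covered (inclusive)
 *		unsigned offset_first;		// byte offset into the first page
 *		unsigned to_last;		// byte offset into the last page
 *		int usage;			// reference count
 *		enum afs_writeback_state state;	// AFS_WBACK_PENDING, _CONFLICTING,
 *						// _WRITING, _SYNCING or _COMPLETE
 *	};
 */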
/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}
/*
 * unlink a writeback record because its usage has reached zero
 * - must be called with the wb->vnode->writeback_lock held
 */
static void afs_unlink_writeback(struct afs_writeback *wb)
{
	struct afs_writeback *front;
	struct afs_vnode *vnode = wb->vnode;

	list_del_init(&wb->link);
	if (!list_empty(&vnode->writebacks)) {
		/* if an fsync rises to the front of the queue then wake it
		 * up */
		front = list_entry(vnode->writebacks.next,
				   struct afs_writeback, link);
		if (front->state == AFS_WBACK_SYNCING) {
			_debug("wake up sync");
			front->state = AFS_WBACK_COMPLETE;
			wake_up(&front->waitq);
		}
	}
}
/*
 * free a writeback record
 */
static void afs_free_writeback(struct afs_writeback *wb)
{
	_enter("");
	key_put(wb->key);
	kfree(wb);
}
/*
 * dispose of a reference to a writeback record
 */
void afs_put_writeback(struct afs_writeback *wb)
{
	struct afs_vnode *vnode = wb->vnode;

	_enter("{%d}", wb->usage);

	spin_lock(&vnode->writeback_lock);
	if (--wb->usage == 0)
		afs_unlink_writeback(wb);
	else
		wb = NULL;
	spin_unlock(&vnode->writeback_lock);
	if (wb)
		afs_free_writeback(wb);
}
/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, struct page *page)
{
	loff_t i_size;
	int len;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

	i_size = i_size_read(&vnode->vfs_inode);
	if (pos + PAGE_CACHE_SIZE > i_size)
		len = i_size - pos;
	else
		len = PAGE_CACHE_SIZE;

	ret = afs_vnode_fetch_data(vnode, key, pos, len, page);
	if (ret < 0) {
		if (ret == -ENOENT) {
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}
/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **pagep, void **fsdata)
{
	struct afs_writeback *candidate, *wb;
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	struct key *key = file->private_data;
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
	unsigned to = from + len;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	int ret;

	_enter("{%x:%u},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

	candidate = kzalloc(sizeof(*candidate), GFP_KERNEL);
	if (!candidate)
		return -ENOMEM;
	candidate->vnode = vnode;
	candidate->first = candidate->last = index;
	candidate->offset_first = from;
	candidate->to_last = to;
	INIT_LIST_HEAD(&candidate->link);
	candidate->usage = 1;
	candidate->state = AFS_WBACK_PENDING;
	init_waitqueue_head(&candidate->waitq);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		kfree(candidate);
		return -ENOMEM;
	}
	*pagep = page;
	/* page won't leak in error case: it eventually gets cleaned off LRU */

	if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) {
		ret = afs_fill_page(vnode, key, index << PAGE_CACHE_SHIFT, page);
		if (ret < 0) {
			kfree(candidate);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

try_again:
	spin_lock(&vnode->writeback_lock);

	/* see if this page is already pending a writeback under a suitable key
	 * - if so we can just join onto that one */
	wb = (struct afs_writeback *) page_private(page);
	if (wb) {
		if (wb->key == key && wb->state == AFS_WBACK_PENDING)
			goto subsume_in_current_wb;
		goto flush_conflicting_wb;
	}

	if (index > 0) {
		/* see if we can find an already pending writeback that we can
		 * append this page to */
		list_for_each_entry(wb, &vnode->writebacks, link) {
			if (wb->last == index - 1 && wb->key == key &&
			    wb->state == AFS_WBACK_PENDING)
				goto append_to_previous_wb;
		}
	}

	list_add_tail(&candidate->link, &vnode->writebacks);
	candidate->key = key_get(key);
	spin_unlock(&vnode->writeback_lock);
	SetPagePrivate(page);
	set_page_private(page, (unsigned long) candidate);
	_leave(" = 0 [new]");
	return 0;

subsume_in_current_wb:
	ASSERTRANGE(wb->first, <=, index, <=, wb->last);
	if (index == wb->first && from < wb->offset_first)
		wb->offset_first = from;
	if (index == wb->last && to > wb->to_last)
		wb->to_last = to;
	spin_unlock(&vnode->writeback_lock);
	kfree(candidate);
	_leave(" = 0 [sub]");
	return 0;

append_to_previous_wb:
	_debug("append into %lx-%lx", wb->first, wb->last);
	wb->usage++;
	wb->last++;
	wb->to_last = to;
	spin_unlock(&vnode->writeback_lock);
	SetPagePrivate(page);
	set_page_private(page, (unsigned long) wb);
	kfree(candidate);
	_leave(" = 0 [app]");
	return 0;

	/* the page is currently bound to another context, so if it's dirty we
	 * need to flush it before we can use the new context */
flush_conflicting_wb:
	_debug("flush conflict");
	if (wb->state == AFS_WBACK_PENDING)
		wb->state = AFS_WBACK_CONFLICTING;
	spin_unlock(&vnode->writeback_lock);
	if (PageDirty(page)) {
		ret = afs_write_back_from_locked_page(wb, page);
		if (ret < 0) {
			afs_put_writeback(candidate);
			_leave(" = %d", ret);
			return ret;
		}
	}

	/* the page holds a ref on the writeback record */
	afs_put_writeback(wb);
	set_page_private(page, 0);
	ClearPagePrivate(page);
	goto try_again;
}
/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	loff_t i_size, maybe_i_size;

	_enter("{%x:%u},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	maybe_i_size = pos + copied;

	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		spin_lock(&vnode->writeback_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		spin_unlock(&vnode->writeback_lock);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	unlock_page(page);
	page_cache_release(page);

	return copied;
}
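/*
 * For reference, these entry points are expected to be wired up through the
 * AFS address_space operations table (defined in fs/afs/file.c), roughly:
 *
 *	const struct address_space_operations afs_fs_aops = {
 *		...
 *		.set_page_dirty	= afs_set_page_dirty,
 *		.write_begin	= afs_write_begin,
 *		.write_end	= afs_write_end,
 *		.writepage	= afs_writepage,
 *		.writepages	= afs_writepages,
 *		...
 *	};
 */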
/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct afs_vnode *vnode, bool error,
			   pgoff_t first, pgoff_t last)
{
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%x:%u},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv, 0);

	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			ClearPageUptodate(pv.pages[loop]);
			if (error)
				SetPageError(pv.pages[loop]);
			end_page_writeback(pv.pages[loop]);
		}

		first += count;
		__pagevec_release(&pv);
	} while (first < last);

	_leave("");
}
/*
 * synchronously write back the locked page and any subsequent non-locked dirty
 * pages also covered by the same writeback record
 */
static int afs_write_back_from_locked_page(struct afs_writeback *wb,
					   struct page *primary_page)
{
	struct page *pages[8], *page;
	unsigned long count;
	unsigned n, offset, to;
	pgoff_t start, first, last;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (!clear_page_dirty_for_io(primary_page))
		BUG();
	if (test_set_page_writeback(primary_page))
		BUG();

	/* find all consecutive lockable dirty pages, stopping when we find a
	 * page that is not immediately lockable, is not dirty or is missing,
	 * or we reach the end of the range */
	start = primary_page->index;
	if (start >= wb->last)
		goto no_more;
	start++;
	do {
		_debug("more %lx [%lx]", start, count);
		n = wb->last - start + 1;
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
		n = find_get_pages_contig(wb->vnode->vfs_inode.i_mapping,
					  start, n, pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			page = pages[loop];
			if (page->index > wb->last)
				break;
			if (!trylock_page(page))
				break;
			if (!PageDirty(page) ||
			    page_private(page) != (unsigned long) wb) {
				unlock_page(page);
				break;
			}
			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= wb->last && count < 65536);

no_more:
	/* we now have a contiguous set of dirty pages, each with writeback set
	 * and the dirty mark cleared; the first page is locked and must remain
	 * so, all the rest are unlocked */
	first = primary_page->index;
	last = first + count - 1;

	offset = (first == wb->first) ? wb->offset_first : 0;
	to = (last == wb->last) ? wb->to_last : PAGE_SIZE;

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

	ret = afs_vnode_store_data(wb, first, last, offset, to);
	if (ret < 0) {
		switch (ret) {
		case -EDQUOT:
		case -ENOSPC:
			set_bit(AS_ENOSPC,
				&wb->vnode->vfs_inode.i_mapping->flags);
			break;
		case -EROFS:
		case -EIO:
		case -EREMOTEIO:
		case -EFBIG:
		case -ENOENT:
		case -ENOMEDIUM:
		case -ENXIO:
			afs_kill_pages(wb->vnode, true, first, last);
			set_bit(AS_EIO, &wb->vnode->vfs_inode.i_mapping->flags);
			break;
		case -EACCES:
		case -EPERM:
		case -ENOKEY:
		case -EKEYEXPIRED:
		case -EKEYREJECTED:
		case -EKEYREVOKED:
			afs_kill_pages(wb->vnode, false, first, last);
			break;
		default:
			break;
		}
	} else {
		ret = count;
	}

	_leave(" = %d", ret);
	return ret;
}
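/*
 * Note on the return convention: on success the function above returns the
 * number of pages it submitted for storage, which the ->writepage() and
 * ->writepages() callers below subtract from wbc->nr_to_write; a negative
 * value indicates that the store operation itself failed.
 */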
/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct afs_writeback *wb;
	int ret;

	_enter("{%lx},", page->index);

	wb = (struct afs_writeback *) page_private(page);
	ASSERT(wb != NULL);

	ret = afs_write_back_from_locked_page(wb, page);
	unlock_page(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}
/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct afs_writeback *wb;
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		n = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_DIRTY,
				       1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		if (page->index > end) {
			*_next = index;
			page_cache_release(page);
			_leave(" = 0 [%lx]", *_next);
			return 0;
		}

		/* at this point we hold neither mapping->tree_lock nor lock on
		 * the page itself: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled back from
		 * swapper_space to tmpfs file mapping
		 */
		lock_page(page);

		if (page->mapping != mapping) {
			unlock_page(page);
			page_cache_release(page);
			continue;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) || !PageDirty(page)) {
			unlock_page(page);
			continue;
		}

		wb = (struct afs_writeback *) page_private(page);
		ASSERT(wb != NULL);

		spin_lock(&wb->vnode->writeback_lock);
		wb->state = AFS_WBACK_WRITING;
		spin_unlock(&wb->vnode->writeback_lock);

		ret = afs_write_back_from_locked_page(wb, page);
		unlock_page(page);
		page_cache_release(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}
/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	pgoff_t start, end, next;
	int ret;

	_enter("");

	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_CACHE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	_leave(" = %d", ret);
	return ret;
}
/*
 * completion of write to server
 */
void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
{
	struct afs_writeback *wb = call->wb;
	struct pagevec pv;
	unsigned count, loop;
	pgoff_t first = call->first, last = call->last;
	bool free_wb;

	_enter("{%x:%u},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	ASSERT(wb != NULL);

	pagevec_init(&pv, 0);

	do {
		_debug("done %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(call->mapping, first, count,
					      pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		spin_lock(&vnode->writeback_lock);
		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];
			end_page_writeback(page);
			if (page_private(page) == (unsigned long) wb) {
				set_page_private(page, 0);
				ClearPagePrivate(page);
				wb->usage--;
			}
		}
		free_wb = false;
		if (wb->usage == 0) {
			afs_unlink_writeback(wb);
			free_wb = true;
		}
		spin_unlock(&vnode->writeback_lock);
		first += count;
		if (free_wb) {
			afs_free_writeback(wb);
			wb = NULL;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}
/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%x.%u},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_write_iter(iocb, from);
	if (IS_ERR_VALUE(result)) {
		_leave(" = %zd", result);
		return result;
	}

	_leave(" = %zd", result);
	return result;
}
/*
 * flush the vnode to the fileserver
 */
int afs_writeback_all(struct afs_vnode *vnode)
{
	struct address_space *mapping = vnode->vfs_inode.i_mapping;
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,
		.nr_to_write	= LONG_MAX,
		.range_cyclic	= 1,
	};
	int ret;

	_enter("");

	ret = mapping->a_ops->writepages(mapping, &wbc);
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	_leave(" = %d", ret);
	return ret;
}
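/*
 * afs_writeback_all() simply pushes the whole file through ->writepages()
 * (i.e. afs_writepages() above) with WB_SYNC_ALL; afs_fsync() below uses it
 * to flush everything ahead of its synchronisation marker record.
 */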
/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_writeback *wb, *xwb;
	struct afs_vnode *vnode = AFS_FS_I(inode);
	int ret;

	_enter("{%x:%u},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;
	mutex_lock(&inode->i_mutex);

	/* use a writeback record as a marker in the queue - when this reaches
	 * the front of the queue, all the outstanding writes are either
	 * completed or rejected */
	wb = kzalloc(sizeof(*wb), GFP_KERNEL);
	if (!wb) {
		ret = -ENOMEM;
		goto out;
	}
	wb->vnode = vnode;
	wb->first = 0;
	wb->last = -1;
	wb->offset_first = 0;
	wb->to_last = PAGE_SIZE;
	wb->usage = 1;
	wb->state = AFS_WBACK_SYNCING;
	init_waitqueue_head(&wb->waitq);

	spin_lock(&vnode->writeback_lock);
	list_for_each_entry(xwb, &vnode->writebacks, link) {
		if (xwb->state == AFS_WBACK_PENDING)
			xwb->state = AFS_WBACK_CONFLICTING;
	}
	list_add_tail(&wb->link, &vnode->writebacks);
	spin_unlock(&vnode->writeback_lock);

	/* push all the outstanding writebacks to the server */
	ret = afs_writeback_all(vnode);
	if (ret < 0) {
		afs_put_writeback(wb);
		_leave(" = %d [wb]", ret);
		goto out;
	}

	/* wait for the preceding writes to actually complete */
	ret = wait_event_interruptible(wb->waitq,
				       wb->state == AFS_WBACK_COMPLETE ||
				       vnode->writebacks.next == &wb->link);
	afs_put_writeback(wb);
	_leave(" = %d", ret);
out:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
int afs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	struct afs_vnode *vnode = AFS_FS_I(vma->vm_file->f_mapping->host);

	_enter("{{%x:%u}},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	/* wait for the page to be written to the cache before we allow it to
	 * be modified */
#ifdef CONFIG_AFS_FSCACHE
	fscache_wait_on_page_write(vnode->cache, page);
#endif

	_leave(" = 0");
	return 0;
}