/*
 * Write file data over NFS.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>

#include <asm/uaccess.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)

/*
 * Local function declarations
 */
static struct nfs_page *nfs_update_request(struct nfs_open_context *,
					struct page *,
					unsigned int, unsigned int);
static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
				  struct inode *inode, int ioflags);
static void nfs_redirty_request(struct nfs_page *req);
static const struct rpc_call_ops nfs_write_partial_ops;
static const struct rpc_call_ops nfs_write_full_ops;
static const struct rpc_call_ops nfs_commit_ops;

static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static mempool_t *nfs_commit_mempool;

struct nfs_write_data *nfs_commit_alloc(void)
{
	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
	}
	return p;
}

static void nfs_commit_rcu_free(struct rcu_head *head)
{
	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_commit_mempool);
}

void nfs_commit_free(struct nfs_write_data *wdata)
{
	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free);
}

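/*
 * Allocate write data for up to @pagecount pages. Small requests fit in
 * the page_array embedded in struct nfs_write_data; anything larger gets
 * a separately allocated pagevec via kcalloc().
 */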
struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
{
	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		p->npages = pagecount;
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
				mempool_free(p, nfs_wdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}

static void nfs_writedata_rcu_free(struct rcu_head *head)
{
	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_wdata_mempool);
}

static void nfs_writedata_free(struct nfs_write_data *wdata)
{
	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_writedata_rcu_free);
}

void nfs_writedata_release(void *wdata)
{
	nfs_writedata_free(wdata);
}

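/*
 * Record a write error on the open context, so that it can be reported
 * back to the application when it later syncs or closes the file.
 */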
static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
{
	ctx->error = error;
	set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
}

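/*
 * Look up the write request attached to @page via its page_private()
 * field, taking a reference if one is found. The caller must hold
 * inode->i_lock.
 */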
static struct nfs_page *nfs_page_find_request_locked(struct page *page)
{
	struct nfs_page *req = NULL;

	if (PagePrivate(page)) {
		req = (struct nfs_page *)page_private(page);
		if (req != NULL)
			kref_get(&req->wb_kref);
	}
	return req;
}

static struct nfs_page *nfs_page_find_request(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct nfs_page *req = NULL;

	spin_lock(&inode->i_lock);
	req = nfs_page_find_request_locked(page);
	spin_unlock(&inode->i_lock);
	return req;
}

/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
	struct inode *inode = page->mapping->host;
	loff_t end, i_size = i_size_read(inode);
	pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;

	if (i_size > 0 && page->index < end_index)
		return;
	end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
	if (i_size >= end)
		return;
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
	i_size_write(inode, end);
}

/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct page *page)
{
	SetPageError(page);
	nfs_zap_mapping(page->mapping->host, page->mapping);
}

/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
{
	if (PageUptodate(page))
		return;
	if (base != 0)
		return;
	if (count != nfs_page_length(page))
		return;
	SetPageUptodate(page);
}

static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_page	*req;
	int ret;

	for (;;) {
		req = nfs_update_request(ctx, page, offset, count);
		if (!IS_ERR(req))
			break;
		ret = PTR_ERR(req);
		if (ret != -EBUSY)
			return ret;
		ret = nfs_wb_page(page->mapping->host, page);
		if (ret != 0)
			return ret;
	}
	/* Update file length */
	nfs_grow_file(page, offset, count);
	nfs_clear_page_tag_locked(req);
	return 0;
}

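/*
 * Map the reason for this writeback onto RPC flush flags: writes issued
 * for memory reclaim go out at high priority and must be stable, while
 * periodic kupdate-style writeback runs at low priority.
 */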
static int wb_priority(struct writeback_control *wbc)
{
	if (wbc->for_reclaim)
		return FLUSH_HIGHPRI | FLUSH_STABLE;
	if (wbc->for_kupdate)
		return FLUSH_LOWPRI;
	return 0;
}

/*
 * NFS congestion control
 */

int nfs_congestion_kb;

#define NFS_CONGESTION_ON_THRESH	(nfs_congestion_kb >> (PAGE_SHIFT-10))
#define NFS_CONGESTION_OFF_THRESH	\
	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))

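/*
 * Note the hysteresis in the thresholds above: the backing device is
 * marked congested once outstanding writeback crosses nfs_congestion_kb,
 * and is only marked uncongested again after writeback has dropped below
 * three quarters of that limit.
 */
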
static int nfs_set_page_writeback(struct page *page)
{
	int ret = test_set_page_writeback(page);

	if (!ret) {
		struct inode *inode = page->mapping->host;
		struct nfs_server *nfss = NFS_SERVER(inode);

		if (atomic_long_inc_return(&nfss->writeback) >
				NFS_CONGESTION_ON_THRESH)
			set_bdi_congested(&nfss->backing_dev_info, WRITE);
	}
	return ret;
}

static void nfs_end_page_writeback(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct nfs_server *nfss = NFS_SERVER(inode);

	end_page_writeback(page);
	if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
		clear_bdi_congested(&nfss->backing_dev_info, WRITE);
}

/*
 * Find an associated nfs write request, and prepare to flush it out
 * May return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
				struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct nfs_page *req;
	int ret;

	spin_lock(&inode->i_lock);
	for (;;) {
		req = nfs_page_find_request_locked(page);
		if (req == NULL) {
			spin_unlock(&inode->i_lock);
			return 0;
		}
		if (nfs_set_page_tag_locked(req))
			break;
		/* Note: If we hold the page lock, as is the case in nfs_writepage,
		 *	 then the call to nfs_set_page_tag_locked() will always
		 *	 succeed provided that someone hasn't already marked the
		 *	 request as dirty (in which case we don't care).
		 */
		spin_unlock(&inode->i_lock);
		ret = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (ret != 0)
			return ret;
		spin_lock(&inode->i_lock);
	}
	if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
		/* This request is marked for commit */
		spin_unlock(&inode->i_lock);
		nfs_clear_page_tag_locked(req);
		nfs_pageio_complete(pgio);
		return 0;
	}
	if (nfs_set_page_writeback(page) != 0) {
		spin_unlock(&inode->i_lock);
		BUG();
	}
	spin_unlock(&inode->i_lock);
	if (!nfs_pageio_add_request(pgio, req)) {
		nfs_redirty_request(req);
		nfs_end_page_writeback(page);
		nfs_clear_page_tag_locked(req);
		return pgio->pg_error;
	}
	return 0;
}

static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
{
	struct inode *inode = page->mapping->host;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);

	nfs_pageio_cond_complete(pgio, page->index);
	return nfs_page_async_flush(pgio, page);
}

/*
 * Write an mmapped page to the server.
 */
static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	struct nfs_pageio_descriptor pgio;
	int err;

	nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc));
	err = nfs_do_writepage(page, wbc, &pgio);
	nfs_pageio_complete(&pgio);
	if (err < 0)
		return err;
	if (pgio.pg_error < 0)
		return pgio.pg_error;
	return 0;
}

int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = nfs_writepage_locked(page, wbc);
	unlock_page(page);
	return ret;
}

static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
{
	int ret;

	ret = nfs_do_writepage(page, wbc, data);
	unlock_page(page);
	return ret;
}

int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct nfs_pageio_descriptor pgio;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
	err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
	nfs_pageio_complete(&pgio);
	if (err < 0)
		return err;
	if (pgio.pg_error < 0)
		return pgio.pg_error;
	return 0;
}

/*
 * Insert a write request into an inode
 */
static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int error;

	error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
	BUG_ON(error == -EEXIST);
	if (error)
		return error;
	if (!nfsi->npages) {
		igrab(inode);
		if (nfs_have_delegation(inode, FMODE_WRITE))
			nfsi->change_attr++;
	}
	SetPagePrivate(req->wb_page);
	set_page_private(req->wb_page, (unsigned long)req);
	nfsi->npages++;
	kref_get(&req->wb_kref);
	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
				NFS_PAGE_TAG_LOCKED);
	return 0;
}

/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->path.dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	BUG_ON (!NFS_WBACK_BUSY(req));

	spin_lock(&inode->i_lock);
	set_page_private(req->wb_page, 0);
	ClearPagePrivate(req->wb_page);
	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
	nfsi->npages--;
	if (!nfsi->npages) {
		spin_unlock(&inode->i_lock);
		iput(inode);
	} else
		spin_unlock(&inode->i_lock);
	nfs_clear_request(req);
	nfs_release_request(req);
}

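/*
 * Mark the request's page dirty again, so that the write is retried
 * on a later writeback pass.
 */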
static void
nfs_redirty_request(struct nfs_page *req)
{
	__set_page_dirty_nobuffers(req->wb_page);
}

/*
 * Check if a request is dirty
 */
static inline int
nfs_dirty_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;

	if (page == NULL || test_bit(PG_NEED_COMMIT, &req->wb_flags))
		return 0;
	return !PageWriteback(req->wb_page);
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * Add a request to the inode's commit list.
 */
static void
nfs_mark_request_commit(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->path.dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	nfsi->ncommit++;
	set_bit(PG_NEED_COMMIT, &(req)->wb_flags);
	radix_tree_tag_set(&nfsi->nfs_page_tree,
			req->wb_index,
			NFS_PAGE_TAG_COMMIT);
	spin_unlock(&inode->i_lock);
	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
	inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
}

static inline
int nfs_write_need_commit(struct nfs_write_data *data)
{
	return data->verf.committed != NFS_FILE_SYNC;
}

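/*
 * An unstable write needs further processing: either queue the request
 * on the inode's commit list, or redirty the page so the data is resent.
 * Returns zero if the caller may finish the request instead.
 */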
static inline
int nfs_reschedule_unstable_write(struct nfs_page *req)
{
	if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
		nfs_mark_request_commit(req);
		return 1;
	}
	if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
		nfs_redirty_request(req);
		return 1;
	}
	return 0;
}

#else
static inline void
nfs_mark_request_commit(struct nfs_page *req)
{
}

static inline
int nfs_write_need_commit(struct nfs_write_data *data)
{
	return 0;
}

static inline
int nfs_reschedule_unstable_write(struct nfs_page *req)
{
	return 0;
}
#endif

/*
 * Wait for a request to complete.
 *
 * Interruptible by fatal signals only.
 */
static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;
	pgoff_t idx_end, next;
	unsigned int res = 0;
	int error;

	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	next = idx_start;
	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_LOCKED)) {
		if (req->wb_index > idx_end)
			break;

		next = req->wb_index + 1;
		BUG_ON(!NFS_WBACK_BUSY(req));

		kref_get(&req->wb_kref);
		spin_unlock(&inode->i_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		spin_lock(&inode->i_lock);
		if (error < 0)
			return error;
		res++;
	}
	return res;
}

static void nfs_cancel_commit_list(struct list_head *head)
{
	struct nfs_page *req;

	while(!list_empty(head)) {
		req = nfs_list_entry(head->next);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
				BDI_RECLAIMABLE);
		nfs_list_remove_request(req);
		clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
		nfs_inode_remove_request(req);
		nfs_unlock_request(req);
	}
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: destination list
 * @idx_start: lower bound of page->index to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int res = 0;

	if (nfsi->ncommit != 0) {
		res = nfs_scan_list(nfsi, dst, idx_start, npages,
				NFS_PAGE_TAG_COMMIT);
		nfsi->ncommit -= res;
	}
	return res;
}
#else
static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
{
	return 0;
}
#endif

/*
 * Try to update any existing write request, or create one if there is none.
 * In order to match, the request's credentials must match those of
 * the calling process.
 *
 * Note: Should always be called with the Page Lock held!
 */
static struct nfs_page *nfs_update_request(struct nfs_open_context* ctx,
		struct page *page, unsigned int offset, unsigned int bytes)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct nfs_page *req, *new = NULL;
	pgoff_t rqend, end;

	end = offset + bytes;

	for (;;) {
		/* Loop over all inode entries and see if we find
		 * A request for the page we wish to update
		 */
		spin_lock(&inode->i_lock);
		req = nfs_page_find_request_locked(page);
		if (req) {
			if (!nfs_set_page_tag_locked(req)) {
				int error;

				spin_unlock(&inode->i_lock);
				error = nfs_wait_on_request(req);
				nfs_release_request(req);
				if (error < 0) {
					if (new)
						nfs_release_request(new);
					return ERR_PTR(error);
				}
				continue;
			}
			spin_unlock(&inode->i_lock);
			if (new)
				nfs_release_request(new);
			break;
		}

		if (new) {
			int error;
			nfs_lock_request_dontget(new);
			error = nfs_inode_add_request(inode, new);
			if (error) {
				spin_unlock(&inode->i_lock);
				nfs_unlock_request(new);
				return ERR_PTR(error);
			}
			spin_unlock(&inode->i_lock);
			req = new;
			goto zero_page;
		}
		spin_unlock(&inode->i_lock);

		new = nfs_create_request(ctx, inode, page, offset, bytes);
		if (IS_ERR(new))
			return new;
	}

	/* We have a request for our page.
	 * If the creds don't match, or the
	 * page addresses don't match,
	 * tell the caller to wait on the conflicting
	 * request.
	 */
	rqend = req->wb_offset + req->wb_bytes;
	if (req->wb_context != ctx
	    || req->wb_page != page
	    || !nfs_dirty_request(req)
	    || offset > rqend || end < req->wb_offset) {
		nfs_clear_page_tag_locked(req);
		return ERR_PTR(-EBUSY);
	}

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
		req->wb_bytes = max(end, rqend) - req->wb_offset;
		goto zero_page;
	}

	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;

	return req;
zero_page:
	/* If this page might potentially be marked as up to date,
	 * then we need to zero any uninitalised data. */
	if (req->wb_pgbase == 0 && req->wb_bytes != PAGE_CACHE_SIZE
			&& !PageUptodate(req->wb_page))
		zero_user_segment(req->wb_page, req->wb_bytes, PAGE_CACHE_SIZE);
	return req;
}

int nfs_flush_incompatible(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct nfs_page *req;
	int do_flush, status;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	do {
		req = nfs_page_find_request(page);
		if (req == NULL)
			return 0;
		do_flush = req->wb_page != page || req->wb_context != ctx
			|| !nfs_dirty_request(req);
		nfs_release_request(req);
		if (!do_flush)
			return 0;
		status = nfs_wb_page(page->mapping->host, page);
	} while (status == 0);
	return status;
}

/*
 * If the page cache is marked as unsafe or invalid, then we can't rely on
 * the PageUptodate() flag. In this case, we will need to turn off
 * write optimisations that depend on the page contents being correct.
 */
static int nfs_write_pageuptodate(struct page *page, struct inode *inode)
{
	return PageUptodate(page) &&
		!(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA));
}

/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct inode *inode = page->mapping->host;
	int status = 0;

	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

	dprintk("NFS: nfs_updatepage(%s/%s %d@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name, count,
		(long long)(page_offset(page) +offset));

	/* If we're not using byte range locks, and we know the page
	 * is up to date, it may be more efficient to extend the write
	 * to cover the entire page in order to avoid fragmentation
	 * inefficiencies.
	 */
	if (nfs_write_pageuptodate(page, inode) &&
			inode->i_flock == NULL &&
			!(file->f_flags & O_SYNC)) {
		count = max(count + offset, nfs_page_length(page));
		offset = 0;
	}

	status = nfs_writepage_setup(ctx, page, offset, count);
	__set_page_dirty_nobuffers(page);

	dprintk("NFS: nfs_updatepage returns %d (isize %Ld)\n",
			status, (long long)i_size_read(inode));
	if (status < 0)
		nfs_set_pageerror(page);
	return status;
}

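/*
 * Release a write request once the last partial WRITE for its page has
 * completed: drop it on error, reschedule it for commit or resend if the
 * server only wrote the data unstably, and otherwise mark the page up to
 * date and remove the request from the inode.
 */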
static void nfs_writepage_release(struct nfs_page *req)
{
	if (PageError(req->wb_page)) {
		nfs_end_page_writeback(req->wb_page);
		nfs_inode_remove_request(req);
	} else if (!nfs_reschedule_unstable_write(req)) {
		/* Set the PG_uptodate flag */
		nfs_mark_uptodate(req->wb_page, req->wb_pgbase, req->wb_bytes);
		nfs_end_page_writeback(req->wb_page);
		nfs_inode_remove_request(req);
	} else
		nfs_end_page_writeback(req->wb_page);
	nfs_clear_page_tag_locked(req);
}

static int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
		case FLUSH_HIGHPRI:
			return RPC_PRIORITY_HIGH;
		case FLUSH_LOWPRI:
			return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_write_rpcsetup(struct nfs_page *req,
		struct nfs_write_data *data,
		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset,
		int how)
{
	struct inode *inode = req->wb_context->path.dentry->d_inode;
	int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	int priority = flush_task_priority(how);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = req->wb_context->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(inode),
		.task = &data->task,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.flags = flags,
		.priority = priority,
	};

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	data->req = req;
	data->inode = inode = req->wb_context->path.dentry->d_inode;
	data->cred = msg.rpc_cred;

	data->args.fh     = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages  = data->pagevec;
	data->args.count  = count;
	data->args.context = req->wb_context;
	data->args.stable  = NFS_UNSTABLE;
	if (how & FLUSH_STABLE) {
		data->args.stable = NFS_DATA_SYNC;
		if (!NFS_I(inode)->ncommit)
			data->args.stable = NFS_FILE_SYNC;
	}

	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct.  */
	NFS_PROTO(inode)->write_setup(data, &msg);

	dprintk("NFS: %5u initiated write call "
		"(req %s/%Ld, %u bytes @ offset %Lu)\n",
		data->task.tk_pid,
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count,
		(unsigned long long)data->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (!IS_ERR(task))
		rpc_put_task(task);
}

/*
 * Generate multiple small requests to write out a single
 * contiguous dirty area on one page.
 */
static int nfs_flush_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_write_data *data;
	size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
	unsigned int offset;
	int requests = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = count;
	do {
		size_t len = min(nbytes, wsize);

		data = nfs_writedata_alloc(1);
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		nbytes -= len;
	} while (nbytes != 0);
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	offset = 0;
	nbytes = count;
	do {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		if (nbytes < wsize)
			wsize = nbytes;
		nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
				   wsize, offset, how);
		offset += wsize;
		nbytes -= wsize;
	} while (nbytes != 0);

	return 0;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
		nfs_writedata_release(data);
	}
	nfs_redirty_request(req);
	nfs_end_page_writeback(req->wb_page);
	nfs_clear_page_tag_locked(req);
	return -ENOMEM;
}

/*
 * Create an RPC task for the given write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
{
	struct nfs_page *req;
	struct page **pages;
	struct nfs_write_data *data;

	data = nfs_writedata_alloc(npages);
	if (!data)
		goto out_bad;

	pages = data->pagevec;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		*pages++ = req->wb_page;
	}
	req = nfs_list_entry(data->pages.next);

	/* Set up the argument struct */
	nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);

	return 0;
 out_bad:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_redirty_request(req);
		nfs_end_page_writeback(req->wb_page);
		nfs_clear_page_tag_locked(req);
	}
	return -ENOMEM;
}

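/*
 * Choose a flush strategy for the pageio descriptor: when the server's
 * wsize is smaller than a page, each page must be split over several
 * WRITE calls (nfs_flush_multi); otherwise whole pages can be batched
 * into a single call (nfs_flush_one).
 */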
static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
				  struct inode *inode, int ioflags)
{
	size_t wsize = NFS_SERVER(inode)->wsize;

	if (wsize < PAGE_CACHE_SIZE)
		nfs_pageio_init(pgio, inode, nfs_flush_multi, wsize, ioflags);
	else
		nfs_pageio_init(pgio, inode, nfs_flush_one, wsize, ioflags);
}

/*
 * Handle a write reply that flushed part of a page.
 */
static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page *req = data->req;
	struct page *page = req->wb_page;

	dprintk("NFS: write (%s/%Ld %d@%Ld)",
		req->wb_context->path.dentry->d_inode->i_sb->s_id,
		(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
		req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_writeback_done(task, data) != 0)
		return;

	if (task->tk_status < 0) {
		nfs_set_pageerror(page);
		nfs_context_set_write_error(req->wb_context, task->tk_status);
		dprintk(", error = %d\n", task->tk_status);
		goto out;
	}

	if (nfs_write_need_commit(data)) {
		struct inode *inode = page->mapping->host;

		spin_lock(&inode->i_lock);
		if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
			/* Do nothing we need to resend the writes */
		} else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
			dprintk(" defer commit\n");
		} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
			set_bit(PG_NEED_RESCHED, &req->wb_flags);
			clear_bit(PG_NEED_COMMIT, &req->wb_flags);
			dprintk(" server reboot detected\n");
		}
		spin_unlock(&inode->i_lock);
	} else
		dprintk(" OK\n");

out:
	if (atomic_dec_and_test(&req->wb_complete))
		nfs_writepage_release(req);
}

static const struct rpc_call_ops nfs_write_partial_ops = {
	.rpc_call_done = nfs_writeback_done_partial,
	.rpc_release = nfs_writedata_release,
};

/*
 * Handle a write reply that flushes a whole page.
 *
 * FIXME: There is an inherent race with invalidate_inode_pages and
 *	  writebacks since the page->count is kept > 1 for as long
 *	  as the page has a write request pending.
 */
static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page *req;
	struct page *page;

	if (nfs_writeback_done(task, data) != 0)
		return;

	/* Update attributes as result of writeback. */
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		page = req->wb_page;

		dprintk("NFS: write (%s/%Ld %d@%Ld)",
			req->wb_context->path.dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));

		if (task->tk_status < 0) {
			nfs_set_pageerror(page);
			nfs_context_set_write_error(req->wb_context, task->tk_status);
			dprintk(", error = %d\n", task->tk_status);
			goto remove_request;
		}

		if (nfs_write_need_commit(data)) {
			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
			nfs_mark_request_commit(req);
			nfs_end_page_writeback(page);
			dprintk(" marked for commit\n");
			goto next;
		}
		/* Set the PG_uptodate flag? */
		nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
		dprintk(" OK\n");
remove_request:
		nfs_end_page_writeback(page);
		nfs_inode_remove_request(req);
	next:
		nfs_clear_page_tag_locked(req);
	}
}

static const struct rpc_call_ops nfs_write_full_ops = {
	.rpc_call_done = nfs_writeback_done_full,
	.rpc_release = nfs_writedata_release,
};

/*
 * This function is called when the WRITE call is complete.
 */
int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
{
	struct nfs_writeargs *argp = &data->args;
	struct nfs_writeres *resp = &data->res;
	int status;

	dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients.  A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
	status = NFS_PROTO(data->inode)->write_done(task, data);
	if (status != 0)
		return status;
	nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *	 the server reports NFS_DATA_SYNC, but performs
		 *	 NFS_FILE_SYNC. We therefore implement this checking
		 *	 as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long complain;

		if (time_before(complain, jiffies)) {
			dprintk("NFS: faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
				NFS_SERVER(data->inode)->nfs_client->cl_hostname,
				resp->verf->committed, argp->stable);
			complain = jiffies + 300 * HZ;
		}
	}
#endif
	/* Is this a short write? */
	if (task->tk_status >= 0 && resp->count < argp->count) {
		static unsigned long complain;

		nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);

		/* Has the server at least made some progress? */
		if (resp->count != 0) {
			/* Was this an NFSv2 write or an NFSv3 stable write? */
			if (resp->verf->committed != NFS_UNSTABLE) {
				/* Resend from where the server left off */
				argp->offset += resp->count;
				argp->pgbase += resp->count;
				argp->count -= resp->count;
			} else {
				/* Resend as a stable write in order to avoid
				 * headaches in the case of a server crash.
				 */
				argp->stable = NFS_FILE_SYNC;
			}
			rpc_restart_call(task);
			return -EAGAIN;
		}
		if (time_before(complain, jiffies)) {
			printk(KERN_WARNING
			       "NFS: Server wrote zero bytes, expected %u.\n",
					argp->count);
			complain = jiffies + 300 * HZ;
		}
		/* Can't do anything about it except throw an error. */
		task->tk_status = -EIO;
	}
	return 0;
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
void nfs_commit_release(void *wdata)
{
	nfs_commit_free(wdata);
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_commit_rpcsetup(struct list_head *head,
		struct nfs_write_data *data,
		int how)
{
	struct nfs_page *first = nfs_list_entry(head->next);
	struct inode *inode = first->wb_context->path.dentry->d_inode;
	int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	int priority = flush_task_priority(how);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = first->wb_context->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = NFS_CLIENT(inode),
		.rpc_message = &msg,
		.callback_ops = &nfs_commit_ops,
		.callback_data = data,
		.flags = flags,
		.priority = priority,
	};

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);

	data->inode	  = inode;
	data->cred	  = msg.rpc_cred;

	data->args.fh     = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count  = 0;
	data->res.count   = 0;
	data->res.fattr   = &data->fattr;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct.  */
	NFS_PROTO(inode)->commit_setup(data, &msg);

	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);

	task = rpc_run_task(&task_setup_data);
	if (!IS_ERR(task))
		rpc_put_task(task);
}

/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_write_data *data;
	struct nfs_page *req;

	data = nfs_commit_alloc();

	if (!data)
		goto out_bad;

	/* Set up the argument struct */
	nfs_commit_rpcsetup(head, data, how);
	return 0;
 out_bad:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
				BDI_RECLAIMABLE);
		nfs_clear_page_tag_locked(req);
	}
	return -ENOMEM;
}

/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page *req;

	dprintk("NFS: %5u nfs_commit_done (status %d)\n",
				task->tk_pid, task->tk_status);

	/* Call the NFS version-specific code */
	if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
		return;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
				BDI_RECLAIMABLE);

		dprintk("NFS: commit (%s/%Ld %d@%Ld)",
			req->wb_context->path.dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
		if (task->tk_status < 0) {
			nfs_context_set_write_error(req->wb_context, task->tk_status);
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", task->tk_status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
			/* We have a match */
			/* Set the PG_uptodate flag */
			nfs_mark_uptodate(req->wb_page, req->wb_pgbase,
					req->wb_bytes);
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk(" mismatch\n");
		nfs_redirty_request(req);
	next:
		nfs_clear_page_tag_locked(req);
	}
}

static const struct rpc_call_ops nfs_commit_ops = {
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};

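/*
 * Scan the inode for requests that are waiting for a COMMIT, and send
 * a single commit call covering all of them. Returns the number of
 * requests found, or a negative error.
 */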
int nfs_commit_inode(struct inode *inode, int how)
{
	LIST_HEAD(head);
	int res;

	spin_lock(&inode->i_lock);
	res = nfs_scan_commit(inode, &head, 0, 0);
	spin_unlock(&inode->i_lock);
	if (res) {
		int error = nfs_commit_list(inode, &head, how);
		if (error < 0)
			return error;
	}
	return res;
}
#else
static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	return 0;
}
#endif

long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
{
	struct inode *inode = mapping->host;
	pgoff_t idx_start, idx_end;
	unsigned int npages = 0;
	LIST_HEAD(head);
	int nocommit = how & FLUSH_NOCOMMIT;
	long pages, ret;

	/* FIXME */
	if (wbc->range_cyclic)
		idx_start = 0;
	else {
		idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
		idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (idx_end > idx_start) {
			pgoff_t l_npages = 1 + idx_end - idx_start;
			npages = l_npages;
			if (sizeof(npages) != sizeof(l_npages) &&
					(pgoff_t)npages != l_npages)
				npages = 0;
		}
	}
	how &= ~FLUSH_NOCOMMIT;
	spin_lock(&inode->i_lock);
	do {
		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
		if (ret != 0)
			continue;
		if (nocommit)
			break;
		pages = nfs_scan_commit(inode, &head, idx_start, npages);
		if (pages == 0)
			break;
		if (how & FLUSH_INVALIDATE) {
			spin_unlock(&inode->i_lock);
			nfs_cancel_commit_list(&head);
			ret = pages;
			spin_lock(&inode->i_lock);
			continue;
		}
		pages += nfs_scan_commit(inode, &head, 0, 0);
		spin_unlock(&inode->i_lock);
		ret = nfs_commit_list(inode, &head, how);
		spin_lock(&inode->i_lock);
	} while (ret >= 0);
	spin_unlock(&inode->i_lock);
	return ret;
}

static int __nfs_write_mapping(struct address_space *mapping, struct writeback_control *wbc, int how)
{
	int ret;

	ret = nfs_writepages(mapping, wbc);
	if (ret < 0)
		goto out;
	ret = nfs_sync_mapping_wait(mapping, wbc, how);
	if (ret < 0)
		goto out;
	return 0;
out:
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return ret;
}

/* Two pass sync: first using WB_SYNC_NONE, then WB_SYNC_ALL */
static int nfs_write_mapping(struct address_space *mapping, int how)
{
	struct writeback_control wbc = {
		.bdi = mapping->backing_dev_info,
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = LONG_MAX,
		.for_writepages = 1,
		.range_cyclic = 1,
	};
	int ret;

	ret = __nfs_write_mapping(mapping, &wbc, how);
	if (ret < 0)
		return ret;
	wbc.sync_mode = WB_SYNC_ALL;
	return __nfs_write_mapping(mapping, &wbc, how);
}

/*
 * flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
{
	return nfs_write_mapping(inode->i_mapping, 0);
}

int nfs_wb_nocommit(struct inode *inode)
{
	return nfs_write_mapping(inode->i_mapping, FLUSH_NOCOMMIT);
}

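/*
 * Cancel any outstanding write request on @page without flushing the
 * data back to the server, used when the page is about to be invalidated.
 */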
int nfs_wb_page_cancel(struct inode *inode, struct page *page)
{
	struct nfs_page *req;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.bdi = page->mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret = 0;

	BUG_ON(!PageLocked(page));
	for (;;) {
		req = nfs_page_find_request(page);
		if (req == NULL)
			goto out;
		if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
			nfs_release_request(req);
			break;
		}
		if (nfs_lock_request_dontget(req)) {
			nfs_inode_remove_request(req);
			/*
			 * In case nfs_inode_remove_request has marked the
			 * page as being dirty
			 */
			cancel_dirty_page(page, PAGE_CACHE_SIZE);
			nfs_unlock_request(req);
			break;
		}
		ret = nfs_wait_on_request(req);
		if (ret < 0)
			goto out;
	}
	if (!PagePrivate(page))
		return 0;
	ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE);
out:
	return ret;
}

static int nfs_wb_page_priority(struct inode *inode, struct page *page,
				int how)
{
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.bdi = page->mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret;

	BUG_ON(!PageLocked(page));
	if (clear_page_dirty_for_io(page)) {
		ret = nfs_writepage_locked(page, &wbc);
		if (ret < 0)
			goto out;
	}
	if (!PagePrivate(page))
		return 0;
	ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
	if (ret >= 0)
		return 0;
out:
	__mark_inode_dirty(inode, I_DIRTY_PAGES);
	return ret;
}

/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page* page)
{
	return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
}

int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_write_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		return -ENOMEM;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_wdata_cachep);
	if (nfs_commit_mempool == NULL)
		return -ENOMEM;

	/*
	 * NFS congestion size, scale with available memory.
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256M
	 */
	nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
	if (nfs_congestion_kb > 256*1024)
		nfs_congestion_kb = 256*1024;

	return 0;
}

void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}