// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 * 04 May 2005	support O_DIRECT with aio  --cel
 */
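
/*
 * Editorial illustration (not part of this file): a minimal userspace
 * sketch of the access pattern the block comment above describes.  The
 * buffer alignment and use of posix_memalign() are assumptions about a
 * typical O_DIRECT caller; as noted above, this client does not correct
 * unaligned requests, and error handling is omitted for brevity.
 *
 *	#define _GNU_SOURCE		// for O_DIRECT
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/mnt/nfs/data", O_RDWR | O_DIRECT);
 *	void *buf;
 *	posix_memalign(&buf, 4096, 4096);	// page-aligned buffer
 *	pread(fd, buf, 4096, 0);	// READ goes straight to the server
 *	pwrite(fd, buf, 4096, 0);	// returns only after stable storage
 */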

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/module.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>

#include "internal.h"
#include "iostat.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

struct nfs_direct_req {
	struct kref		kref;		/* release manager */

	/* I/O parameters */
	struct nfs_open_context	*ctx;		/* file open context info */
	struct nfs_lock_context	*l_ctx;		/* Lock context info */
	struct kiocb		*iocb;		/* controlling i/o request */
	struct inode		*inode;		/* target file of i/o */

	/* completion state */
	atomic_t		io_count;	/* i/os we're waiting for */
	spinlock_t		lock;		/* protect completion state */

	loff_t			io_start;	/* Start offset for I/O */
	ssize_t			count,		/* bytes actually processed */
				max_count,	/* max expected count */
				bytes_left,	/* bytes left to be sent */
				error;		/* any reported error */
	struct completion	completion;	/* wait for i/o completion */

	/* commit state */
	struct nfs_mds_commit_info mds_cinfo;	/* Storage for cinfo */
	struct pnfs_ds_commit_info ds_cinfo;	/* Storage for cinfo */
	struct work_struct	work;
	int			flags;
	/* for write */
#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
	/* for read */
#define NFS_ODIRECT_SHOULD_DIRTY	(3)	/* dirty user-space page after read */
#define NFS_ODIRECT_DONE		INT_MAX	/* request is done; no more rescheduling */
};

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
static void nfs_direct_write_complete(struct nfs_direct_req *dreq);
static void nfs_direct_write_schedule_work(struct work_struct *work);

static inline void get_dreq(struct nfs_direct_req *dreq)
{
	atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
	return atomic_dec_and_test(&dreq->io_count);
}
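
/*
 * Editorial note on the pattern above, derived from the callers below:
 * each scheduling path takes one reference up front and one per queued
 * sub-request (via the ->init_hdr callback); every completion drops
 * one, and whichever path sees put_dreq() return true finishes the
 * whole request.  In outline:
 *
 *	get_dreq(dreq);			// held by the scheduler
 *	... queue sub-requests, each taking its own reference ...
 *	if (put_dreq(dreq))		// last reference gone?
 *		nfs_direct_complete(dreq);
 */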

static void
nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
			    const struct nfs_pgio_header *hdr,
			    ssize_t dreq_len)
{
	if (!(test_bit(NFS_IOHDR_ERROR, &hdr->flags) ||
	      test_bit(NFS_IOHDR_EOF, &hdr->flags)))
		return;
	if (dreq->max_count >= dreq_len) {
		dreq->max_count = dreq_len;
		if (dreq->count > dreq_len)
			dreq->count = dreq_len;

		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
			dreq->error = hdr->error;
		else /* Clear outstanding error if this is EOF */
			dreq->error = 0;
	}
}

static void
nfs_direct_count_bytes(struct nfs_direct_req *dreq,
		       const struct nfs_pgio_header *hdr)
{
	loff_t hdr_end = hdr->io_start + hdr->good_bytes;
	ssize_t dreq_len = 0;

	if (hdr_end > dreq->io_start)
		dreq_len = hdr_end - dreq->io_start;

	nfs_direct_handle_truncated(dreq, hdr, dreq_len);

	if (dreq_len > dreq->max_count)
		dreq_len = dreq->max_count;

	if (dreq->count < dreq_len)
		dreq->count = dreq_len;
}
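
/*
 * Worked example for the accounting above (illustrative numbers): with
 * dreq->io_start = 0 and dreq->max_count = 16384, a header reporting
 * io_start = 0, good_bytes = 4096 and NFS_IOHDR_EOF gives
 * dreq_len = 4096, so max_count and count are clamped to 4096 and any
 * previously recorded error is cleared.  A later header for a range
 * past EOF can then never raise dreq->count beyond the truncated
 * max_count.
 */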

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @iocb: target I/O control block
 * @iter: I/O buffer
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O. However, for most direct IO, we
 * shunt off direct read and write requests before the VFS gets them,
 * so this method is only ever called for swap.
 */
ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	/* we only support swap file calling nfs_direct_IO */
	if (!IS_SWAPFILE(inode))
		return 0;

	VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);

	if (iov_iter_rw(iter) == READ)
		return nfs_file_direct_read(iocb, iter);
	return nfs_file_direct_write(iocb, iter);
}

static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
	unsigned int i;
	for (i = 0; i < npages; i++)
		put_page(pages[i]);
}

void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
			      struct nfs_direct_req *dreq)
{
	cinfo->inode = dreq->inode;
	cinfo->mds = &dreq->mds_cinfo;
	cinfo->ds = &dreq->ds_cinfo;
	cinfo->dreq = dreq;
	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
	struct nfs_direct_req *dreq;

	dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
	kref_get(&dreq->kref);
	init_completion(&dreq->completion);
	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
	pnfs_init_ds_commit_info(&dreq->ds_cinfo);
	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
	spin_lock_init(&dreq->lock);

	return dreq;
}

static void nfs_direct_req_free(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

	pnfs_release_ds_info(&dreq->ds_cinfo, dreq->inode);
	if (dreq->l_ctx != NULL)
		nfs_put_lock_context(dreq->l_ctx);
	if (dreq->ctx != NULL)
		put_nfs_open_context(dreq->ctx);
	kmem_cache_free(nfs_direct_cachep, dreq);
}

static void nfs_direct_req_release(struct nfs_direct_req *dreq)
{
	kref_put(&dreq->kref, nfs_direct_req_free);
}

ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq)
{
	return dreq->bytes_left;
}
EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
	ssize_t result = -EIOCBQUEUED;

	/* Async requests don't wait here */
	if (dreq->iocb)
		goto out;

	result = wait_for_completion_killable(&dreq->completion);

	if (!result) {
		result = dreq->count;
		WARN_ON_ONCE(dreq->count < 0);
	}
	if (!result)
		result = dreq->error;

out:
	return (ssize_t) result;
}

/*
 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
	struct inode *inode = dreq->inode;

	inode_dio_end(inode);

	if (dreq->iocb) {
		long res = (long) dreq->error;
		if (dreq->count != 0) {
			res = (long) dreq->count;
			WARN_ON_ONCE(dreq->count < 0);
		}
		dreq->iocb->ki_complete(dreq->iocb, res, 0);
	}

	complete(&dreq->completion);

	nfs_direct_req_release(dreq);
}

static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	struct nfs_direct_req *dreq = hdr->dreq;

	spin_lock(&dreq->lock);
	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		spin_unlock(&dreq->lock);
		goto out_put;
	}

	nfs_direct_count_bytes(dreq, hdr);
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;

		if (!PageCompound(page) && bytes < hdr->good_bytes &&
		    (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY))
			set_page_dirty(page);
		bytes += req->wb_bytes;
		nfs_list_remove_request(req);
		nfs_release_request(req);
	}
out_put:
	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	hdr->release(hdr);
}

static void nfs_read_sync_pgio_error(struct list_head *head, int error)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_release_request(req);
	}
}

static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
{
	get_dreq(hdr->dreq);
}

static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
	.error_cleanup = nfs_read_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_read_completion,
};

/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation.  If nfs_readdata_alloc() or get_user_pages() fails,
 * bail and stop sending more reads.  Read length accounting is
 * handled automatically by nfs_direct_read_result().  Otherwise, if
 * no requests have been sent, just return an error.
 */

static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
					      struct iov_iter *iter,
					      loff_t pos)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	ssize_t result = -EINVAL;
	size_t requested_bytes = 0;
	size_t rsize = max_t(size_t, NFS_SERVER(inode)->rsize, PAGE_SIZE);

	nfs_pageio_init_read(&desc, dreq->inode, false,
			     &nfs_direct_read_completion_ops);
	get_dreq(dreq);
	desc.pg_dreq = dreq;
	inode_dio_begin(inode);

	while (iov_iter_count(iter)) {
		struct page **pagevec;
		size_t bytes;
		size_t pgbase;
		unsigned npages, i;

		result = iov_iter_get_pages_alloc(iter, &pagevec,
						  rsize, &pgbase);
		if (result < 0)
			break;

		bytes = result;
		iov_iter_advance(iter, bytes);
		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
			/* XXX do we need to do the eof zeroing found in async_filler? */
			req = nfs_create_request(dreq->ctx, pagevec[i],
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(&desc, req)) {
				result = desc.pg_error;
				nfs_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			requested_bytes += req_len;
			pos += req_len;
			dreq->bytes_left -= req_len;
		}
		nfs_direct_release_pages(pagevec, npages);
		kvfree(pagevec);
		if (result < 0)
			break;
	}

	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_end(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	return requested_bytes;
}
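
/*
 * Worked example for the scheduling loop above (illustrative numbers,
 * assuming PAGE_SIZE = 4096 and that iov_iter_get_pages_alloc()
 * returns the full request): a 10000-byte user buffer starting 512
 * bytes into a page yields result = 10000 and pgbase = 512, so
 * npages = (10000 + 512 + 4095) / 4096 = 3.  The three nfs_page
 * requests then cover 3584, 4096 and 2320 bytes: the first is trimmed
 * to PAGE_SIZE - pgbase, and later pages start at pgbase = 0.
 */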

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers into which to read data
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct nfs_direct_req *dreq;
	struct nfs_lock_context *l_ctx;
	ssize_t result, requested;
	size_t count = iov_iter_count(iter);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

	dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
		 file, count, (long long) iocb->ki_pos);

	result = 0;
	if (!count)
		goto out;

	task_io_account_read(count);

	result = -ENOMEM;
	dreq = nfs_direct_req_alloc();
	if (dreq == NULL)
		goto out;

	dreq->inode = inode;
	dreq->bytes_left = dreq->max_count = count;
	dreq->io_start = iocb->ki_pos;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	if (IS_ERR(l_ctx)) {
		result = PTR_ERR(l_ctx);
		nfs_direct_req_release(dreq);
		goto out_release;
	}
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	if (iter_is_iovec(iter))
		dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;

	nfs_start_io_direct(inode);

	NFS_I(inode)->read_io += count;
	requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);

	nfs_end_io_direct(inode);

	if (requested > 0) {
		result = nfs_direct_wait(dreq);
		if (result > 0) {
			requested -= result;
			iocb->ki_pos += result;
		}
		iov_iter_revert(iter, requested);
	} else {
		result = requested;
	}

out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}

static void
nfs_direct_join_group(struct list_head *list, struct inode *inode)
{
	struct nfs_page *req, *next;

	list_for_each_entry(req, list, wb_list) {
		if (req->wb_head != req || req->wb_this_page == req)
			continue;
		for (next = req->wb_this_page;
		     next != req->wb_head;
		     next = next->wb_this_page) {
			nfs_list_remove_request(next);
			nfs_release_request(next);
		}
		nfs_join_page_group(req, inode);
	}
}

static void
nfs_direct_write_scan_commit_list(struct inode *inode,
				  struct list_head *list,
				  struct nfs_commit_info *cinfo)
{
	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
	pnfs_recover_commit_reqs(list, cinfo);
	nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
}

static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
	struct nfs_pageio_descriptor desc;
	struct nfs_page *req, *tmp;
	LIST_HEAD(reqs);
	struct nfs_commit_info cinfo;
	LIST_HEAD(failed);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);

	nfs_direct_join_group(&reqs, dreq->inode);

	dreq->count = 0;
	dreq->max_count = 0;
	list_for_each_entry(req, &reqs, wb_list)
		dreq->max_count += req->wb_bytes;
	nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
	get_dreq(dreq);

	nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;

	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
		/* Bump the transmission count */
		req->wb_nio++;
		if (!nfs_pageio_add_request(&desc, req)) {
			nfs_list_move_request(req, &failed);
			spin_lock(&cinfo.inode->i_lock);
			dreq->flags = 0;
			if (desc.pg_error < 0)
				dreq->error = desc.pg_error;
			else
				dreq->error = -EIO;
			spin_unlock(&cinfo.inode->i_lock);
		}
		nfs_release_request(req);
	}
	nfs_pageio_complete(&desc);

	while (!list_empty(&failed)) {
		req = nfs_list_entry(failed.next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq);
}

static void nfs_direct_commit_complete(struct nfs_commit_data *data)
{
	const struct nfs_writeverf *verf = data->res.verf;
	struct nfs_direct_req *dreq = data->dreq;
	struct nfs_commit_info cinfo;
	struct nfs_page *req;
	int status = data->task.tk_status;

	if (status < 0) {
		/* Errors in commit are fatal */
		dreq->error = status;
		dreq->max_count = 0;
		dreq->count = 0;
		dreq->flags = NFS_ODIRECT_DONE;
	} else if (dreq->flags == NFS_ODIRECT_DONE)
		status = dreq->error;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		if (status >= 0 && !nfs_write_match_verf(verf, req)) {
			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
			/*
			 * Despite the reboot, the write was successful,
			 * so reset wb_nio.
			 */
			req->wb_nio = 0;
			nfs_mark_request_commit(req, NULL, &cinfo, 0);
		} else /* Error or match */
			nfs_release_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
		nfs_direct_write_complete(dreq);
}
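
/*
 * Editorial note on the verifier check above: every WRITE reply
 * carries the server's boot verifier, which is saved in req->wb_verf.
 * If the server reboots between an unstable WRITE and the COMMIT, the
 * COMMIT reply carries a different verifier, nfs_write_match_verf()
 * fails, and the request is re-queued with NFS_ODIRECT_RESCHED_WRITES
 * set, so nfs_direct_write_schedule_work() resends the data with
 * FLUSH_STABLE rather than reporting possibly-lost writes as durable.
 */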

static void nfs_direct_resched_write(struct nfs_commit_info *cinfo,
				     struct nfs_page *req)
{
	struct nfs_direct_req *dreq = cinfo->dreq;

	spin_lock(&dreq->lock);
	if (dreq->flags != NFS_ODIRECT_DONE)
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	spin_unlock(&dreq->lock);
	nfs_mark_request_commit(req, NULL, cinfo, 0);
}

static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
	.completion = nfs_direct_commit_complete,
	.resched_write = nfs_direct_resched_write,
};

static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
	int res;
	struct nfs_commit_info cinfo;
	LIST_HEAD(mds_list);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
	if (res < 0) /* res == -ENOMEM */
		nfs_direct_write_reschedule(dreq);
}

static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq)
{
	struct nfs_commit_info cinfo;
	struct nfs_page *req;
	LIST_HEAD(reqs);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);

	while (!list_empty(&reqs)) {
		req = nfs_list_entry(reqs.next);
		nfs_list_remove_request(req);
		nfs_release_request(req);
		nfs_unlock_and_release_request(req);
	}
}

static void nfs_direct_write_schedule_work(struct work_struct *work)
{
	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
	int flags = dreq->flags;

	dreq->flags = 0;
	switch (flags) {
	case NFS_ODIRECT_DO_COMMIT:
		nfs_direct_commit_schedule(dreq);
		break;
	case NFS_ODIRECT_RESCHED_WRITES:
		nfs_direct_write_reschedule(dreq);
		break;
	default:
		nfs_direct_write_clear_reqs(dreq);
		nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
		nfs_direct_complete(dreq);
	}
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq)
{
	queue_work(nfsiod_workqueue, &dreq->work); /* Calls nfs_direct_write_schedule_work */
}
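
/*
 * Editorial summary of the write state machine dispatched above:
 *
 *	NFS_ODIRECT_DO_COMMIT      -> send a COMMIT for unstable replies
 *	NFS_ODIRECT_RESCHED_WRITES -> resend the data with FLUSH_STABLE
 *	anything else (including NFS_ODIRECT_DONE) -> drop remaining
 *	    requests, invalidate cached pages for the range, complete
 */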

static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_direct_req *dreq = hdr->dreq;
	struct nfs_commit_info cinfo;
	bool request_commit = false;
	struct nfs_page *req = nfs_list_entry(hdr->pages.next);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);

	spin_lock(&dreq->lock);
	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		spin_unlock(&dreq->lock);
		goto out_put;
	}

	nfs_direct_count_bytes(dreq, hdr);
	if (hdr->good_bytes != 0 && nfs_write_need_commit(hdr)) {
		switch (dreq->flags) {
		case 0:
			dreq->flags = NFS_ODIRECT_DO_COMMIT;
			request_commit = true;
			break;
		case NFS_ODIRECT_RESCHED_WRITES:
		case NFS_ODIRECT_DO_COMMIT:
			request_commit = true;
		}
	}
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		req = nfs_list_entry(hdr->pages.next);
		nfs_list_remove_request(req);
		if (request_commit) {
			kref_get(&req->wb_kref);
			memcpy(&req->wb_verf, &hdr->verf.verifier,
			       sizeof(req->wb_verf));
			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
						hdr->ds_commit_idx);
		}
		nfs_unlock_and_release_request(req);
	}

out_put:
	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq);
	hdr->release(hdr);
}

static void nfs_write_sync_pgio_error(struct list_head *head, int error)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}
}

static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
{
	struct nfs_direct_req *dreq = hdr->dreq;

	spin_lock(&dreq->lock);
	if (dreq->error == 0) {
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
		/* fake unstable write to let common nfs resend pages */
		hdr->verf.committed = NFS_UNSTABLE;
		hdr->good_bytes = hdr->args.offset + hdr->args.count -
				  hdr->io_start;
	}
	spin_unlock(&dreq->lock);
}

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
	.error_cleanup = nfs_write_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_write_completion,
	.reschedule_io = nfs_direct_write_reschedule_io,
};

/*
 * NB: Return the value of the first error return code.  Subsequent
 *     errors after the first one are ignored.
 */
/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation.  If nfs_writedata_alloc() or get_user_pages() fails,
 * bail and stop sending more writes.  Write length accounting is
 * handled automatically by nfs_direct_write_result().  Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
					       struct iov_iter *iter,
					       loff_t pos)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	ssize_t result = 0;
	size_t requested_bytes = 0;
	size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);

	nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE, false,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;
	get_dreq(dreq);
	inode_dio_begin(inode);

	NFS_I(inode)->write_io += iov_iter_count(iter);
	while (iov_iter_count(iter)) {
		struct page **pagevec;
		size_t bytes;
		size_t pgbase;
		unsigned npages, i;

		result = iov_iter_get_pages_alloc(iter, &pagevec,
						  wsize, &pgbase);
		if (result < 0)
			break;

		bytes = result;
		iov_iter_advance(iter, bytes);
		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

			req = nfs_create_request(dreq->ctx, pagevec[i],
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}

			if (desc.pg_error < 0) {
				nfs_free_request(req);
				result = desc.pg_error;
				break;
			}

			nfs_lock_request(req);
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(&desc, req)) {
				result = desc.pg_error;
				nfs_unlock_and_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			requested_bytes += req_len;
			pos += req_len;
			dreq->bytes_left -= req_len;
		}
		nfs_direct_release_pages(pagevec, npages);
		kvfree(pagevec);
		if (result < 0)
			break;
	}
	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_end(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq);
	return requested_bytes;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers from which to write data
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
{
	ssize_t result, requested;
	size_t count;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct nfs_direct_req *dreq;
	struct nfs_lock_context *l_ctx;
	loff_t pos, end;

	dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
		 file, iov_iter_count(iter), (long long) iocb->ki_pos);

	result = generic_write_checks(iocb, iter);
	if (result <= 0)
		return result;
	count = result;
	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

	pos = iocb->ki_pos;
	end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;

	task_io_account_write(count);

	result = -ENOMEM;
	dreq = nfs_direct_req_alloc();
	if (!dreq)
		goto out;

	dreq->inode = inode;
	dreq->bytes_left = dreq->max_count = count;
	dreq->io_start = pos;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	if (IS_ERR(l_ctx)) {
		result = PTR_ERR(l_ctx);
		nfs_direct_req_release(dreq);
		goto out_release;
	}
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;
	pnfs_init_ds_commit_info_ops(&dreq->ds_cinfo, inode);

	nfs_start_io_direct(inode);

	requested = nfs_direct_write_schedule_iovec(dreq, iter, pos);

	if (mapping->nrpages) {
		invalidate_inode_pages2_range(mapping,
					      pos >> PAGE_SHIFT, end);
	}

	nfs_end_io_direct(inode);

	if (requested > 0) {
		result = nfs_direct_wait(dreq);
		if (result > 0) {
			requested -= result;
			iocb->ki_pos = pos + result;
			/* XXX: should check the generic_write_sync retval */
			generic_write_sync(iocb, result);
		}
		iov_iter_revert(iter, requested);
	} else {
		result = requested;
	}

out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
					      sizeof(struct nfs_direct_req),
					      0, (SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD),
					      NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
	kmem_cache_destroy(nfs_direct_cachep);
}