/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>

#include "internal.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static struct kmem_cache *nfs_page_cachep;
static const struct rpc_call_ops nfs_pgio_common_ops;

static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
{
	p->npages = pagecount;
	if (pagecount <= ARRAY_SIZE(p->page_array))
		p->pagevec = p->page_array;
	else {
		p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
		if (!p->pagevec)
			p->npages = 0;
	}
	return p->pagevec != NULL;
}

void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
		       struct nfs_pgio_header *hdr,
		       void (*release)(struct nfs_pgio_header *hdr))
{
	hdr->req = nfs_list_entry(desc->pg_list.next);
	hdr->inode = desc->pg_inode;
	hdr->cred = hdr->req->wb_context->cred;
	hdr->io_start = req_offset(hdr->req);
	hdr->good_bytes = desc->pg_count;
	hdr->dreq = desc->pg_dreq;
	hdr->layout_private = desc->pg_layout_private;
	hdr->release = release;
	hdr->completion_ops = desc->pg_completion_ops;
	if (hdr->completion_ops->init_hdr)
		hdr->completion_ops->init_hdr(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgheader_init);

void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
	spin_lock(&hdr->lock);
	if (pos < hdr->io_start + hdr->good_bytes) {
		set_bit(NFS_IOHDR_ERROR, &hdr->flags);
		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
		hdr->good_bytes = pos - hdr->io_start;
		hdr->error = error;
	}
	spin_unlock(&hdr->lock);
}

static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_NOIO);
	if (p)
		INIT_LIST_HEAD(&p->wb_list);
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

static void
nfs_iocounter_inc(struct nfs_io_counter *c)
{
	atomic_inc(&c->io_count);
}

static void
nfs_iocounter_dec(struct nfs_io_counter *c)
{
	if (atomic_dec_and_test(&c->io_count)) {
		clear_bit(NFS_IO_INPROGRESS, &c->flags);
		smp_mb__after_atomic();
		wake_up_bit(&c->flags, NFS_IO_INPROGRESS);
	}
}

static int
__nfs_iocounter_wait(struct nfs_io_counter *c)
{
	wait_queue_head_t *wq = bit_waitqueue(&c->flags, NFS_IO_INPROGRESS);
	DEFINE_WAIT_BIT(q, &c->flags, NFS_IO_INPROGRESS);
	int ret = 0;

	do {
		prepare_to_wait(wq, &q.wait, TASK_KILLABLE);
		set_bit(NFS_IO_INPROGRESS, &c->flags);
		if (atomic_read(&c->io_count) == 0)
			break;
		ret = nfs_wait_bit_killable(&q.key);
	} while (atomic_read(&c->io_count) != 0 && !ret);
	finish_wait(wq, &q.wait);
	return ret;
}

/**
 * nfs_iocounter_wait - wait for i/o to complete
 * @c: nfs_io_counter to use
 *
 * returns -ERESTARTSYS if interrupted by a fatal signal.
 * Otherwise returns 0 once the io_count hits 0.
 */
int
nfs_iocounter_wait(struct nfs_io_counter *c)
{
	if (atomic_read(&c->io_count) == 0)
		return 0;
	return __nfs_iocounter_wait(c);
}

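/*
 * Example (a sketch of the pattern used by the file unlock path): drain
 * outstanding I/O on a lock context before the lock itself is released.
 *
 *	ret = nfs_iocounter_wait(&l_ctx->io_count);
 *	if (ret == -ERESTARTSYS)
 *		return ret;	(interrupted by a fatal signal)
 */
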
/*
 * nfs_page_group_lock - lock the head of the page group
 * @req - request in group that is to be locked
 * @nonblock - if true don't block waiting for lock
 *
 * this lock must be held if modifying the page group list
 *
 * return 0 on success, < 0 on error: -EAGAIN if nonblocking or the
 * result from wait_on_bit_lock
 *
 * NOTE: calling with nonblock=false should always have set the
 *       lock bit (see fs/buffer.c and other uses of wait_on_bit_lock
 *       with TASK_UNINTERRUPTIBLE), so there is no need to check the result.
 */
int
nfs_page_group_lock(struct nfs_page *req, bool nonblock)
{
	struct nfs_page *head = req->wb_head;

	WARN_ON_ONCE(head != head->wb_head);

	if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
		return 0;

	if (!nonblock)
		return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
				TASK_UNINTERRUPTIBLE);

	return -EAGAIN;
}

/*
 * nfs_page_group_lock_wait - wait for the lock to clear, but don't grab it
 * @req - a request in the group
 *
 * This is a blocking call to wait for the group lock to be cleared.
 */
void
nfs_page_group_lock_wait(struct nfs_page *req)
{
	struct nfs_page *head = req->wb_head;

	WARN_ON_ONCE(head != head->wb_head);

	wait_on_bit(&head->wb_flags, PG_HEADLOCK,
		TASK_UNINTERRUPTIBLE);
}

/*
 * nfs_page_group_unlock - unlock the head of the page group
 * @req - request in group that is to be unlocked
 */
void
nfs_page_group_unlock(struct nfs_page *req)
{
	struct nfs_page *head = req->wb_head;

	WARN_ON_ONCE(head != head->wb_head);

	smp_mb__before_atomic();
	clear_bit(PG_HEADLOCK, &head->wb_flags);
	smp_mb__after_atomic();
	wake_up_bit(&head->wb_flags, PG_HEADLOCK);
}

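/*
 * Illustrative use of the group lock (a sketch, not a call site in this
 * file): any walk of the wb_this_page ring must be bracketed by the head
 * lock, e.g.
 *
 *	nfs_page_group_lock(req, false);
 *	for (tmp = req->wb_this_page; tmp != req; tmp = tmp->wb_this_page)
 *		inspect_subrequest(tmp);	(hypothetical helper)
 *	nfs_page_group_unlock(req);
 */
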
/*
 * nfs_page_group_sync_on_bit_locked
 *
 * must be called with page group lock held
 */
static bool
nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
{
	struct nfs_page *head = req->wb_head;
	struct nfs_page *tmp;

	WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_flags));
	WARN_ON_ONCE(test_and_set_bit(bit, &req->wb_flags));

	tmp = req->wb_this_page;
	while (tmp != req) {
		if (!test_bit(bit, &tmp->wb_flags))
			return false;
		tmp = tmp->wb_this_page;
	}

	/* true! reset all bits */
	tmp = req;
	do {
		clear_bit(bit, &tmp->wb_flags);
		tmp = tmp->wb_this_page;
	} while (tmp != req);

	return true;
}

/*
 * nfs_page_group_sync_on_bit - set bit on current request, but only
 *   return true if the bit is set for all requests in page group
 * @req - request in page group
 * @bit - PG_* bit that is used to sync page group
 */
bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
{
	bool ret;

	nfs_page_group_lock(req, false);
	ret = nfs_page_group_sync_on_bit_locked(req, bit);
	nfs_page_group_unlock(req);

	return ret;
}

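/*
 * Example of the sync-on-bit idiom (a sketch; the real callers live in the
 * read and write completion paths): if three subrequests cover one page,
 * each completion does
 *
 *	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
 *		SetPageUptodate(req->wb_page);
 *
 * Only the last of the three sees 'true', so the page is marked exactly
 * once, and the bits are cleared again for later reuse of the group.
 */
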
/*
 * nfs_page_group_init - Initialize the page group linkage for @req
 * @req - a new nfs request
 * @prev - the previous request in page group, or NULL if @req is the first
 *         or only request in the group (the head).
 */
static inline void
nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
{
	WARN_ON_ONCE(prev == req);

	if (!prev) {
		/* a head request */
		req->wb_head = req;
		req->wb_this_page = req;
	} else {
		/* a subrequest */
		WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
		WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
		req->wb_head = prev->wb_head;
		req->wb_this_page = prev->wb_this_page;
		prev->wb_this_page = req;

		/* All subrequests take a ref on the head request until
		 * nfs_page_group_destroy is called */
		kref_get(&req->wb_head->wb_kref);

		/* grab extra ref if head request has extra ref from
		 * the write/commit path to handle handoff between write
		 * and commit lists */
		if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
			set_bit(PG_INODE_REF, &req->wb_flags);
			kref_get(&req->wb_kref);
		}
	}
}

/*
 * nfs_page_group_destroy - sync the destruction of page groups
 * @req - request that no longer needs the page group
 *
 * releases the page group reference from each member once all
 * members have called this function.
 */
static void
nfs_page_group_destroy(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
	struct nfs_page *tmp, *next;

	/* subrequests must release the ref on the head request */
	if (req->wb_head != req)
		nfs_release_request(req->wb_head);

	if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
		return;

	tmp = req;
	do {
		next = tmp->wb_this_page;
		/* unlink and free */
		tmp->wb_this_page = tmp;
		tmp->wb_head = tmp;
		nfs_free_request(tmp);
		tmp = next;
	} while (tmp != req);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @page: page to write
 * @last: last nfs request created for this page group or NULL if head
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * User should ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct page *page,
		   struct nfs_page *last, unsigned int offset,
		   unsigned int count)
{
	struct nfs_page		*req;
	struct nfs_lock_context *l_ctx;

	if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
		return ERR_PTR(-EBADF);
	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	/* get lock context early so we can deal with alloc failures */
	l_ctx = nfs_get_lock_context(ctx);
	if (IS_ERR(l_ctx)) {
		nfs_page_free(req);
		return ERR_CAST(l_ctx);
	}
	req->wb_lock_context = l_ctx;
	nfs_iocounter_inc(&l_ctx->io_count);

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_page    = page;
	req->wb_index	= page_file_index(page);
	page_cache_get(page);
	req->wb_offset  = offset;
	req->wb_pgbase	= offset;
	req->wb_bytes   = count;
	req->wb_context = get_nfs_open_context(ctx);
	kref_init(&req->wb_kref);
	nfs_page_group_init(req, last);
	return req;
}

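/*
 * Typical calling pattern (a sketch, assuming a locked page; the real
 * callers are the read and write paths):
 *
 *	req = nfs_create_request(ctx, page, NULL, 0, PAGE_CACHE_SIZE);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	(queue req for I/O; once the I/O is done:)
 *	nfs_unlock_and_release_request(req);
 */
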
/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_atomic();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_atomic();
	wake_up_bit(&req->wb_flags, PG_BUSY);
}

/**
 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 * @req: request to unlock and release
 */
void nfs_unlock_and_release_request(struct nfs_page *req)
{
	nfs_unlock_request(req);
	nfs_release_request(req);
}

/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clear
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	struct nfs_open_context *ctx = req->wb_context;
	struct nfs_lock_context *l_ctx = req->wb_lock_context;

	if (page != NULL) {
		page_cache_release(page);
		req->wb_page = NULL;
	}
	if (l_ctx != NULL) {
		nfs_iocounter_dec(&l_ctx->io_count);
		nfs_put_lock_context(l_ctx);
		req->wb_lock_context = NULL;
	}
	if (ctx != NULL) {
		put_nfs_open_context(ctx);
		req->wb_context = NULL;
	}
}

/**
 * nfs_free_request - Free up all resources allocated to @req
 * @req: request to free
 */
void nfs_free_request(struct nfs_page *req)
{
	WARN_ON_ONCE(req->wb_this_page != req);

	/* extra debug: make sure no sync bits are still set */
	WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags));

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}

/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_page_group_destroy);
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by fatal signals only.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	return wait_on_bit_io(&req->wb_flags, PG_BUSY,
			      TASK_UNINTERRUPTIBLE);
}

/*
 * nfs_generic_pg_test - determine if requests can be coalesced
 * @desc: pointer to descriptor
 * @prev: previous request in desc, or NULL
 * @req: this request
 *
 * Returns zero if @req can be coalesced into @desc, otherwise it returns
 * the size of the request.
 */
size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *prev, struct nfs_page *req)
{
	if (desc->pg_count > desc->pg_bsize) {
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}

	/*
	 * Limit the request size so that we can still allocate a page array
	 * for it without upsetting the slab allocator.
	 */
	if (((desc->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
			sizeof(struct page *) > PAGE_SIZE)
		return 0;

	return min(desc->pg_bsize - desc->pg_count, (size_t)req->wb_bytes);
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);

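/*
 * Worked example for the limit above: with 4 KiB pages and 8-byte
 * pointers, the check caps an I/O at PAGE_SIZE / sizeof(struct page *)
 * = 4096 / 8 = 512 pages (2 MiB), so the pagevec allocated by
 * nfs_pgarray_set() never exceeds a single page.
 */
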
struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
{
	struct nfs_pgio_header *hdr = ops->rw_alloc_header();

	if (hdr) {
		INIT_LIST_HEAD(&hdr->pages);
		spin_lock_init(&hdr->lock);
		hdr->rw_ops = ops;
	}
	return hdr;
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);

/*
 * nfs_pgio_header_free - Free a read or write header
 * @hdr: The header to free
 */
void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
{
	hdr->rw_ops->rw_free_header(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_free);

/**
 * nfs_pgio_data_destroy - make @hdr suitable for reuse
 *
 * Frees memory and releases refs from nfs_generic_pgio, so that it may
 * be called again.
 *
 * @hdr: A header that has had nfs_generic_pgio called
 */
void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
{
	if (hdr->args.context)
		put_nfs_open_context(hdr->args.context);
	if (hdr->page_array.pagevec != hdr->page_array.page_array)
		kfree(hdr->page_array.pagevec);
}
EXPORT_SYMBOL_GPL(nfs_pgio_data_destroy);

/**
 * nfs_pgio_rpcsetup - Set up arguments for a pageio call
 * @hdr: The pageio hdr
 * @count: Number of bytes to read
 * @offset: Initial offset
 * @how: How to commit data (writes only)
 * @cinfo: Commit information for the call (writes only)
 */
static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
			      unsigned int count, unsigned int offset,
			      int how, struct nfs_commit_info *cinfo)
{
	struct nfs_page *req = hdr->req;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with hdr->commit et al. */

	hdr->args.fh     = NFS_FH(hdr->inode);
	hdr->args.offset = req_offset(req) + offset;
	/* pnfs_set_layoutcommit needs this */
	hdr->mds_offset = hdr->args.offset;
	hdr->args.pgbase = req->wb_pgbase + offset;
	hdr->args.pages  = hdr->page_array.pagevec;
	hdr->args.count  = count;
	hdr->args.context = get_nfs_open_context(req->wb_context);
	hdr->args.lock_context = req->wb_lock_context;
	hdr->args.stable  = NFS_UNSTABLE;
	switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
	case 0:
		break;
	case FLUSH_COND_STABLE:
		if (nfs_reqs_to_commit(cinfo))
			break;
	default:
		hdr->args.stable = NFS_FILE_SYNC;
	}

	hdr->res.fattr   = &hdr->fattr;
	hdr->res.count   = count;
	hdr->res.eof     = 0;
	hdr->res.verf    = &hdr->verf;
	nfs_fattr_init(&hdr->fattr);
}

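/*
 * Note on the stable-write switch above: a write sent with
 * FLUSH_COND_STABLE and no requests already queued for COMMIT falls
 * through to the default case and goes out as NFS_FILE_SYNC, saving a
 * separate COMMIT round trip; with commits pending it stays NFS_UNSTABLE
 * and is batched into the upcoming COMMIT instead.
 */
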
/**
 * nfs_pgio_prepare - Prepare pageio hdr to go over the wire
 * @task: The current task
 * @calldata: pageio header to prepare
 */
static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	int err;
	err = NFS_PROTO(hdr->inode)->pgio_rpc_prepare(task, hdr);
	if (err)
		rpc_exit(task, err);
}

int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
		      const struct rpc_call_ops *call_ops, int how, int flags)
{
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &hdr->args,
		.rpc_resp = &hdr->res,
		.rpc_cred = hdr->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.task = &hdr->task,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = hdr,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | flags,
	};
	int ret = 0;

	hdr->rw_ops->rw_initiate(hdr, &msg, &task_setup_data, how);

	dprintk("NFS: %5u initiated pgio call "
		"(req %s/%llu, %u bytes @ offset %llu)\n",
		hdr->task.tk_pid,
		hdr->inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(hdr->inode),
		hdr->args.count,
		(unsigned long long)hdr->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task)) {
		ret = PTR_ERR(task);
		goto out;
	}
	if (how & FLUSH_SYNC) {
		ret = rpc_wait_for_completion_task(task);
		if (ret == 0)
			ret = task->tk_status;
	}
	rpc_put_task(task);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_initiate_pgio);

/**
 * nfs_pgio_error - Clean up from a pageio error
 * @desc: IO descriptor
 * @hdr: pageio header
 */
static int nfs_pgio_error(struct nfs_pageio_descriptor *desc,
			  struct nfs_pgio_header *hdr)
{
	set_bit(NFS_IOHDR_REDO, &hdr->flags);
	nfs_pgio_data_destroy(hdr);
	hdr->completion_ops->completion(hdr);
	desc->pg_completion_ops->error_cleanup(&desc->pg_list);
	return -ENOMEM;
}

/**
 * nfs_pgio_release - Release pageio data
 * @calldata: The pageio header to release
 */
static void nfs_pgio_release(void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	if (hdr->rw_ops->rw_release)
		hdr->rw_ops->rw_release(hdr);
	nfs_pgio_data_destroy(hdr);
	hdr->completion_ops->completion(hdr);
}

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @compl_ops: pointer to pageio completion operations
 * @rw_ops: pointer to nfs read/write operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     const struct nfs_pageio_ops *pg_ops,
		     const struct nfs_pgio_completion_ops *compl_ops,
		     const struct nfs_rw_ops *rw_ops,
		     size_t bsize,
		     int io_flags)
{
	INIT_LIST_HEAD(&desc->pg_list);
	desc->pg_bytes_written = 0;
	desc->pg_count = 0;
	desc->pg_bsize = bsize;
	desc->pg_base = 0;
	desc->pg_moreio = 0;
	desc->pg_recoalesce = 0;
	desc->pg_inode = inode;
	desc->pg_ops = pg_ops;
	desc->pg_completion_ops = compl_ops;
	desc->pg_rw_ops = rw_ops;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
	desc->pg_lseg = NULL;
	desc->pg_dreq = NULL;
	desc->pg_layout_private = NULL;
}
EXPORT_SYMBOL_GPL(nfs_pageio_init);

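/*
 * Typical descriptor lifecycle (a sketch, assuming pg_ops/compl_ops/rw_ops
 * and bsize in scope; the concrete init wrappers such as
 * nfs_pageio_init_read() and nfs_pageio_init_write() live in read.c and
 * write.c):
 *
 *	struct nfs_pageio_descriptor pgio;
 *
 *	nfs_pageio_init(&pgio, inode, pg_ops, compl_ops, rw_ops, bsize, 0);
 *	while (!list_empty(&pages)) {
 *		req = nfs_list_entry(pages.next);
 *		nfs_list_remove_request(req);
 *		if (!nfs_pageio_add_request(&pgio, req))
 *			break;	(pgio.pg_error holds the reason)
 *	}
 *	nfs_pageio_complete(&pgio);
 */
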
/**
 * nfs_pgio_result - Basic pageio error handling
 * @task: The task that ran
 * @calldata: Pageio header to check
 */
static void nfs_pgio_result(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	struct inode *inode = hdr->inode;

	dprintk("NFS: %s: %5u, (status %d)\n", __func__,
		task->tk_pid, task->tk_status);

	if (hdr->rw_ops->rw_done(task, hdr, inode) != 0)
		return;
	if (task->tk_status < 0)
		nfs_set_pgio_error(hdr, task->tk_status, hdr->args.offset);
	else
		hdr->rw_ops->rw_result(task, hdr);
}

/*
 * Create an RPC task for the given read or write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
		     struct nfs_pgio_header *hdr)
{
	struct nfs_page		*req;
	struct page		**pages,
				*last_page;
	struct list_head *head = &desc->pg_list;
	struct nfs_commit_info cinfo;
	unsigned int pagecount, pageused;

	pagecount = nfs_page_array_len(desc->pg_base, desc->pg_count);
	if (!nfs_pgarray_set(&hdr->page_array, pagecount))
		return nfs_pgio_error(desc, hdr);

	nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
	pages = hdr->page_array.pagevec;
	last_page = NULL;
	pageused = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &hdr->pages);

		if (!last_page || last_page != req->wb_page) {
			pageused++;
			if (pageused > pagecount)
				break;
			*pages++ = last_page = req->wb_page;
		}
	}
	if (WARN_ON_ONCE(pageused != pagecount))
		return nfs_pgio_error(desc, hdr);

	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
	    (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
		desc->pg_ioflags &= ~FLUSH_COND_STABLE;

	/* Set up the argument struct */
	nfs_pgio_rpcsetup(hdr, desc->pg_count, 0, desc->pg_ioflags, &cinfo);
	desc->pg_rpc_callops = &nfs_pgio_common_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_generic_pgio);

static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
		return -ENOMEM;
	}
	nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
	ret = nfs_generic_pgio(desc, hdr);
	if (ret == 0)
		ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
					hdr, desc->pg_rpc_callops,
					desc->pg_ioflags, 0);
	return ret;
}

static bool nfs_match_open_context(const struct nfs_open_context *ctx1,
				   const struct nfs_open_context *ctx2)
{
	return ctx1->cred == ctx2->cred && ctx1->state == ctx2->state;
}

static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
				   const struct nfs_lock_context *l2)
{
	return l1->lockowner.l_owner == l2->lockowner.l_owner
		&& l1->lockowner.l_pid == l2->lockowner.l_pid;
}

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static bool nfs_can_coalesce_requests(struct nfs_page *prev,
				      struct nfs_page *req,
				      struct nfs_pageio_descriptor *pgio)
{
	size_t size;

	if (prev) {
		if (!nfs_match_open_context(req->wb_context, prev->wb_context))
			return false;
		if (req->wb_context->dentry->d_inode->i_flock != NULL &&
		    !nfs_match_lock_context(req->wb_lock_context,
					    prev->wb_lock_context))
			return false;
		if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
			return false;
		if (req->wb_page == prev->wb_page) {
			if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes)
				return false;
		} else {
			if (req->wb_pgbase != 0 ||
			    prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
				return false;
		}
	}
	size = pgio->pg_ops->pg_test(pgio, prev, req);
	WARN_ON_ONCE(size > req->wb_bytes);
	if (size && size < req->wb_bytes)
		req->wb_bytes = size;
	return size > 0;
}

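/*
 * Concrete example of the checks above: two PAGE_CACHE_SIZE requests from
 * the same open context covering file offsets 0-4095 and 4096-8191
 * coalesce (byte-contiguous, prev ends on a page boundary, req starts at
 * pgbase 0).  A request starting at offset 12288 after one ending at 8191
 * fails the req_offset() contiguity test and must start a new RPC.
 */
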
/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	struct nfs_page *prev = NULL;
	if (desc->pg_count != 0) {
		prev = nfs_list_entry(desc->pg_list.prev);
	} else {
		if (desc->pg_ops->pg_init)
			desc->pg_ops->pg_init(desc, req);
		desc->pg_base = req->wb_pgbase;
	}
	if (!nfs_can_coalesce_requests(prev, req, desc))
		return 0;
	nfs_list_remove_request(req);
	nfs_list_add_request(req, &desc->pg_list);
	desc->pg_count += req->wb_bytes;
	return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	if (!list_empty(&desc->pg_list)) {
		int error = desc->pg_ops->pg_doio(desc);
		if (error < 0)
			desc->pg_error = error;
		else
			desc->pg_bytes_written += desc->pg_count;
	}
	if (list_empty(&desc->pg_list)) {
		desc->pg_count = 0;
		desc->pg_base = 0;
	}
}

/**
 * __nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * This may split a request into subrequests which are all part of the
 * same page group.
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	struct nfs_page *subreq;
	unsigned int bytes_left = 0;
	unsigned int offset, pgbase;

	nfs_page_group_lock(req, false);

	subreq = req;
	bytes_left = subreq->wb_bytes;
	offset = subreq->wb_offset;
	pgbase = subreq->wb_pgbase;

	do {
		if (!nfs_pageio_do_add_request(desc, subreq)) {
			/* make sure pg_test call(s) did nothing */
			WARN_ON_ONCE(subreq->wb_bytes != bytes_left);
			WARN_ON_ONCE(subreq->wb_offset != offset);
			WARN_ON_ONCE(subreq->wb_pgbase != pgbase);

			nfs_page_group_unlock(req);
			desc->pg_moreio = 1;
			nfs_pageio_doio(desc);
			if (desc->pg_error < 0)
				return 0;
			if (desc->pg_recoalesce)
				return 0;
			/* retry add_request for this subreq */
			nfs_page_group_lock(req, false);
			continue;
		}

		/* check for buggy pg_test call(s) */
		WARN_ON_ONCE(subreq->wb_bytes + subreq->wb_pgbase > PAGE_SIZE);
		WARN_ON_ONCE(subreq->wb_bytes > bytes_left);
		WARN_ON_ONCE(subreq->wb_bytes == 0);

		bytes_left -= subreq->wb_bytes;
		offset += subreq->wb_bytes;
		pgbase += subreq->wb_bytes;

		if (bytes_left) {
			subreq = nfs_create_request(req->wb_context,
					req->wb_page,
					subreq, pgbase, bytes_left);
			if (IS_ERR(subreq))
				goto err_ptr;
			nfs_lock_request(subreq);
			subreq->wb_offset  = offset;
			subreq->wb_index = req->wb_index;
		}
	} while (bytes_left > 0);

	nfs_page_group_unlock(req);
	return 1;
err_ptr:
	desc->pg_error = PTR_ERR(subreq);
	nfs_page_group_unlock(req);
	return 0;
}

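/*
 * Example of the splitting above: if pg_test trims a 4096-byte request to
 * 1024 bytes (say only 1024 bytes remain before pg_bsize is reached), the
 * first 1024 bytes are queued and nfs_create_request() builds a
 * subrequest for the remaining 3072 bytes of the same page, linked into
 * the same page group and handled on the next loop iteration.
 */
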
static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
	LIST_HEAD(head);

	do {
		list_splice_init(&desc->pg_list, &head);
		desc->pg_bytes_written -= desc->pg_count;
		desc->pg_count = 0;
		desc->pg_base = 0;
		desc->pg_recoalesce = 0;
		desc->pg_moreio = 0;

		while (!list_empty(&head)) {
			struct nfs_page *req;

			req = list_first_entry(&head, struct nfs_page, wb_list);
			nfs_list_remove_request(req);
			if (__nfs_pageio_add_request(desc, req))
				continue;
			if (desc->pg_error < 0)
				return 0;
			break;
		}
	} while (desc->pg_recoalesce);
	return 1;
}

int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
		struct nfs_page *req)
{
	int ret;

	do {
		ret = __nfs_pageio_add_request(desc, req);
		if (ret)
			break;
		if (desc->pg_error < 0)
			break;
		ret = nfs_do_recoalesce(desc);
	} while (ret);

	return ret;
}

/*
 * nfs_pageio_resend - Transfer requests to new descriptor and resend
 * @hdr - the pgio header to move request from
 * @desc - the pageio descriptor to add requests to
 *
 * Try to move each request (nfs_page) from @hdr to @desc then attempt
 * to send them.
 *
 * Returns 0 on success and < 0 on error.
 */
int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
		      struct nfs_pgio_header *hdr)
{
	LIST_HEAD(failed);

	desc->pg_dreq = hdr->dreq;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);

		nfs_list_remove_request(req);
		if (!nfs_pageio_add_request(desc, req))
			nfs_list_add_request(req, &failed);
	}
	nfs_pageio_complete(desc);
	if (!list_empty(&failed)) {
		list_move(&failed, &hdr->pages);
		return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_pageio_resend);

/**
 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	for (;;) {
		nfs_pageio_doio(desc);
		if (!desc->pg_recoalesce)
			break;
		if (!nfs_do_recoalesce(desc))
			break;
	}
}

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	if (!list_empty(&desc->pg_list)) {
		struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
		if (index != prev->wb_index + 1)
			nfs_pageio_complete(desc);
	}
}

int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}

static const struct rpc_call_ops nfs_pgio_common_ops = {
	.rpc_call_prepare = nfs_pgio_prepare,
	.rpc_call_done = nfs_pgio_result,
	.rpc_release = nfs_pgio_release,
};

const struct nfs_pageio_ops nfs_pgio_rw_ops = {
	.pg_test = nfs_generic_pg_test,
	.pg_doio = nfs_generic_pg_pgios,
};