/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>

#include "internal.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static struct kmem_cache *nfs_page_cachep;
static const struct rpc_call_ops nfs_pgio_common_ops;

static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
{
	p->npages = pagecount;
	if (pagecount <= ARRAY_SIZE(p->page_array))
		p->pagevec = p->page_array;
	else {
		p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
		if (!p->pagevec)
			p->npages = 0;
	}
	return p->pagevec != NULL;
}

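/*
 * Example (assuming 4 KiB pages): a 16 KiB I/O needs 4 page pointers
 * and fits in the embedded p->page_array, so no allocation happens;
 * a 1 MiB I/O needs 256 pointers, exceeds ARRAY_SIZE(p->page_array),
 * and takes the kcalloc() fallback, returning false only if that
 * allocation fails.
 */
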
struct nfs_pgio_mirror *
nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc)
{
	return nfs_pgio_has_mirroring(desc) ?
		&desc->pg_mirrors[desc->pg_mirror_idx] :
		&desc->pg_mirrors[0];
}
EXPORT_SYMBOL_GPL(nfs_pgio_current_mirror);

void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
		       struct nfs_pgio_header *hdr,
		       void (*release)(struct nfs_pgio_header *hdr))
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	hdr->req = nfs_list_entry(mirror->pg_list.next);
	hdr->inode = desc->pg_inode;
	hdr->cred = hdr->req->wb_context->cred;
	hdr->io_start = req_offset(hdr->req);
	hdr->good_bytes = mirror->pg_count;
	hdr->dreq = desc->pg_dreq;
	hdr->layout_private = desc->pg_layout_private;
	hdr->release = release;
	hdr->completion_ops = desc->pg_completion_ops;
	if (hdr->completion_ops->init_hdr)
		hdr->completion_ops->init_hdr(hdr);

	hdr->pgio_mirror_idx = desc->pg_mirror_idx;
}
EXPORT_SYMBOL_GPL(nfs_pgheader_init);

void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
	spin_lock(&hdr->lock);
	if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags)
	    || pos < hdr->io_start + hdr->good_bytes) {
		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
		hdr->good_bytes = pos - hdr->io_start;
		hdr->error = error;
	}
	spin_unlock(&hdr->lock);
}

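/*
 * Example: if a header starts at io_start = 0 with good_bytes = 16384
 * and an error is first reported at pos = 8192, good_bytes is trimmed
 * to 8192 - 0 = 8192, so only the first half of the I/O counts as
 * having completed successfully.
 */
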
static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page	*p = kmem_cache_zalloc(nfs_page_cachep, GFP_NOIO);
	if (p)
		INIT_LIST_HEAD(&p->wb_list);
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_iocounter_wait - wait for i/o to complete
 * @l_ctx: nfs_lock_context with io_counter to use
 *
 * returns -ERESTARTSYS if interrupted by a fatal signal.
 * Otherwise returns 0 once the io_count hits 0.
 */
int
nfs_iocounter_wait(struct nfs_lock_context *l_ctx)
{
	return wait_on_atomic_t(&l_ctx->io_count, nfs_wait_atomic_killable,
			TASK_KILLABLE);
}

/*
 * nfs_page_group_lock - lock the head of the page group
 * @req - request in group that is to be locked
 * @nonblock - if true don't block waiting for lock
 *
 * this lock must be held if modifying the page group list
 *
 * return 0 on success, < 0 on error: -EAGAIN if nonblocking or the
 * result from wait_on_bit_lock
 *
 * NOTE: calling with nonblock=false should always have set the
 *       lock bit (see fs/buffer.c and other uses of wait_on_bit_lock
 *       with TASK_UNINTERRUPTIBLE), so there is no need to check the result.
 */
int
nfs_page_group_lock(struct nfs_page *req, bool nonblock)
{
	struct nfs_page *head = req->wb_head;

	WARN_ON_ONCE(head != head->wb_head);

	if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
		return 0;

	if (!nonblock)
		return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
				TASK_UNINTERRUPTIBLE);

	return -EAGAIN;
}

/*
 * nfs_page_group_lock_wait - wait for the lock to clear, but don't grab it
 * @req - a request in the group
 *
 * This is a blocking call to wait for the group lock to be cleared.
 */
void
nfs_page_group_lock_wait(struct nfs_page *req)
{
	struct nfs_page *head = req->wb_head;

	WARN_ON_ONCE(head != head->wb_head);

	wait_on_bit(&head->wb_flags, PG_HEADLOCK,
		TASK_UNINTERRUPTIBLE);
}

/*
 * nfs_page_group_unlock - unlock the head of the page group
 * @req - request in group that is to be unlocked
 */
void
nfs_page_group_unlock(struct nfs_page *req)
{
	struct nfs_page *head = req->wb_head;

	WARN_ON_ONCE(head != head->wb_head);

	smp_mb__before_atomic();
	clear_bit(PG_HEADLOCK, &head->wb_flags);
	smp_mb__after_atomic();
	wake_up_bit(&head->wb_flags, PG_HEADLOCK);
}

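/*
 * Usage sketch (illustrative): any walk or modification of the
 * wb_this_page ring is bracketed by the head lock, e.g.
 *
 *	nfs_page_group_lock(req, false);
 *	for (tmp = req->wb_this_page; tmp != req; tmp = tmp->wb_this_page)
 *		... inspect tmp ...
 *	nfs_page_group_unlock(req);
 */
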
/*
 * nfs_page_group_sync_on_bit_locked
 *
 * must be called with page group lock held
 */
static bool
nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
{
	struct nfs_page *head = req->wb_head;
	struct nfs_page *tmp;

	WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_flags));
	WARN_ON_ONCE(test_and_set_bit(bit, &req->wb_flags));

	tmp = req->wb_this_page;
	while (tmp != req) {
		if (!test_bit(bit, &tmp->wb_flags))
			return false;
		tmp = tmp->wb_this_page;
	}

	/* true! reset all bits */
	tmp = req;
	do {
		clear_bit(bit, &tmp->wb_flags);
		tmp = tmp->wb_this_page;
	} while (tmp != req);

	return true;
}

/*
 * nfs_page_group_sync_on_bit - set bit on current request, but only
 *   return true if the bit is set for all requests in page group
 * @req - request in page group
 * @bit - PG_* bit that is used to sync page group
 */
bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
{
	bool ret;

	nfs_page_group_lock(req, false);
	ret = nfs_page_group_sync_on_bit_locked(req, bit);
	nfs_page_group_unlock(req);

	return ret;
}

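/*
 * Usage sketch: the write path uses this to run once-per-group work,
 * e.g. (modeled on nfs_mark_uptodate() in fs/nfs/write.c)
 *
 *	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
 *		SetPageUptodate(req->wb_page);
 *
 * Only the caller that sets the last outstanding bit sees 'true'.
 */
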
/*
 * nfs_page_group_init - Initialize the page group linkage for @req
 * @req - a new nfs request
 * @prev - the previous request in page group, or NULL if @req is the first
 *	   or only request in the group (the head).
 */
static inline void
nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
{
	struct inode *inode;
	WARN_ON_ONCE(prev == req);

	if (!prev) {
		/* a head request */
		req->wb_head = req;
		req->wb_this_page = req;
	} else {
		/* a subrequest */
		WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
		WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
		req->wb_head = prev->wb_head;
		req->wb_this_page = prev->wb_this_page;
		prev->wb_this_page = req;

		/* All subrequests take a ref on the head request until
		 * nfs_page_group_destroy is called */
		kref_get(&req->wb_head->wb_kref);

		/* grab extra ref and bump the request count if head request
		 * has extra ref from the write/commit path to handle handoff
		 * between write and commit lists. */
		if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
			inode = page_file_mapping(req->wb_page)->host;
			set_bit(PG_INODE_REF, &req->wb_flags);
			kref_get(&req->wb_kref);
			spin_lock(&inode->i_lock);
			NFS_I(inode)->nrequests++;
			spin_unlock(&inode->i_lock);
		}
	}
}

/*
 * nfs_page_group_destroy - sync the destruction of page groups
 * @req - request that no longer needs the page group
 *
 * releases the page group reference from each member once all
 * members have called this function.
 */
static void
nfs_page_group_destroy(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
	struct nfs_page *tmp, *next;

	/* subrequests must release the ref on the head request */
	if (req->wb_head != req)
		nfs_release_request(req->wb_head);

	if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
		return;

	tmp = req;
	do {
		next = tmp->wb_this_page;
		/* unlink and free */
		tmp->wb_this_page = tmp;
		tmp->wb_head = tmp;
		nfs_free_request(tmp);
		tmp = next;
	} while (tmp != req);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @page: page to write
 * @last: last nfs request created for this page group or NULL if head
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * User should ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct page *page,
		   struct nfs_page *last, unsigned int offset,
		   unsigned int count)
{
	struct nfs_page		*req;
	struct nfs_lock_context *l_ctx;

	if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
		return ERR_PTR(-EBADF);
	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	/* get lock context early so we can deal with alloc failures */
	l_ctx = nfs_get_lock_context(ctx);
	if (IS_ERR(l_ctx)) {
		nfs_page_free(req);
		return ERR_CAST(l_ctx);
	}
	req->wb_lock_context = l_ctx;
	atomic_inc(&l_ctx->io_count);

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_page    = page;
	if (page) {
		req->wb_index = page_file_index(page);
		get_page(page);
	}
	req->wb_offset  = offset;
	req->wb_pgbase	= offset;
	req->wb_bytes   = count;
	req->wb_context = get_nfs_open_context(ctx);
	kref_init(&req->wb_kref);
	nfs_page_group_init(req, last);
	return req;
}

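/*
 * Usage sketch (illustrative, error paths abbreviated): create a head
 * request covering a whole locked page, then drop it when finished:
 *
 *	req = nfs_create_request(ctx, page, NULL, 0, PAGE_SIZE);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	...
 *	nfs_release_request(req);
 */
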
/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_atomic();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_atomic();
	wake_up_bit(&req->wb_flags, PG_BUSY);
}

/**
 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 * @req: request to unlock and release
 */
void nfs_unlock_and_release_request(struct nfs_page *req)
{
	nfs_unlock_request(req);
	nfs_release_request(req);
}

/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clear
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	struct nfs_open_context *ctx = req->wb_context;
	struct nfs_lock_context *l_ctx = req->wb_lock_context;

	if (page != NULL) {
		put_page(page);
		req->wb_page = NULL;
	}
	if (l_ctx != NULL) {
		if (atomic_dec_and_test(&l_ctx->io_count))
			wake_up_atomic_t(&l_ctx->io_count);
		nfs_put_lock_context(l_ctx);
		req->wb_lock_context = NULL;
	}
	if (ctx != NULL) {
		put_nfs_open_context(ctx);
		req->wb_context = NULL;
	}
}

/**
 * nfs_free_request - Free up all resources allocated to the request
 * @req: request to free
 *
 * Note: Should never be called with the spinlock held!
 */
void nfs_free_request(struct nfs_page *req)
{
	WARN_ON_ONCE(req->wb_this_page != req);

	/* extra debug: make sure no sync bits are still set */
	WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags));

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}

/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 */
void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_page_group_destroy);
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by fatal signals only.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	return wait_on_bit_io(&req->wb_flags, PG_BUSY,
			      TASK_UNINTERRUPTIBLE);
}

/*
 * nfs_generic_pg_test - determine if requests can be coalesced
 * @desc: pointer to descriptor
 * @prev: previous request in desc, or NULL
 * @req: this request
 *
 * Returns zero if @req cannot be coalesced into @desc, otherwise it returns
 * the number of bytes that can be coalesced.
 */
size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *prev, struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (mirror->pg_count > mirror->pg_bsize) {
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}

	/*
	 * Limit the request size so that we can still allocate a page array
	 * for it without upsetting the slab allocator.
	 */
	if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
			sizeof(struct page *) > PAGE_SIZE)
		return 0;

	return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes);
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);

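/*
 * Example (assuming 4 KiB pages and 8-byte pointers): the page-array
 * check above fires once (pg_count + wb_bytes) spans more than
 * PAGE_SIZE / sizeof(struct page *) = 512 pages, i.e. beyond 2 MiB of
 * coalesced data, which keeps the pointer array within a single page.
 */
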
struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
{
	struct nfs_pgio_header *hdr = ops->rw_alloc_header();

	if (hdr) {
		INIT_LIST_HEAD(&hdr->pages);
		spin_lock_init(&hdr->lock);
		hdr->rw_ops = ops;
	}
	return hdr;
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);

/**
 * nfs_pgio_header_free - Free a read or write header
 * @hdr: The header to free
 */
void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
{
	hdr->rw_ops->rw_free_header(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_free);

/**
 * nfs_pgio_data_destroy - make @hdr suitable for reuse
 *
 * Frees memory and releases refs from nfs_generic_pgio, so that it may
 * be called again.
 *
 * @hdr: A header that has had nfs_generic_pgio called
 */
void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
{
	if (hdr->args.context)
		put_nfs_open_context(hdr->args.context);
	if (hdr->page_array.pagevec != hdr->page_array.page_array)
		kfree(hdr->page_array.pagevec);
}
EXPORT_SYMBOL_GPL(nfs_pgio_data_destroy);

/**
 * nfs_pgio_rpcsetup - Set up arguments for a pageio call
 * @hdr: The pageio hdr
 * @count: Number of bytes to read
 * @offset: Initial offset
 * @how: How to commit data (writes only)
 * @cinfo: Commit information for the call (writes only)
 */
static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
			      unsigned int count, unsigned int offset,
			      int how, struct nfs_commit_info *cinfo)
{
	struct nfs_page *req = hdr->req;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with hdr->commit et al. */

	hdr->args.fh     = NFS_FH(hdr->inode);
	hdr->args.offset = req_offset(req) + offset;
	/* pnfs_set_layoutcommit needs this */
	hdr->mds_offset = hdr->args.offset;
	hdr->args.pgbase = req->wb_pgbase + offset;
	hdr->args.pages  = hdr->page_array.pagevec;
	hdr->args.count  = count;
	hdr->args.context = get_nfs_open_context(req->wb_context);
	hdr->args.lock_context = req->wb_lock_context;
	hdr->args.stable  = NFS_UNSTABLE;
	switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
	case 0:
		break;
	case FLUSH_COND_STABLE:
		if (nfs_reqs_to_commit(cinfo))
			break;
	default:
		hdr->args.stable = NFS_FILE_SYNC;
	}

	hdr->res.fattr   = &hdr->fattr;
	hdr->res.count   = count;
	hdr->res.eof     = 0;
	hdr->res.verf    = &hdr->verf;
	nfs_fattr_init(&hdr->fattr);
}

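/*
 * The stable-mode selection above, summarized: writes default to
 * NFS_UNSTABLE; FLUSH_COND_STABLE stays unstable only while requests
 * are still queued for commit (nfs_reqs_to_commit() != 0); FLUSH_STABLE,
 * or FLUSH_COND_STABLE with nothing left to commit, falls through to
 * NFS_FILE_SYNC so that no separate COMMIT is needed.
 */
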
/**
 * nfs_pgio_prepare - Prepare pageio hdr to go over the wire
 * @task: The current task
 * @calldata: pageio header to prepare
 */
static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	int err;
	err = NFS_PROTO(hdr->inode)->pgio_rpc_prepare(task, hdr);
	if (err)
		rpc_exit(task, err);
}

int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
		      struct rpc_cred *cred, const struct nfs_rpc_ops *rpc_ops,
		      const struct rpc_call_ops *call_ops, int how, int flags)
{
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &hdr->args,
		.rpc_resp = &hdr->res,
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.task = &hdr->task,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = hdr,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | flags,
	};
	int ret = 0;

	hdr->rw_ops->rw_initiate(hdr, &msg, rpc_ops, &task_setup_data, how);

	dprintk("NFS: initiated pgio call "
		"(req %s/%llu, %u bytes @ offset %llu)\n",
		hdr->inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(hdr->inode),
		hdr->args.count,
		(unsigned long long)hdr->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task)) {
		ret = PTR_ERR(task);
		goto out;
	}
	if (how & FLUSH_SYNC) {
		ret = rpc_wait_for_completion_task(task);
		if (ret == 0)
			ret = task->tk_status;
	}
	rpc_put_task(task);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_initiate_pgio);

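/*
 * Usage sketch (illustrative): a pg_doio implementation typically calls
 * this after nfs_generic_pgio() has packed the requests, e.g.
 *
 *	ret = nfs_initiate_pgio(NFS_CLIENT(inode), hdr, hdr->cred,
 *				NFS_PROTO(inode), &nfs_pgio_common_ops,
 *				how, 0);
 *
 * With FLUSH_SYNC set in @how the call waits for the RPC and returns
 * its tk_status; otherwise it returns once the task is queued.
 */
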
/**
 * nfs_pgio_error - Clean up from a pageio error
 * @hdr: pageio header
 */
static void nfs_pgio_error(struct nfs_pgio_header *hdr)
{
	set_bit(NFS_IOHDR_REDO, &hdr->flags);
	nfs_pgio_data_destroy(hdr);
	hdr->completion_ops->completion(hdr);
}

/**
 * nfs_pgio_release - Release pageio data
 * @calldata: The pageio header to release
 */
static void nfs_pgio_release(void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	nfs_pgio_data_destroy(hdr);
	hdr->completion_ops->completion(hdr);
}

static void nfs_pageio_mirror_init(struct nfs_pgio_mirror *mirror,
				   unsigned int bsize)
{
	INIT_LIST_HEAD(&mirror->pg_list);
	mirror->pg_bytes_written = 0;
	mirror->pg_count = 0;
	mirror->pg_bsize = bsize;
	mirror->pg_base = 0;
	mirror->pg_recoalesce = 0;
}

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @compl_ops: pointer to pageio completion operations
 * @rw_ops: pointer to nfs read/write operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     const struct nfs_pageio_ops *pg_ops,
		     const struct nfs_pgio_completion_ops *compl_ops,
		     const struct nfs_rw_ops *rw_ops,
		     size_t bsize,
		     int io_flags)
{
	struct nfs_pgio_mirror *new;
	int i;

	desc->pg_moreio = 0;
	desc->pg_inode = inode;
	desc->pg_ops = pg_ops;
	desc->pg_completion_ops = compl_ops;
	desc->pg_rw_ops = rw_ops;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
	desc->pg_lseg = NULL;
	desc->pg_dreq = NULL;
	desc->pg_layout_private = NULL;
	desc->pg_bsize = bsize;

	desc->pg_mirror_count = 1;
	desc->pg_mirror_idx = 0;

	if (pg_ops->pg_get_mirror_count) {
		/* until we have a request, we don't have an lseg and no
		 * idea how many mirrors there will be */
		new = kcalloc(NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX,
			      sizeof(struct nfs_pgio_mirror), GFP_KERNEL);
		desc->pg_mirrors_dynamic = new;
		desc->pg_mirrors = new;

		for (i = 0; i < NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX; i++)
			nfs_pageio_mirror_init(&desc->pg_mirrors[i], bsize);
	} else {
		desc->pg_mirrors_dynamic = NULL;
		desc->pg_mirrors = desc->pg_mirrors_static;
		nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
	}
}
EXPORT_SYMBOL_GPL(nfs_pageio_init);

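/*
 * Lifecycle sketch (illustrative; real callers go through wrappers such
 * as the read/write pageio init helpers): set up a descriptor, feed it
 * requests, then flush it:
 *
 *	struct nfs_pageio_descriptor pgio;
 *
 *	nfs_pageio_init(&pgio, inode, pg_ops, compl_ops, rw_ops, bsize, 0);
 *	nfs_pageio_add_request(&pgio, req);
 *	...
 *	nfs_pageio_complete(&pgio);
 */
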
/**
 * nfs_pgio_result - Basic pageio error handling
 * @task: The task that ran
 * @calldata: Pageio header to check
 */
static void nfs_pgio_result(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	struct inode *inode = hdr->inode;

	dprintk("NFS: %s: %5u, (status %d)\n", __func__,
		task->tk_pid, task->tk_status);

	if (hdr->rw_ops->rw_done(task, hdr, inode) != 0)
		return;
	if (task->tk_status < 0)
		nfs_set_pgio_error(hdr, task->tk_status, hdr->args.offset);
	else
		hdr->rw_ops->rw_result(task, hdr);
}

/*
 * Create an RPC task for the given read or write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
		     struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	struct nfs_page		*req;
	struct page		**pages,
				*last_page;
	struct list_head *head = &mirror->pg_list;
	struct nfs_commit_info cinfo;
	unsigned int pagecount, pageused;

	pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count);
	if (!nfs_pgarray_set(&hdr->page_array, pagecount)) {
		nfs_pgio_error(hdr);
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}

	nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
	pages = hdr->page_array.pagevec;
	last_page = NULL;
	pageused = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &hdr->pages);

		if (!last_page || last_page != req->wb_page) {
			pageused++;
			if (pageused > pagecount)
				break;
			*pages++ = last_page = req->wb_page;
		}
	}
	if (WARN_ON_ONCE(pageused != pagecount)) {
		nfs_pgio_error(hdr);
		desc->pg_error = -EINVAL;
		return desc->pg_error;
	}

	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
	    (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
		desc->pg_ioflags &= ~FLUSH_COND_STABLE;

	/* Set up the argument struct */
	nfs_pgio_rpcsetup(hdr, mirror->pg_count, 0, desc->pg_ioflags, &cinfo);
	desc->pg_rpc_callops = &nfs_pgio_common_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_generic_pgio);

static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
	ret = nfs_generic_pgio(desc, hdr);
	if (ret == 0)
		ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
					hdr,
					hdr->cred,
					NFS_PROTO(hdr->inode),
					desc->pg_rpc_callops,
					desc->pg_ioflags, 0);
	return ret;
}

/*
 * nfs_pageio_setup_mirroring - determine if mirroring is to be used
 *				by calling the pg_get_mirror_count op
 */
static int nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
				      struct nfs_page *req)
{
	int mirror_count = 1;

	if (!pgio->pg_ops->pg_get_mirror_count)
		return 0;

	mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);

	if (pgio->pg_error < 0)
		return pgio->pg_error;

	if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX)
		return -EINVAL;

	if (WARN_ON_ONCE(!pgio->pg_mirrors_dynamic))
		return -EINVAL;

	pgio->pg_mirror_count = mirror_count;

	return 0;
}

/*
 * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1)
 */
void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio)
{
	pgio->pg_mirror_count = 1;
	pgio->pg_mirror_idx = 0;
}

static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio)
{
	pgio->pg_mirror_count = 1;
	pgio->pg_mirror_idx = 0;
	pgio->pg_mirrors = pgio->pg_mirrors_static;
	kfree(pgio->pg_mirrors_dynamic);
	pgio->pg_mirrors_dynamic = NULL;
}

static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
		const struct nfs_lock_context *l2)
{
	return l1->lockowner.l_owner == l2->lockowner.l_owner
		&& l1->lockowner.l_pid == l2->lockowner.l_pid;
}

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static bool nfs_can_coalesce_requests(struct nfs_page *prev,
				      struct nfs_page *req,
				      struct nfs_pageio_descriptor *pgio)
{
	size_t size;
	struct file_lock_context *flctx;

	if (prev) {
		if (!nfs_match_open_context(req->wb_context, prev->wb_context))
			return false;
		flctx = d_inode(req->wb_context->dentry)->i_flctx;
		if (flctx != NULL &&
		    !(list_empty_careful(&flctx->flc_posix) &&
		      list_empty_careful(&flctx->flc_flock)) &&
		    !nfs_match_lock_context(req->wb_lock_context,
					    prev->wb_lock_context))
			return false;
		if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
			return false;
		if (req->wb_page == prev->wb_page) {
			if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes)
				return false;
		} else {
			if (req->wb_pgbase != 0 ||
			    prev->wb_pgbase + prev->wb_bytes != PAGE_SIZE)
				return false;
		}
	}
	size = pgio->pg_ops->pg_test(pgio, prev, req);
	WARN_ON_ONCE(size > req->wb_bytes);
	if (size && size < req->wb_bytes)
		req->wb_bytes = size;
	return size > 0;
}

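/*
 * Example: two 4 KiB requests at file offsets 0 and 4096 on adjacent
 * pages coalesce (prev ends at PAGE_SIZE, req starts at pgbase 0);
 * a request at offset 8192 following one at offset 0 does not, since
 * req_offset(req) != req_offset(prev) + prev->wb_bytes.
 */
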
/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	struct nfs_page *prev = NULL;

	if (mirror->pg_count != 0) {
		prev = nfs_list_entry(mirror->pg_list.prev);
	} else {
		if (desc->pg_ops->pg_init)
			desc->pg_ops->pg_init(desc, req);
		if (desc->pg_error < 0)
			return 0;
		mirror->pg_base = req->wb_pgbase;
	}
	if (!nfs_can_coalesce_requests(prev, req, desc))
		return 0;
	nfs_list_remove_request(req);
	nfs_list_add_request(req, &mirror->pg_list);
	mirror->pg_count += req->wb_bytes;
	return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!list_empty(&mirror->pg_list)) {
		int error = desc->pg_ops->pg_doio(desc);
		if (error < 0)
			desc->pg_error = error;
		else
			mirror->pg_bytes_written += mirror->pg_count;
	}
	if (list_empty(&mirror->pg_list)) {
		mirror->pg_count = 0;
		mirror->pg_base = 0;
	}
}

/**
 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * This may split a request into subrequests which are all part of the
 * same page group.
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	struct nfs_page *subreq;
	unsigned int bytes_left = 0;
	unsigned int offset, pgbase;

	nfs_page_group_lock(req, false);

	subreq = req;
	bytes_left = subreq->wb_bytes;
	offset = subreq->wb_offset;
	pgbase = subreq->wb_pgbase;

	do {
		if (!nfs_pageio_do_add_request(desc, subreq)) {
			/* make sure pg_test call(s) did nothing */
			WARN_ON_ONCE(subreq->wb_bytes != bytes_left);
			WARN_ON_ONCE(subreq->wb_offset != offset);
			WARN_ON_ONCE(subreq->wb_pgbase != pgbase);

			nfs_page_group_unlock(req);
			desc->pg_moreio = 1;
			nfs_pageio_doio(desc);
			if (desc->pg_error < 0)
				return 0;
			if (mirror->pg_recoalesce)
				return 0;
			/* retry add_request for this subreq */
			nfs_page_group_lock(req, false);
			continue;
		}

		/* check for buggy pg_test call(s) */
		WARN_ON_ONCE(subreq->wb_bytes + subreq->wb_pgbase > PAGE_SIZE);
		WARN_ON_ONCE(subreq->wb_bytes > bytes_left);
		WARN_ON_ONCE(subreq->wb_bytes == 0);

		bytes_left -= subreq->wb_bytes;
		offset += subreq->wb_bytes;
		pgbase += subreq->wb_bytes;

		if (bytes_left) {
			subreq = nfs_create_request(req->wb_context,
					req->wb_page,
					subreq, pgbase, bytes_left);
			if (IS_ERR(subreq))
				goto err_ptr;
			nfs_lock_request(subreq);
			subreq->wb_offset  = offset;
			subreq->wb_index = req->wb_index;
		}
	} while (bytes_left > 0);

	nfs_page_group_unlock(req);
	return 1;
err_ptr:
	desc->pg_error = PTR_ERR(subreq);
	nfs_page_group_unlock(req);
	return 0;
}

static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	LIST_HEAD(head);

	do {
		list_splice_init(&mirror->pg_list, &head);
		mirror->pg_bytes_written -= mirror->pg_count;
		mirror->pg_count = 0;
		mirror->pg_base = 0;
		mirror->pg_recoalesce = 0;

		while (!list_empty(&head)) {
			struct nfs_page *req;

			req = list_first_entry(&head, struct nfs_page, wb_list);
			nfs_list_remove_request(req);
			if (__nfs_pageio_add_request(desc, req))
				continue;
			if (desc->pg_error < 0) {
				list_splice_tail(&head, &mirror->pg_list);
				mirror->pg_recoalesce = 1;
				return 0;
			}
			break;
		}
	} while (mirror->pg_recoalesce);
	return 1;
}

static int nfs_pageio_add_request_mirror(struct nfs_pageio_descriptor *desc,
		struct nfs_page *req)
{
	int ret;

	do {
		ret = __nfs_pageio_add_request(desc, req);
		if (ret)
			break;
		if (desc->pg_error < 0)
			break;
		ret = nfs_do_recoalesce(desc);
	} while (ret);

	return ret;
}

int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	u32 midx;
	unsigned int pgbase, offset, bytes;
	struct nfs_page *dupreq, *lastreq;

	pgbase = req->wb_pgbase;
	offset = req->wb_offset;
	bytes = req->wb_bytes;

	nfs_pageio_setup_mirroring(desc, req);
	if (desc->pg_error < 0)
		goto out_failed;

	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
		if (midx) {
			nfs_page_group_lock(req, false);

			/* find the last request */
			for (lastreq = req->wb_head;
			     lastreq->wb_this_page != req->wb_head;
			     lastreq = lastreq->wb_this_page)
				;

			dupreq = nfs_create_request(req->wb_context,
					req->wb_page, lastreq, pgbase, bytes);

			if (IS_ERR(dupreq)) {
				nfs_page_group_unlock(req);
				desc->pg_error = PTR_ERR(dupreq);
				goto out_failed;
			}

			nfs_lock_request(dupreq);
			nfs_page_group_unlock(req);
			dupreq->wb_offset = offset;
			dupreq->wb_index = req->wb_index;
		} else
			dupreq = req;

		if (nfs_pgio_has_mirroring(desc))
			desc->pg_mirror_idx = midx;
		if (!nfs_pageio_add_request_mirror(desc, dupreq))
			goto out_failed;
	}

	return 1;

out_failed:
	/*
	 * We might have failed before sending any reqs over wire.
	 * Clean up rest of the reqs in mirror pg_list.
	 */
	if (desc->pg_error) {
		struct nfs_pgio_mirror *mirror;
		void (*func)(struct list_head *);

		/* remember fatal errors */
		if (nfs_error_is_fatal(desc->pg_error))
			mapping_set_error(desc->pg_inode->i_mapping,
					  desc->pg_error);

		func = desc->pg_completion_ops->error_cleanup;
		for (midx = 0; midx < desc->pg_mirror_count; midx++) {
			mirror = &desc->pg_mirrors[midx];
			func(&mirror->pg_list);
		}
	}
	return 0;
}

1169 * nfs_pageio_descriptor
1170 * @desc: pointer to io descriptor
1171 * @mirror_idx: pointer to mirror index
1173 static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor
*desc
,
1176 struct nfs_pgio_mirror
*mirror
= &desc
->pg_mirrors
[mirror_idx
];
1177 u32 restore_idx
= desc
->pg_mirror_idx
;
1179 if (nfs_pgio_has_mirroring(desc
))
1180 desc
->pg_mirror_idx
= mirror_idx
;
1182 nfs_pageio_doio(desc
);
1183 if (!mirror
->pg_recoalesce
)
1185 if (!nfs_do_recoalesce(desc
))
1188 desc
->pg_mirror_idx
= restore_idx
;
/*
 * nfs_pageio_resend - Transfer requests to new descriptor and resend
 * @hdr - the pgio header to move request from
 * @desc - the pageio descriptor to add requests to
 *
 * Try to move each request (nfs_page) from @hdr to @desc then attempt
 * to send them.
 *
 * Returns 0 on success and < 0 on error.
 */
int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
		      struct nfs_pgio_header *hdr)
{
	LIST_HEAD(failed);

	desc->pg_dreq = hdr->dreq;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);

		nfs_list_remove_request(req);
		if (!nfs_pageio_add_request(desc, req))
			nfs_list_add_request(req, &failed);
	}
	nfs_pageio_complete(desc);
	if (!list_empty(&failed)) {
		list_move(&failed, &hdr->pages);
		return desc->pg_error < 0 ? desc->pg_error : -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_pageio_resend);

/**
 * nfs_pageio_complete - Complete I/O then cleanup an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	u32 midx;

	for (midx = 0; midx < desc->pg_mirror_count; midx++)
		nfs_pageio_complete_mirror(desc, midx);

	if (desc->pg_ops->pg_cleanup)
		desc->pg_ops->pg_cleanup(desc);
	nfs_pageio_cleanup_mirroring(desc);
}

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	struct nfs_pgio_mirror *mirror;
	struct nfs_page *prev;
	u32 midx;

	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
		mirror = &desc->pg_mirrors[midx];
		if (!list_empty(&mirror->pg_list)) {
			prev = nfs_list_entry(mirror->pg_list.prev);
			if (index != prev->wb_index + 1)
				nfs_pageio_complete_mirror(desc, midx);
		}
	}
}

int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}

static const struct rpc_call_ops nfs_pgio_common_ops = {
	.rpc_call_prepare = nfs_pgio_prepare,
	.rpc_call_done = nfs_pgio_result,
	.rpc_release = nfs_pgio_release,
};

const struct nfs_pageio_ops nfs_pgio_rw_ops = {
	.pg_test = nfs_generic_pg_test,
	.pg_doio = nfs_generic_pg_pgios,
};