// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>

#include "internal.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static struct kmem_cache *nfs_page_cachep;
static const struct rpc_call_ops nfs_pgio_common_ops;

struct nfs_pgio_mirror *
nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc)
{
	return nfs_pgio_has_mirroring(desc) ?
		&desc->pg_mirrors[desc->pg_mirror_idx] :
		&desc->pg_mirrors[0];
}
EXPORT_SYMBOL_GPL(nfs_pgio_current_mirror);

void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
		       struct nfs_pgio_header *hdr,
		       void (*release)(struct nfs_pgio_header *hdr))
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	hdr->req = nfs_list_entry(mirror->pg_list.next);
	hdr->inode = desc->pg_inode;
	hdr->cred = nfs_req_openctx(hdr->req)->cred;
	hdr->io_start = req_offset(hdr->req);
	hdr->good_bytes = mirror->pg_count;
	hdr->io_completion = desc->pg_io_completion;
	hdr->dreq = desc->pg_dreq;
	hdr->release = release;
	hdr->completion_ops = desc->pg_completion_ops;
	if (hdr->completion_ops->init_hdr)
		hdr->completion_ops->init_hdr(hdr);

	hdr->pgio_mirror_idx = desc->pg_mirror_idx;
}
EXPORT_SYMBOL_GPL(nfs_pgheader_init);

void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
	unsigned int new = pos - hdr->io_start;

	if (hdr->good_bytes > new) {
		hdr->good_bytes = new;
		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
		if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags))
			hdr->error = error;
	}
}

static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL);

	if (p)
		INIT_LIST_HEAD(&p->wb_list);
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_iocounter_wait - wait for i/o to complete
 * @l_ctx: nfs_lock_context with io_counter to use
 *
 * returns -ERESTARTSYS if interrupted by a fatal signal.
 * Otherwise returns 0 once the io_count hits 0.
 */
int
nfs_iocounter_wait(struct nfs_lock_context *l_ctx)
{
	return wait_var_event_killable(&l_ctx->io_count,
				       !atomic_read(&l_ctx->io_count));
}

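/*
 * Usage sketch (illustrative only, not part of the original file): a
 * caller that must drain outstanding I/O on a lock context before
 * proceeding would typically do something like:
 *
 *	l_ctx = nfs_get_lock_context(ctx);
 *	if (!IS_ERR(l_ctx)) {
 *		status = nfs_iocounter_wait(l_ctx);
 *		nfs_put_lock_context(l_ctx);
 *		if (status < 0)
 *			return status;	// fatal signal received
 *	}
 */
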
/**
 * nfs_async_iocounter_wait - wait on a rpc_waitqueue for I/O
 * to complete
 * @task: the rpc_task that should wait
 * @l_ctx: nfs_lock_context with io_counter to check
 *
 * Returns true if there is outstanding I/O to wait on and the
 * task has been put to sleep.
 */
bool
nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
{
	struct inode *inode = d_inode(l_ctx->open_context->dentry);
	bool ret = false;

	if (atomic_read(&l_ctx->io_count) > 0) {
		rpc_sleep_on(&NFS_SERVER(inode)->uoc_rpcwaitq, task, NULL);
		ret = true;
	}

	if (atomic_read(&l_ctx->io_count) == 0) {
		rpc_wake_up_queued_task(&NFS_SERVER(inode)->uoc_rpcwaitq, task);
		ret = false;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);

/*
 * nfs_page_group_lock - lock the head of the page group
 * @req - request in group that is to be locked
 *
 * this lock must be held when traversing or modifying the page
 * group list
 *
 * return 0 on success, < 0 on error
 */
int
nfs_page_group_lock(struct nfs_page *req)
{
	struct nfs_page *head = req->wb_head;

	WARN_ON_ONCE(head != head->wb_head);

	if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
		return 0;

	set_bit(PG_CONTENDED1, &head->wb_flags);
	smp_mb__after_atomic();
	return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
				TASK_UNINTERRUPTIBLE);
}

/*
 * nfs_page_group_unlock - unlock the head of the page group
 * @req - request in group that is to be unlocked
 */
void
nfs_page_group_unlock(struct nfs_page *req)
{
	struct nfs_page *head = req->wb_head;

	WARN_ON_ONCE(head != head->wb_head);

	smp_mb__before_atomic();
	clear_bit(PG_HEADLOCK, &head->wb_flags);
	smp_mb__after_atomic();
	if (!test_bit(PG_CONTENDED1, &head->wb_flags))
		return;
	wake_up_bit(&head->wb_flags, PG_HEADLOCK);
}

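/*
 * Locking pattern sketch (illustrative, not part of the original file):
 * any walk of the wb_this_page ring is bracketed by the head lock, e.g.:
 *
 *	nfs_page_group_lock(req);
 *	for (tmp = req->wb_this_page; tmp != req; tmp = tmp->wb_this_page)
 *		... inspect or modify the group member ...
 *	nfs_page_group_unlock(req);
 */
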
/*
 * nfs_page_group_sync_on_bit_locked
 *
 * must be called with page group lock held
 */
static bool
nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
{
	struct nfs_page *head = req->wb_head;
	struct nfs_page *tmp;

	WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_flags));
	WARN_ON_ONCE(test_and_set_bit(bit, &req->wb_flags));

	tmp = req->wb_this_page;
	while (tmp != req) {
		if (!test_bit(bit, &tmp->wb_flags))
			return false;
		tmp = tmp->wb_this_page;
	}

	/* true! reset all bits */
	tmp = req;
	do {
		clear_bit(bit, &tmp->wb_flags);
		tmp = tmp->wb_this_page;
	} while (tmp != req);

	return true;
}

/*
 * nfs_page_group_sync_on_bit - set bit on current request, but only
 *   return true if the bit is set for all requests in page group
 * @req - request in page group
 * @bit - PG_* bit that is used to sync page group
 */
bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
{
	bool ret;

	nfs_page_group_lock(req);
	ret = nfs_page_group_sync_on_bit_locked(req, bit);
	nfs_page_group_unlock(req);

	return ret;
}

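/*
 * Example (illustrative, not part of the original file): the write path
 * uses this primitive to run once-per-page bookkeeping only after every
 * subrequest in the group has reached the same point, e.g.:
 *
 *	if (nfs_page_group_sync_on_bit(req, PG_WB_END))
 *		end_page_writeback(req->wb_page);
 */
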
/*
 * nfs_page_group_init - Initialize the page group linkage for @req
 * @req - a new nfs request
 * @prev - the previous request in page group, or NULL if @req is the first
 *         or only request in the group (the head).
 */
static inline void
nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
{
	struct inode *inode;
	WARN_ON_ONCE(prev == req);

	if (!prev) {
		/* a head request */
		req->wb_head = req;
		req->wb_this_page = req;
	} else {
		/* a subrequest */
		WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
		WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
		req->wb_head = prev->wb_head;
		req->wb_this_page = prev->wb_this_page;
		prev->wb_this_page = req;

		/* All subrequests take a ref on the head request until
		 * nfs_page_group_destroy is called */
		kref_get(&req->wb_head->wb_kref);

		/* grab extra ref and bump the request count if head request
		 * has extra ref from the write/commit path to handle handoff
		 * between write and commit lists. */
		if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
			inode = page_file_mapping(req->wb_page)->host;
			set_bit(PG_INODE_REF, &req->wb_flags);
			kref_get(&req->wb_kref);
			atomic_long_inc(&NFS_I(inode)->nrequests);
		}
	}
}

/*
 * nfs_page_group_destroy - sync the destruction of page groups
 * @req - request that no longer needs the page group
 *
 * releases the page group reference from each member once all
 * members have called this function.
 */
static void
nfs_page_group_destroy(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
	struct nfs_page *head = req->wb_head;
	struct nfs_page *tmp, *next;

	if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
		goto out;

	tmp = req;
	do {
		next = tmp->wb_this_page;
		/* unlink and free */
		tmp->wb_this_page = tmp;
		tmp->wb_head = tmp;
		nfs_free_request(tmp);
		tmp = next;
	} while (tmp != req);
out:
	/* subrequests must release the ref on the head request */
	if (head != req)
		nfs_release_request(head);
}

static struct nfs_page *
__nfs_create_request(struct nfs_lock_context *l_ctx, struct page *page,
		   unsigned int pgbase, unsigned int offset,
		   unsigned int count)
{
	struct nfs_page	*req;
	struct nfs_open_context *ctx = l_ctx->open_context;

	if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
		return ERR_PTR(-EBADF);
	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	req->wb_lock_context = l_ctx;
	refcount_inc(&l_ctx->count);
	atomic_inc(&l_ctx->io_count);

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_page = page;
	if (page) {
		req->wb_index = page_index(page);
		get_page(page);
	}
	req->wb_offset = offset;
	req->wb_pgbase = pgbase;
	req->wb_bytes = count;
	kref_init(&req->wb_kref);
	req->wb_nio = 0;
	return req;
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @page: page to write
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * User should ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct nfs_lock_context *l_ctx = nfs_get_lock_context(ctx);
	struct nfs_page *ret;

	if (IS_ERR(l_ctx))
		return ERR_CAST(l_ctx);
	ret = __nfs_create_request(l_ctx, page, offset, offset, count);
	if (!IS_ERR(ret))
		nfs_page_group_init(ret, NULL);
	nfs_put_lock_context(l_ctx);
	return ret;
}

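/*
 * Usage sketch (illustrative, not part of the original file): a read or
 * write path creates a request for a locked page and later drops it:
 *
 *	req = nfs_create_request(ctx, page, 0, PAGE_SIZE);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	... hand req to a pageio descriptor ...
 *	nfs_release_request(req);
 */
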
static struct nfs_page *
nfs_create_subreq(struct nfs_page *req, struct nfs_page *last,
		  unsigned int pgbase, unsigned int offset,
		  unsigned int count)
{
	struct nfs_page *ret;

	ret = __nfs_create_request(req->wb_lock_context, req->wb_page,
			pgbase, offset, count);
	if (!IS_ERR(ret)) {
		nfs_lock_request(ret);
		ret->wb_index = req->wb_index;
		nfs_page_group_init(ret, last);
		ret->wb_nio = req->wb_nio;
	}
	return ret;
}

/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: pointer to request
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_atomic();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_atomic();
	if (!test_bit(PG_CONTENDED2, &req->wb_flags))
		return;
	wake_up_bit(&req->wb_flags, PG_BUSY);
}

/**
 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 * @req: pointer to request
 */
void nfs_unlock_and_release_request(struct nfs_page *req)
{
	nfs_unlock_request(req);
	nfs_release_request(req);
}

/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req:
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	struct nfs_lock_context *l_ctx = req->wb_lock_context;
	struct nfs_open_context *ctx;

	if (page != NULL) {
		put_page(page);
		req->wb_page = NULL;
	}
	if (l_ctx != NULL) {
		if (atomic_dec_and_test(&l_ctx->io_count)) {
			wake_up_var(&l_ctx->io_count);
			ctx = l_ctx->open_context;
			if (test_bit(NFS_CONTEXT_UNLOCK, &ctx->flags))
				rpc_wake_up(&NFS_SERVER(d_inode(ctx->dentry))->uoc_rpcwaitq);
		}
		nfs_put_lock_context(l_ctx);
		req->wb_lock_context = NULL;
	}
}

/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
void nfs_free_request(struct nfs_page *req)
{
	WARN_ON_ONCE(req->wb_this_page != req);

	/* extra debug: make sure no sync bits are still set */
	WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags));

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}

void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_page_group_destroy);
}
EXPORT_SYMBOL_GPL(nfs_release_request);

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by fatal signals only.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	if (!test_bit(PG_BUSY, &req->wb_flags))
		return 0;
	set_bit(PG_CONTENDED2, &req->wb_flags);
	smp_mb__after_atomic();
	return wait_on_bit_io(&req->wb_flags, PG_BUSY,
			      TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(nfs_wait_on_request);

/*
 * nfs_generic_pg_test - determine if requests can be coalesced
 * @desc: pointer to descriptor
 * @prev: previous request in desc, or NULL
 * @req: this request
 *
 * Returns zero if @req cannot be coalesced into @desc, otherwise it returns
 * the size of the request.
 */
size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *prev, struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (mirror->pg_count > mirror->pg_bsize) {
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}

	/*
	 * Limit the request size so that we can still allocate a page array
	 * for it without upsetting the slab allocator.
	 */
	if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
			sizeof(struct page *) > PAGE_SIZE)
		return 0;

	return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes);
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);

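/*
 * Worked example (illustrative, not part of the original file): with
 * 4 KiB pages and 8-byte pointers, the check above caps an RPC at
 * PAGE_SIZE / sizeof(struct page *) = 512 pages, i.e. 2 MiB of data,
 * so the page array itself never needs more than a single page.
 */
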
struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
{
	struct nfs_pgio_header *hdr = ops->rw_alloc_header();

	if (hdr) {
		INIT_LIST_HEAD(&hdr->pages);
		hdr->rw_ops = ops;
	}
	return hdr;
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);

/**
 * nfs_pgio_data_destroy - make @hdr suitable for reuse
 *
 * Frees memory and releases refs from nfs_generic_pgio, so that it may
 * be called again.
 *
 * @hdr: A header that has had nfs_generic_pgio called
 */
static void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
{
	if (hdr->args.context)
		put_nfs_open_context(hdr->args.context);
	if (hdr->page_array.pagevec != hdr->page_array.page_array)
		kfree(hdr->page_array.pagevec);
}

/**
 * nfs_pgio_header_free - Free a read or write header
 * @hdr: The header to free
 */
void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
{
	nfs_pgio_data_destroy(hdr);
	hdr->rw_ops->rw_free_header(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_free);

/**
 * nfs_pgio_rpcsetup - Set up arguments for a pageio call
 * @hdr: The pageio hdr
 * @count: Number of bytes to read
 * @how: How to commit data (writes only)
 * @cinfo: Commit information for the call (writes only)
 */
static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
			      unsigned int count,
			      int how, struct nfs_commit_info *cinfo)
{
	struct nfs_page *req = hdr->req;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with hdr->commit et al. */

	hdr->args.fh = NFS_FH(hdr->inode);
	hdr->args.offset = req_offset(req);
	/* pnfs_set_layoutcommit needs this */
	hdr->mds_offset = hdr->args.offset;
	hdr->args.pgbase = req->wb_pgbase;
	hdr->args.pages = hdr->page_array.pagevec;
	hdr->args.count = count;
	hdr->args.context = get_nfs_open_context(nfs_req_openctx(req));
	hdr->args.lock_context = req->wb_lock_context;
	hdr->args.stable = NFS_UNSTABLE;
	switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
	case 0:
		break;
	case FLUSH_COND_STABLE:
		if (nfs_reqs_to_commit(cinfo))
			break;
		/* fall through */
	default:
		hdr->args.stable = NFS_FILE_SYNC;
	}

	hdr->res.fattr = &hdr->fattr;
	hdr->res.count = count;
	hdr->res.eof = 0;
	hdr->res.verf = &hdr->verf;
	nfs_fattr_init(&hdr->fattr);
}

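/*
 * Example mapping (illustrative, not part of the original file): how the
 * switch above translates the flush mode into the stable_how value sent
 * on the wire:
 *
 *	how == 0                -> NFS_UNSTABLE
 *	how & FLUSH_COND_STABLE -> NFS_UNSTABLE if a commit will follow,
 *	                           NFS_FILE_SYNC otherwise
 *	how & FLUSH_STABLE      -> NFS_FILE_SYNC
 */
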
/**
 * nfs_pgio_prepare - Prepare pageio hdr to go over the wire
 * @task: The current task
 * @calldata: pageio header to prepare
 */
static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	int err;

	err = NFS_PROTO(hdr->inode)->pgio_rpc_prepare(task, hdr);
	if (err)
		rpc_exit(task, err);
}

int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
		      const struct cred *cred, const struct nfs_rpc_ops *rpc_ops,
		      const struct rpc_call_ops *call_ops, int how, int flags)
{
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &hdr->args,
		.rpc_resp = &hdr->res,
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.task = &hdr->task,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = hdr,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | flags,
	};
	int ret = 0;

	hdr->rw_ops->rw_initiate(hdr, &msg, rpc_ops, &task_setup_data, how);

	dprintk("NFS: initiated pgio call "
		"(req %s/%llu, %u bytes @ offset %llu)\n",
		hdr->inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(hdr->inode),
		hdr->args.count,
		(unsigned long long)hdr->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task)) {
		ret = PTR_ERR(task);
		goto out;
	}
	if (how & FLUSH_SYNC) {
		ret = rpc_wait_for_completion_task(task);
		if (ret == 0)
			ret = task->tk_status;
	}
	rpc_put_task(task);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_initiate_pgio);

/**
 * nfs_pgio_error - Clean up from a pageio error
 * @hdr: pageio header
 */
static void nfs_pgio_error(struct nfs_pgio_header *hdr)
{
	set_bit(NFS_IOHDR_REDO, &hdr->flags);
	hdr->completion_ops->completion(hdr);
}

/**
 * nfs_pgio_release - Release pageio data
 * @calldata: The pageio header to release
 */
static void nfs_pgio_release(void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;

	hdr->completion_ops->completion(hdr);
}

static void nfs_pageio_mirror_init(struct nfs_pgio_mirror *mirror,
				   unsigned int bsize)
{
	INIT_LIST_HEAD(&mirror->pg_list);
	mirror->pg_bytes_written = 0;
	mirror->pg_count = 0;
	mirror->pg_bsize = bsize;
	mirror->pg_base = 0;
	mirror->pg_recoalesce = 0;
}

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @compl_ops: pointer to pageio completion operations
 * @rw_ops: pointer to nfs read/write operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     const struct nfs_pageio_ops *pg_ops,
		     const struct nfs_pgio_completion_ops *compl_ops,
		     const struct nfs_rw_ops *rw_ops,
		     size_t bsize,
		     int io_flags)
{
	desc->pg_moreio = 0;
	desc->pg_inode = inode;
	desc->pg_ops = pg_ops;
	desc->pg_completion_ops = compl_ops;
	desc->pg_rw_ops = rw_ops;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
	desc->pg_lseg = NULL;
	desc->pg_io_completion = NULL;
	desc->pg_dreq = NULL;
	desc->pg_bsize = bsize;

	desc->pg_mirror_count = 1;
	desc->pg_mirror_idx = 0;

	desc->pg_mirrors_dynamic = NULL;
	desc->pg_mirrors = desc->pg_mirrors_static;
	nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
	desc->pg_maxretrans = 0;
}

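/*
 * Usage sketch (illustrative, not part of the original file; real callers
 * go through wrappers such as the read/write init helpers): the read and
 * write paths drive a descriptor through the same three-step cycle:
 *
 *	struct nfs_pageio_descriptor pgio;
 *
 *	nfs_pageio_init(&pgio, inode, &nfs_pgio_rw_ops,
 *			compl_ops, rw_ops, bsize, 0);
 *	while (...more locked pages...) {
 *		req = nfs_create_request(ctx, page, 0, len);
 *		if (!nfs_pageio_add_request(&pgio, req))
 *			break;	// pg_error holds the reason
 *	}
 *	nfs_pageio_complete(&pgio);	// flush whatever was coalesced
 */
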
/**
 * nfs_pgio_result - Basic pageio error handling
 * @task: The task that ran
 * @calldata: Pageio header to check
 */
static void nfs_pgio_result(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	struct inode *inode = hdr->inode;

	dprintk("NFS: %s: %5u, (status %d)\n", __func__,
		task->tk_pid, task->tk_status);

	if (hdr->rw_ops->rw_done(task, hdr, inode) != 0)
		return;
	if (task->tk_status < 0)
		nfs_set_pgio_error(hdr, task->tk_status, hdr->args.offset);
	else
		hdr->rw_ops->rw_result(task, hdr);
}

/*
 * Create an RPC task for the given read or write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
		     struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	struct nfs_page *req;
	struct page **pages,
		     *last_page;
	struct list_head *head = &mirror->pg_list;
	struct nfs_commit_info cinfo;
	struct nfs_page_array *pg_array = &hdr->page_array;
	unsigned int pagecount, pageused;
	gfp_t gfp_flags = GFP_KERNEL;

	pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count);
	pg_array->npages = pagecount;

	if (pagecount <= ARRAY_SIZE(pg_array->page_array))
		pg_array->pagevec = pg_array->page_array;
	else {
		pg_array->pagevec = kcalloc(pagecount, sizeof(struct page *), gfp_flags);
		if (!pg_array->pagevec) {
			pg_array->npages = 0;
			nfs_pgio_error(hdr);
			desc->pg_error = -ENOMEM;
			return desc->pg_error;
		}
	}

	nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
	pages = hdr->page_array.pagevec;
	last_page = NULL;
	pageused = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_move_request(req, &hdr->pages);

		if (!last_page || last_page != req->wb_page) {
			pageused++;
			if (pageused > pagecount)
				break;
			*pages++ = last_page = req->wb_page;
		}
	}
	if (WARN_ON_ONCE(pageused != pagecount)) {
		nfs_pgio_error(hdr);
		desc->pg_error = -EINVAL;
		return desc->pg_error;
	}

	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
	    (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
		desc->pg_ioflags &= ~FLUSH_COND_STABLE;

	/* Set up the argument struct */
	nfs_pgio_rpcsetup(hdr, mirror->pg_count, desc->pg_ioflags, &cinfo);
	desc->pg_rpc_callops = &nfs_pgio_common_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_generic_pgio);

static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
	ret = nfs_generic_pgio(desc, hdr);
	if (ret == 0)
		ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
					hdr,
					hdr->cred,
					NFS_PROTO(hdr->inode),
					desc->pg_rpc_callops,
					desc->pg_ioflags, 0);
	return ret;
}

static struct nfs_pgio_mirror *
nfs_pageio_alloc_mirrors(struct nfs_pageio_descriptor *desc,
		unsigned int mirror_count)
{
	struct nfs_pgio_mirror *ret;
	unsigned int i;

	kfree(desc->pg_mirrors_dynamic);
	desc->pg_mirrors_dynamic = NULL;
	if (mirror_count == 1)
		return desc->pg_mirrors_static;
	ret = kmalloc_array(mirror_count, sizeof(*ret), GFP_KERNEL);
	if (ret != NULL) {
		for (i = 0; i < mirror_count; i++)
			nfs_pageio_mirror_init(&ret[i], desc->pg_bsize);
		desc->pg_mirrors_dynamic = ret;
	}
	return ret;
}

/*
 * nfs_pageio_setup_mirroring - determine if mirroring is to be used
 *				by calling the pg_get_mirror_count op
 */
static void nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
				       struct nfs_page *req)
{
	unsigned int mirror_count = 1;

	if (pgio->pg_ops->pg_get_mirror_count)
		mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
	if (mirror_count == pgio->pg_mirror_count || pgio->pg_error < 0)
		return;

	if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX) {
		pgio->pg_error = -EINVAL;
		return;
	}

	pgio->pg_mirrors = nfs_pageio_alloc_mirrors(pgio, mirror_count);
	if (pgio->pg_mirrors == NULL) {
		pgio->pg_error = -ENOMEM;
		pgio->pg_mirrors = pgio->pg_mirrors_static;
		return;
	}
	pgio->pg_mirror_count = mirror_count;
}

/*
 * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1)
 */
void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio)
{
	pgio->pg_mirror_count = 1;
	pgio->pg_mirror_idx = 0;
}

static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio)
{
	pgio->pg_mirror_count = 1;
	pgio->pg_mirror_idx = 0;
	pgio->pg_mirrors = pgio->pg_mirrors_static;
	kfree(pgio->pg_mirrors_dynamic);
	pgio->pg_mirrors_dynamic = NULL;
}

static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
		const struct nfs_lock_context *l2)
{
	return l1->lockowner == l2->lockowner;
}

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 * @pgio: pointer to nfs_pagio_descriptor
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static bool nfs_can_coalesce_requests(struct nfs_page *prev,
				      struct nfs_page *req,
				      struct nfs_pageio_descriptor *pgio)
{
	size_t size;
	struct file_lock_context *flctx;

	if (prev) {
		if (!nfs_match_open_context(nfs_req_openctx(req), nfs_req_openctx(prev)))
			return false;
		flctx = d_inode(nfs_req_openctx(req)->dentry)->i_flctx;
		if (flctx != NULL &&
		    !(list_empty_careful(&flctx->flc_posix) &&
		      list_empty_careful(&flctx->flc_flock)) &&
		    !nfs_match_lock_context(req->wb_lock_context,
					    prev->wb_lock_context))
			return false;
		if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
			return false;
		if (req->wb_page == prev->wb_page) {
			if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes)
				return false;
		} else {
			if (req->wb_pgbase != 0 ||
			    prev->wb_pgbase + prev->wb_bytes != PAGE_SIZE)
				return false;
		}
	}
	size = pgio->pg_ops->pg_test(pgio, prev, req);
	WARN_ON_ONCE(size > req->wb_bytes);
	if (size && size < req->wb_bytes)
		req->wb_bytes = size;
	return size > 0;
}

/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	struct nfs_page *prev = NULL;

	if (mirror->pg_count != 0) {
		prev = nfs_list_entry(mirror->pg_list.prev);
	} else {
		if (desc->pg_ops->pg_init)
			desc->pg_ops->pg_init(desc, req);
		if (desc->pg_error < 0)
			return 0;
		mirror->pg_base = req->wb_pgbase;
	}

	if (desc->pg_maxretrans && req->wb_nio > desc->pg_maxretrans) {
		if (NFS_SERVER(desc->pg_inode)->flags & NFS_MOUNT_SOFTERR)
			desc->pg_error = -ETIMEDOUT;
		else
			desc->pg_error = -EIO;
		return 0;
	}

	if (!nfs_can_coalesce_requests(prev, req, desc))
		return 0;
	nfs_list_move_request(req, &mirror->pg_list);
	mirror->pg_count += req->wb_bytes;
	return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!list_empty(&mirror->pg_list)) {
		int error = desc->pg_ops->pg_doio(desc);
		if (error < 0)
			desc->pg_error = error;
		else
			mirror->pg_bytes_written += mirror->pg_count;
	}
	if (list_empty(&mirror->pg_list)) {
		mirror->pg_count = 0;
		mirror->pg_base = 0;
	}
}

static void
nfs_pageio_cleanup_request(struct nfs_pageio_descriptor *desc,
		struct nfs_page *req)
{
	LIST_HEAD(head);

	nfs_list_move_request(req, &head);
	desc->pg_completion_ops->error_cleanup(&head, desc->pg_error);
}

/**
 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * This may split a request into subrequests which are all part of the
 * same page group.
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	struct nfs_page *subreq;
	unsigned int bytes_left = 0;
	unsigned int offset, pgbase;

	nfs_page_group_lock(req);

	subreq = req;
	bytes_left = subreq->wb_bytes;
	offset = subreq->wb_offset;
	pgbase = subreq->wb_pgbase;

	do {
		if (!nfs_pageio_do_add_request(desc, subreq)) {
			/* make sure pg_test call(s) did nothing */
			WARN_ON_ONCE(subreq->wb_bytes != bytes_left);
			WARN_ON_ONCE(subreq->wb_offset != offset);
			WARN_ON_ONCE(subreq->wb_pgbase != pgbase);

			nfs_page_group_unlock(req);
			desc->pg_moreio = 1;
			nfs_pageio_doio(desc);
			if (desc->pg_error < 0 || mirror->pg_recoalesce)
				goto out_cleanup_subreq;
			/* retry add_request for this subreq */
			nfs_page_group_lock(req);
			continue;
		}

		/* check for buggy pg_test call(s) */
		WARN_ON_ONCE(subreq->wb_bytes + subreq->wb_pgbase > PAGE_SIZE);
		WARN_ON_ONCE(subreq->wb_bytes > bytes_left);
		WARN_ON_ONCE(subreq->wb_bytes == 0);

		bytes_left -= subreq->wb_bytes;
		offset += subreq->wb_bytes;
		pgbase += subreq->wb_bytes;

		if (bytes_left) {
			subreq = nfs_create_subreq(req, subreq, pgbase,
					offset, bytes_left);
			if (IS_ERR(subreq))
				goto err_ptr;
		}
	} while (bytes_left > 0);

	nfs_page_group_unlock(req);
	return 1;
err_ptr:
	desc->pg_error = PTR_ERR(subreq);
	nfs_page_group_unlock(req);
	return 0;
out_cleanup_subreq:
	if (req != subreq)
		nfs_pageio_cleanup_request(desc, subreq);
	return 0;
}

static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	LIST_HEAD(head);

	do {
		list_splice_init(&mirror->pg_list, &head);
		mirror->pg_bytes_written -= mirror->pg_count;
		mirror->pg_count = 0;
		mirror->pg_base = 0;
		mirror->pg_recoalesce = 0;

		while (!list_empty(&head)) {
			struct nfs_page *req;

			req = list_first_entry(&head, struct nfs_page, wb_list);
			if (__nfs_pageio_add_request(desc, req))
				continue;
			if (desc->pg_error < 0) {
				list_splice_tail(&head, &mirror->pg_list);
				mirror->pg_recoalesce = 1;
				return 0;
			}
			break;
		}
	} while (mirror->pg_recoalesce);
	return 1;
}

static int nfs_pageio_add_request_mirror(struct nfs_pageio_descriptor *desc,
		struct nfs_page *req)
{
	int ret;

	do {
		ret = __nfs_pageio_add_request(desc, req);
		if (ret)
			break;
		if (desc->pg_error < 0)
			break;
		ret = nfs_do_recoalesce(desc);
	} while (ret);

	return ret;
}

static void nfs_pageio_error_cleanup(struct nfs_pageio_descriptor *desc)
{
	u32 midx;
	struct nfs_pgio_mirror *mirror;

	if (!desc->pg_error)
		return;

	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
		mirror = &desc->pg_mirrors[midx];
		desc->pg_completion_ops->error_cleanup(&mirror->pg_list,
				desc->pg_error);
	}
}

int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	u32 midx;
	unsigned int pgbase, offset, bytes;
	struct nfs_page *dupreq, *lastreq;

	pgbase = req->wb_pgbase;
	offset = req->wb_offset;
	bytes = req->wb_bytes;

	nfs_pageio_setup_mirroring(desc, req);
	if (desc->pg_error < 0)
		goto out_failed;

	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
		if (midx) {
			nfs_page_group_lock(req);

			/* find the last request */
			for (lastreq = req->wb_head;
			     lastreq->wb_this_page != req->wb_head;
			     lastreq = lastreq->wb_this_page)
				;

			dupreq = nfs_create_subreq(req, lastreq,
					pgbase, offset, bytes);

			nfs_page_group_unlock(req);
			if (IS_ERR(dupreq)) {
				desc->pg_error = PTR_ERR(dupreq);
				goto out_failed;
			}
		} else
			dupreq = req;

		if (nfs_pgio_has_mirroring(desc))
			desc->pg_mirror_idx = midx;
		if (!nfs_pageio_add_request_mirror(desc, dupreq))
			goto out_cleanup_subreq;
	}

	return 1;

out_cleanup_subreq:
	if (req != dupreq)
		nfs_pageio_cleanup_request(desc, dupreq);
out_failed:
	nfs_pageio_error_cleanup(desc);
	return 0;
}

/*
 * nfs_pageio_complete_mirror - Complete I/O on the current mirror of an
 *				nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 * @mirror_idx: pointer to mirror index
 */
static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
				       u32 mirror_idx)
{
	struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[mirror_idx];
	u32 restore_idx = desc->pg_mirror_idx;

	if (nfs_pgio_has_mirroring(desc))
		desc->pg_mirror_idx = mirror_idx;
	for (;;) {
		nfs_pageio_doio(desc);
		if (desc->pg_error < 0 || !mirror->pg_recoalesce)
			break;
		if (!nfs_do_recoalesce(desc))
			break;
	}
	desc->pg_mirror_idx = restore_idx;
}

/*
 * nfs_pageio_resend - Transfer requests to new descriptor and resend
 * @hdr - the pgio header to move request from
 * @desc - the pageio descriptor to add requests to
 *
 * Try to move each request (nfs_page) from @hdr to @desc then attempt
 * to send them.
 *
 * Returns 0 on success and < 0 on error.
 */
int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
		      struct nfs_pgio_header *hdr)
{
	LIST_HEAD(pages);

	desc->pg_io_completion = hdr->io_completion;
	desc->pg_dreq = hdr->dreq;
	list_splice_init(&hdr->pages, &pages);
	while (!list_empty(&pages)) {
		struct nfs_page *req = nfs_list_entry(pages.next);

		if (!nfs_pageio_add_request(desc, req))
			break;
	}
	nfs_pageio_complete(desc);
	if (!list_empty(&pages)) {
		int err = desc->pg_error < 0 ? desc->pg_error : -EIO;
		hdr->completion_ops->error_cleanup(&pages, err);
		nfs_set_pgio_error(hdr, err, hdr->io_start);
		return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_pageio_resend);

/**
 * nfs_pageio_complete - Complete I/O then cleanup an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	u32 midx;

	for (midx = 0; midx < desc->pg_mirror_count; midx++)
		nfs_pageio_complete_mirror(desc, midx);

	if (desc->pg_error < 0)
		nfs_pageio_error_cleanup(desc);
	if (desc->pg_ops->pg_cleanup)
		desc->pg_ops->pg_cleanup(desc);
	nfs_pageio_cleanup_mirroring(desc);
}

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	struct nfs_pgio_mirror *mirror;
	struct nfs_page *prev;
	u32 midx;

	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
		mirror = &desc->pg_mirrors[midx];
		if (!list_empty(&mirror->pg_list)) {
			prev = nfs_list_entry(mirror->pg_list.prev);
			if (index != prev->wb_index + 1) {
				nfs_pageio_complete(desc);
				break;
			}
		}
	}
}

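/*
 * Usage sketch (illustrative, not part of the original file): the
 * writeback path calls this just before locking the next page, so a
 * descriptor never holds requests for a non-contiguous page range:
 *
 *	nfs_pageio_cond_complete(pgio, page_index(page));
 *	ret = ...flush the newly locked page via pgio...;
 */
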
int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}

static const struct rpc_call_ops nfs_pgio_common_ops = {
	.rpc_call_prepare = nfs_pgio_prepare,
	.rpc_call_done = nfs_pgio_result,
	.rpc_release = nfs_pgio_release,
};

const struct nfs_pageio_ops nfs_pgio_rw_ops = {
	.pg_test = nfs_generic_pg_test,
	.pg_doio = nfs_generic_pg_pgios,
};