// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>

#include "internal.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE
static struct kmem_cache *nfs_page_cachep;
static const struct rpc_call_ops nfs_pgio_common_ops;
struct nfs_pgio_mirror *
nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc)
{
	return &desc->pg_mirrors[desc->pg_mirror_idx];
}
EXPORT_SYMBOL_GPL(nfs_pgio_current_mirror);
void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
		       struct nfs_pgio_header *hdr,
		       void (*release)(struct nfs_pgio_header *hdr))
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	hdr->req = nfs_list_entry(mirror->pg_list.next);
	hdr->inode = desc->pg_inode;
	hdr->cred = nfs_req_openctx(hdr->req)->cred;
	hdr->io_start = req_offset(hdr->req);
	hdr->good_bytes = mirror->pg_count;
	hdr->io_completion = desc->pg_io_completion;
	hdr->dreq = desc->pg_dreq;
	hdr->release = release;
	hdr->completion_ops = desc->pg_completion_ops;
	if (hdr->completion_ops->init_hdr)
		hdr->completion_ops->init_hdr(hdr);

	hdr->pgio_mirror_idx = desc->pg_mirror_idx;
}
EXPORT_SYMBOL_GPL(nfs_pgheader_init);
void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
	unsigned int new = pos - hdr->io_start;

	if (hdr->good_bytes > new) {
		hdr->good_bytes = new;
		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
		if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags))
			hdr->error = error;
	}
}
static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL);

	if (p)
		INIT_LIST_HEAD(&p->wb_list);
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}
/**
 * nfs_iocounter_wait - wait for i/o to complete
 * @l_ctx: nfs_lock_context with io_counter to use
 *
 * returns -ERESTARTSYS if interrupted by a fatal signal.
 * Otherwise returns 0 once the io_count hits 0.
 */
int
nfs_iocounter_wait(struct nfs_lock_context *l_ctx)
{
	return wait_var_event_killable(&l_ctx->io_count,
				       !atomic_read(&l_ctx->io_count));
}
/**
 * nfs_async_iocounter_wait - wait on a rpc_waitqueue for I/O
 * to complete
 * @task: the rpc_task that should wait
 * @l_ctx: nfs_lock_context with io_counter to check
 *
 * Returns true if there is outstanding I/O to wait on and the
 * task has been put to sleep.
 */
bool
nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
{
	struct inode *inode = d_inode(l_ctx->open_context->dentry);
	bool ret = false;

	if (atomic_read(&l_ctx->io_count) > 0) {
		rpc_sleep_on(&NFS_SERVER(inode)->uoc_rpcwaitq, task, NULL);
		ret = true;
	}

	if (atomic_read(&l_ctx->io_count) == 0) {
		rpc_wake_up_queued_task(&NFS_SERVER(inode)->uoc_rpcwaitq, task);
		ret = false;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);
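/*
 * Example (illustrative sketch, not part of the original file): an async
 * RPC ->rpc_call_prepare callback can use this helper to park the task on
 * the "unlock on close" waitqueue until in-flight I/O drains, roughly:
 *
 *	static void example_prepare(struct rpc_task *task, void *calldata)
 *	{
 *		struct nfs_lock_context *l_ctx = calldata;
 *
 *		if (nfs_async_iocounter_wait(task, l_ctx))
 *			return;		(task sleeps; restarted on wake-up)
 *		rpc_call_start(task);
 *	}
 */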
/*
 * nfs_page_group_lock_head - page lock the head of the page group
 * @req: any member of the page group
 */
struct nfs_page *
nfs_page_group_lock_head(struct nfs_page *req)
{
	struct nfs_page *head = req->wb_head;

	while (!nfs_lock_request(head)) {
		int ret = nfs_wait_on_request(head);
		if (ret < 0)
			return ERR_PTR(ret);
	}
	if (head != req)
		kref_get(&head->wb_kref);
	return head;
}
/*
 * nfs_unroll_locks - unlock all newly locked reqs and wait on @req
 * @head: head request of page group, must be holding head lock
 * @req: request that couldn't lock and needs to wait on the req bit lock
 *
 * This is a helper function for nfs_lock_and_join_requests
 */
static void
nfs_unroll_locks(struct nfs_page *head, struct nfs_page *req)
{
	struct nfs_page *tmp;

	/* relinquish all the locks successfully grabbed this run */
	for (tmp = head->wb_this_page; tmp != req; tmp = tmp->wb_this_page) {
		if (!kref_read(&tmp->wb_kref))
			continue;
		nfs_unlock_and_release_request(tmp);
	}
}
/*
 * nfs_page_group_lock_subreq - try to lock a subrequest
 * @head: head request of page group
 * @subreq: request to lock
 *
 * This is a helper function for nfs_lock_and_join_requests which
 * must be called with the head request and page group both locked.
 * On error, it returns with the page group unlocked.
 */
static int
nfs_page_group_lock_subreq(struct nfs_page *head, struct nfs_page *subreq)
{
	int ret;

	if (!kref_get_unless_zero(&subreq->wb_kref))
		return 0;
	while (!nfs_lock_request(subreq)) {
		nfs_page_group_unlock(head);
		ret = nfs_wait_on_request(subreq);
		if (!ret)
			ret = nfs_page_group_lock(head);
		if (ret < 0) {
			nfs_unroll_locks(head, subreq);
			nfs_release_request(subreq);
			return ret;
		}
	}
	return 0;
}
/*
 * nfs_page_group_lock_subrequests - try to lock the subrequests
 * @head: head request of page group
 *
 * This is a helper function for nfs_lock_and_join_requests which
 * must be called with the head request locked.
 */
int nfs_page_group_lock_subrequests(struct nfs_page *head)
{
	struct nfs_page *subreq;
	int ret;

	ret = nfs_page_group_lock(head);
	if (ret < 0)
		return ret;
	/* lock each request in the page group */
	for (subreq = head->wb_this_page; subreq != head;
			subreq = subreq->wb_this_page) {
		ret = nfs_page_group_lock_subreq(head, subreq);
		if (ret < 0)
			return ret;
	}
	nfs_page_group_unlock(head);
	return 0;
}
/*
 * nfs_page_set_headlock - set the request PG_HEADLOCK
 * @req: request that is to be locked
 *
 * this lock must be held when modifying req->wb_head
 *
 * return 0 on success, < 0 on error
 */
int
nfs_page_set_headlock(struct nfs_page *req)
{
	if (!test_and_set_bit(PG_HEADLOCK, &req->wb_flags))
		return 0;

	set_bit(PG_CONTENDED1, &req->wb_flags);
	smp_mb__after_atomic();
	return wait_on_bit_lock(&req->wb_flags, PG_HEADLOCK,
				TASK_UNINTERRUPTIBLE);
}
/*
 * nfs_page_clear_headlock - clear the request PG_HEADLOCK
 * @req: request that is to be unlocked
 */
void
nfs_page_clear_headlock(struct nfs_page *req)
{
	smp_mb__before_atomic();
	clear_bit(PG_HEADLOCK, &req->wb_flags);
	smp_mb__after_atomic();
	if (!test_bit(PG_CONTENDED1, &req->wb_flags))
		return;
	wake_up_bit(&req->wb_flags, PG_HEADLOCK);
}
/*
 * nfs_page_group_lock - lock the head of the page group
 * @req: request in group that is to be locked
 *
 * this lock must be held when traversing or modifying the page
 * group list
 *
 * return 0 on success, < 0 on error
 */
int
nfs_page_group_lock(struct nfs_page *req)
{
	int ret;

	ret = nfs_page_set_headlock(req);
	if (ret || req->wb_head == req)
		return ret;
	return nfs_page_set_headlock(req->wb_head);
}
/*
 * nfs_page_group_unlock - unlock the head of the page group
 * @req: request in group that is to be unlocked
 */
void
nfs_page_group_unlock(struct nfs_page *req)
{
	if (req != req->wb_head)
		nfs_page_clear_headlock(req->wb_head);
	nfs_page_clear_headlock(req);
}
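/*
 * Example (illustrative sketch, not part of the original file): a typical
 * traversal of a page group takes the group lock, walks the wb_this_page
 * ring until it wraps back to the starting request, then unlocks:
 *
 *	nfs_page_group_lock(req);
 *	for (tmp = req->wb_this_page; tmp != req; tmp = tmp->wb_this_page)
 *		inspect(tmp);	(inspect() is a hypothetical helper)
 *	nfs_page_group_unlock(req);
 */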
/*
 * nfs_page_group_sync_on_bit_locked
 *
 * must be called with page group lock held
 */
static bool
nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
{
	struct nfs_page *head = req->wb_head;
	struct nfs_page *tmp;

	WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_flags));
	WARN_ON_ONCE(test_and_set_bit(bit, &req->wb_flags));

	tmp = req->wb_this_page;
	while (tmp != req) {
		if (!test_bit(bit, &tmp->wb_flags))
			return false;
		tmp = tmp->wb_this_page;
	}

	/* true! reset all bits */
	tmp = req;
	do {
		clear_bit(bit, &tmp->wb_flags);
		tmp = tmp->wb_this_page;
	} while (tmp != req);

	return true;
}
/*
 * nfs_page_group_sync_on_bit - set bit on current request, but only
 *   return true if the bit is set for all requests in page group
 * @req - request in page group
 * @bit - PG_* bit that is used to sync page group
 */
bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
{
	bool ret;

	nfs_page_group_lock(req);
	ret = nfs_page_group_sync_on_bit_locked(req, bit);
	nfs_page_group_unlock(req);

	return ret;
}
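/*
 * Example (illustrative sketch, not part of the original file): callers use
 * this to run a per-page action exactly once, after every member of the
 * group has reached the same point. Roughly:
 *
 *	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE))
 *		unlock_page(req->wb_page);
 *
 * Only the last group member to set the bit sees "true", so the page is
 * unlocked once per group rather than once per subrequest.
 */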
/*
 * nfs_page_group_init - Initialize the page group linkage for @req
 * @req - a new nfs request
 * @prev - the previous request in page group, or NULL if @req is the first
 *         or only request in the group (the head).
 */
static void
nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
{
	struct inode *inode;
	WARN_ON_ONCE(prev == req);

	if (!prev) {
		/* a head request */
		req->wb_head = req;
		req->wb_this_page = req;
	} else {
		/* a subrequest */
		WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
		WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
		req->wb_head = prev->wb_head;
		req->wb_this_page = prev->wb_this_page;
		prev->wb_this_page = req;

		/* All subrequests take a ref on the head request until
		 * nfs_page_group_destroy is called */
		kref_get(&req->wb_head->wb_kref);

		/* grab extra ref and bump the request count if head request
		 * has extra ref from the write/commit path to handle handoff
		 * between write and commit lists. */
		if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
			inode = page_file_mapping(req->wb_page)->host;
			set_bit(PG_INODE_REF, &req->wb_flags);
			kref_get(&req->wb_kref);
			atomic_long_inc(&NFS_I(inode)->nrequests);
		}
	}
}
/*
 * nfs_page_group_destroy - sync the destruction of page groups
 * @req - request that no longer needs the page group
 *
 * releases the page group reference from each member once all
 * members have called this function.
 */
static void
nfs_page_group_destroy(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
	struct nfs_page *head = req->wb_head;
	struct nfs_page *tmp, *next;

	if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
		goto out;

	tmp = req;
	do {
		next = tmp->wb_this_page;
		/* unlink and free */
		tmp->wb_this_page = tmp;
		tmp->wb_head = tmp;
		nfs_free_request(tmp);
		tmp = next;
	} while (tmp != req);
out:
	/* subrequests must release the ref on the head request */
	if (head != req)
		nfs_release_request(head);
}
static struct nfs_page *
__nfs_create_request(struct nfs_lock_context *l_ctx, struct page *page,
		   unsigned int pgbase, unsigned int offset,
		   unsigned int count)
{
	struct nfs_page *req;
	struct nfs_open_context *ctx = l_ctx->open_context;

	if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
		return ERR_PTR(-EBADF);
	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	req->wb_lock_context = l_ctx;
	refcount_inc(&l_ctx->count);
	atomic_inc(&l_ctx->io_count);

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_page = page;
	if (page) {
		req->wb_index = page_index(page);
		get_page(page);
	}
	req->wb_offset  = offset;
	req->wb_pgbase	= pgbase;
	req->wb_bytes   = count;
	kref_init(&req->wb_kref);
	req->wb_nio = 0;
	return req;
}
/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @page: page to write
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * User should ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct nfs_lock_context *l_ctx = nfs_get_lock_context(ctx);
	struct nfs_page *ret;

	if (IS_ERR(l_ctx))
		return ERR_CAST(l_ctx);
	ret = __nfs_create_request(l_ctx, page, offset, offset, count);
	if (!IS_ERR(ret))
		nfs_page_group_init(ret, NULL);
	nfs_put_lock_context(l_ctx);
	return ret;
}
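/*
 * Example (illustrative sketch, not part of the original file): a caller
 * holding a locked page creates a request covering the whole page and
 * later drops its reference:
 *
 *	struct nfs_page *req;
 *
 *	req = nfs_create_request(ctx, page, 0, PAGE_SIZE);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	(... hand req to a pageio descriptor ...)
 *	nfs_release_request(req);
 */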
static struct nfs_page *
nfs_create_subreq(struct nfs_page *req,
		  unsigned int pgbase,
		  unsigned int offset,
		  unsigned int count)
{
	struct nfs_page *last;
	struct nfs_page *ret;

	ret = __nfs_create_request(req->wb_lock_context, req->wb_page,
			pgbase, offset, count);
	if (!IS_ERR(ret)) {
		/* find the last request */
		for (last = req->wb_head;
		     last->wb_this_page != req->wb_head;
		     last = last->wb_this_page)
			;

		nfs_lock_request(ret);
		ret->wb_index = req->wb_index;
		nfs_page_group_init(ret, last);
		ret->wb_nio = req->wb_nio;
	}
	return ret;
}
/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: pointer to request
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_atomic();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_atomic();
	if (!test_bit(PG_CONTENDED2, &req->wb_flags))
		return;
	wake_up_bit(&req->wb_flags, PG_BUSY);
}
/**
 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 * @req: pointer to request
 */
void nfs_unlock_and_release_request(struct nfs_page *req)
{
	nfs_unlock_request(req);
	nfs_release_request(req);
}
/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clean up
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	struct nfs_lock_context *l_ctx = req->wb_lock_context;
	struct nfs_open_context *ctx;

	if (page != NULL) {
		put_page(page);
		req->wb_page = NULL;
	}
	if (l_ctx != NULL) {
		if (atomic_dec_and_test(&l_ctx->io_count)) {
			wake_up_var(&l_ctx->io_count);
			ctx = l_ctx->open_context;
			if (test_bit(NFS_CONTEXT_UNLOCK, &ctx->flags))
				rpc_wake_up(&NFS_SERVER(d_inode(ctx->dentry))->uoc_rpcwaitq);
		}
		nfs_put_lock_context(l_ctx);
		req->wb_lock_context = NULL;
	}
}
void nfs_free_request(struct nfs_page *req)
{
	WARN_ON_ONCE(req->wb_this_page != req);

	/* extra debug: make sure no sync bits are still set */
	WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags));

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}

/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_page_group_destroy);
}
EXPORT_SYMBOL_GPL(nfs_release_request);
/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by fatal signals only.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	if (!test_bit(PG_BUSY, &req->wb_flags))
		return 0;
	set_bit(PG_CONTENDED2, &req->wb_flags);
	smp_mb__after_atomic();
	return wait_on_bit_io(&req->wb_flags, PG_BUSY,
			      TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(nfs_wait_on_request);
/*
 * nfs_generic_pg_test - determine if requests can be coalesced
 * @desc: pointer to descriptor
 * @prev: previous request in desc, or NULL
 * @req: this request
 *
 * Returns zero if @req cannot be coalesced into @desc, otherwise it returns
 * the size of the request.
 */
size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *prev, struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (mirror->pg_count > mirror->pg_bsize) {
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}

	/*
	 * Limit the request size so that we can still allocate a page array
	 * for it without upsetting the slab allocator.
	 */
	if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
			sizeof(struct page *) > PAGE_SIZE)
		return 0;

	return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes);
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
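/*
 * Worked example for the limit above (illustrative, assuming 4K pages and
 * 8-byte pointers): the page array must fit in a single page, so at most
 * PAGE_SIZE / sizeof(struct page *) = 4096 / 8 = 512 page pointers are
 * allowed, i.e. one RPC can cover at most 512 * 4096 bytes = 2MB of page
 * data regardless of the server's advertised rsize/wsize.
 */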
struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
{
	struct nfs_pgio_header *hdr = ops->rw_alloc_header();

	if (hdr) {
		INIT_LIST_HEAD(&hdr->pages);
		hdr->rw_ops = ops;
	}
	return hdr;
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);
/**
 * nfs_pgio_data_destroy - make @hdr suitable for reuse
 *
 * Frees memory and releases refs from nfs_generic_pgio, so that it may
 * be called again.
 *
 * @hdr: A header that has had nfs_generic_pgio called
 */
static void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
{
	if (hdr->args.context)
		put_nfs_open_context(hdr->args.context);
	if (hdr->page_array.pagevec != hdr->page_array.page_array)
		kfree(hdr->page_array.pagevec);
}
/**
 * nfs_pgio_header_free - Free a read or write header
 * @hdr: The header to free
 */
void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
{
	nfs_pgio_data_destroy(hdr);
	hdr->rw_ops->rw_free_header(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
/**
 * nfs_pgio_rpcsetup - Set up arguments for a pageio call
 * @hdr: The pageio hdr
 * @count: Number of bytes to read
 * @how: How to commit data (writes only)
 * @cinfo: Commit information for the call (writes only)
 */
static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
			      unsigned int count,
			      int how, struct nfs_commit_info *cinfo)
{
	struct nfs_page *req = hdr->req;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with hdr->commit et al. */

	hdr->args.fh     = NFS_FH(hdr->inode);
	hdr->args.offset = req_offset(req);
	/* pnfs_set_layoutcommit needs this */
	hdr->mds_offset = hdr->args.offset;
	hdr->args.pgbase = req->wb_pgbase;
	hdr->args.pages  = hdr->page_array.pagevec;
	hdr->args.count  = count;
	hdr->args.context = get_nfs_open_context(nfs_req_openctx(req));
	hdr->args.lock_context = req->wb_lock_context;
	hdr->args.stable  = NFS_UNSTABLE;
	switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
	case 0:
		break;
	case FLUSH_COND_STABLE:
		if (nfs_reqs_to_commit(cinfo))
			break;
		fallthrough;
	default:
		hdr->args.stable = NFS_FILE_SYNC;
	}

	hdr->res.fattr   = &hdr->fattr;
	hdr->res.count   = count;
	hdr->res.eof     = 0;
	hdr->res.verf    = &hdr->verf;
	nfs_fattr_init(&hdr->fattr);
}
/**
 * nfs_pgio_prepare - Prepare pageio hdr to go over the wire
 * @task: The current task
 * @calldata: pageio header to prepare
 */
static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	int err;
	err = NFS_PROTO(hdr->inode)->pgio_rpc_prepare(task, hdr);
	if (err)
		rpc_exit(task, err);
}
int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
		      const struct cred *cred, const struct nfs_rpc_ops *rpc_ops,
		      const struct rpc_call_ops *call_ops, int how, int flags)
{
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &hdr->args,
		.rpc_resp = &hdr->res,
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.task = &hdr->task,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = hdr,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | flags,
	};

	hdr->rw_ops->rw_initiate(hdr, &msg, rpc_ops, &task_setup_data, how);

	dprintk("NFS: initiated pgio call "
		"(req %s/%llu, %u bytes @ offset %llu)\n",
		hdr->inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(hdr->inode),
		hdr->args.count,
		(unsigned long long)hdr->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_initiate_pgio);
/**
 * nfs_pgio_error - Clean up from a pageio error
 * @hdr: pageio header
 */
static void nfs_pgio_error(struct nfs_pgio_header *hdr)
{
	set_bit(NFS_IOHDR_REDO, &hdr->flags);
	hdr->completion_ops->completion(hdr);
}
/**
 * nfs_pgio_release - Release pageio data
 * @calldata: The pageio header to release
 */
static void nfs_pgio_release(void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	hdr->completion_ops->completion(hdr);
}
static void nfs_pageio_mirror_init(struct nfs_pgio_mirror *mirror,
				   unsigned int bsize)
{
	INIT_LIST_HEAD(&mirror->pg_list);
	mirror->pg_bytes_written = 0;
	mirror->pg_count = 0;
	mirror->pg_bsize = bsize;
	mirror->pg_base = 0;
	mirror->pg_recoalesce = 0;
}
/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @compl_ops: pointer to pageio completion operations
 * @rw_ops: pointer to nfs read/write operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     const struct nfs_pageio_ops *pg_ops,
		     const struct nfs_pgio_completion_ops *compl_ops,
		     const struct nfs_rw_ops *rw_ops,
		     size_t bsize,
		     int io_flags)
{
	desc->pg_moreio = 0;
	desc->pg_inode = inode;
	desc->pg_ops = pg_ops;
	desc->pg_completion_ops = compl_ops;
	desc->pg_rw_ops = rw_ops;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
	desc->pg_lseg = NULL;
	desc->pg_io_completion = NULL;
	desc->pg_dreq = NULL;
	desc->pg_bsize = bsize;

	desc->pg_mirror_count = 1;
	desc->pg_mirror_idx = 0;

	desc->pg_mirrors_dynamic = NULL;
	desc->pg_mirrors = desc->pg_mirrors_static;
	nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
	desc->pg_maxretrans = 0;
}
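/*
 * Example (illustrative sketch, not part of the original file): the generic
 * read path sets up a descriptor along these lines, using the generic
 * nfs_pgio_rw_ops defined at the bottom of this file (compl_ops stands in
 * for the caller's completion ops):
 *
 *	struct nfs_pageio_descriptor pgio;
 *
 *	nfs_pageio_init(&pgio, inode, &nfs_pgio_rw_ops, compl_ops,
 *			&nfs_rw_read_ops, NFS_SERVER(inode)->rsize, 0);
 */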
/**
 * nfs_pgio_result - Basic pageio error handling
 * @task: The task that ran
 * @calldata: Pageio header to check
 */
static void nfs_pgio_result(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	struct inode *inode = hdr->inode;

	dprintk("NFS: %s: %5u, (status %d)\n", __func__,
		task->tk_pid, task->tk_status);

	if (hdr->rw_ops->rw_done(task, hdr, inode) != 0)
		return;
	if (task->tk_status < 0)
		nfs_set_pgio_error(hdr, task->tk_status, hdr->args.offset);
	else
		hdr->rw_ops->rw_result(task, hdr);
}
/*
 * Create an RPC task for the given read or write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
		     struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	struct nfs_page		*req;
	struct page		**pages,
				*last_page;
	struct list_head *head = &mirror->pg_list;
	struct nfs_commit_info cinfo;
	struct nfs_page_array *pg_array = &hdr->page_array;
	unsigned int pagecount, pageused;
	gfp_t gfp_flags = GFP_KERNEL;

	pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count);
	pg_array->npages = pagecount;

	if (pagecount <= ARRAY_SIZE(pg_array->page_array))
		pg_array->pagevec = pg_array->page_array;
	else {
		pg_array->pagevec = kcalloc(pagecount, sizeof(struct page *), gfp_flags);
		if (!pg_array->pagevec) {
			pg_array->npages = 0;
			nfs_pgio_error(hdr);
			desc->pg_error = -ENOMEM;
			return desc->pg_error;
		}
	}

	nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
	pages = hdr->page_array.pagevec;
	last_page = NULL;
	pageused = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_move_request(req, &hdr->pages);

		if (!last_page || last_page != req->wb_page) {
			pageused++;
			if (pageused > pagecount)
				break;
			*pages++ = last_page = req->wb_page;
		}
	}
	if (WARN_ON_ONCE(pageused != pagecount)) {
		nfs_pgio_error(hdr);
		desc->pg_error = -EINVAL;
		return desc->pg_error;
	}

	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
	    (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
		desc->pg_ioflags &= ~FLUSH_COND_STABLE;

	/* Set up the argument struct */
	nfs_pgio_rpcsetup(hdr, mirror->pg_count, desc->pg_ioflags, &cinfo);
	desc->pg_rpc_callops = &nfs_pgio_common_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_generic_pgio);
static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
	ret = nfs_generic_pgio(desc, hdr);
	if (ret == 0)
		ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
					hdr,
					hdr->cred,
					NFS_PROTO(hdr->inode),
					desc->pg_rpc_callops,
					desc->pg_ioflags,
					RPC_TASK_CRED_NOREF);
	return ret;
}
static struct nfs_pgio_mirror *
nfs_pageio_alloc_mirrors(struct nfs_pageio_descriptor *desc,
			 unsigned int mirror_count)
{
	struct nfs_pgio_mirror *ret;
	unsigned int i;

	kfree(desc->pg_mirrors_dynamic);
	desc->pg_mirrors_dynamic = NULL;
	if (mirror_count == 1)
		return desc->pg_mirrors_static;
	ret = kmalloc_array(mirror_count, sizeof(*ret), GFP_KERNEL);
	if (ret != NULL) {
		for (i = 0; i < mirror_count; i++)
			nfs_pageio_mirror_init(&ret[i], desc->pg_bsize);
		desc->pg_mirrors_dynamic = ret;
	}
	return ret;
}
/*
 * nfs_pageio_setup_mirroring - determine if mirroring is to be used
 *				by calling the pg_get_mirror_count op
 */
static void nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
				       struct nfs_page *req)
{
	unsigned int mirror_count = 1;

	if (pgio->pg_ops->pg_get_mirror_count)
		mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
	if (mirror_count == pgio->pg_mirror_count || pgio->pg_error < 0)
		return;

	if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX) {
		pgio->pg_error = -EINVAL;
		return;
	}

	pgio->pg_mirrors = nfs_pageio_alloc_mirrors(pgio, mirror_count);
	if (pgio->pg_mirrors == NULL) {
		pgio->pg_error = -ENOMEM;
		pgio->pg_mirrors = pgio->pg_mirrors_static;
		return;
	}
	pgio->pg_mirror_count = mirror_count;
}
static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio)
{
	pgio->pg_mirror_count = 1;
	pgio->pg_mirror_idx = 0;
	pgio->pg_mirrors = pgio->pg_mirrors_static;
	kfree(pgio->pg_mirrors_dynamic);
	pgio->pg_mirrors_dynamic = NULL;
}
static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
		const struct nfs_lock_context *l2)
{
	return l1->lockowner == l2->lockowner;
}
/**
 * nfs_coalesce_size - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 * @pgio: pointer to nfs_pageio_descriptor
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Returns size of the request that can be coalesced
 */
static unsigned int nfs_coalesce_size(struct nfs_page *prev,
				      struct nfs_page *req,
				      struct nfs_pageio_descriptor *pgio)
{
	struct file_lock_context *flctx;

	if (prev) {
		if (!nfs_match_open_context(nfs_req_openctx(req), nfs_req_openctx(prev)))
			return 0;
		flctx = d_inode(nfs_req_openctx(req)->dentry)->i_flctx;
		if (flctx != NULL &&
		    !(list_empty_careful(&flctx->flc_posix) &&
		      list_empty_careful(&flctx->flc_flock)) &&
		    !nfs_match_lock_context(req->wb_lock_context,
					    prev->wb_lock_context))
			return 0;
		if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
			return 0;
		if (req->wb_page == prev->wb_page) {
			if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes)
				return 0;
		} else {
			if (req->wb_pgbase != 0 ||
			    prev->wb_pgbase + prev->wb_bytes != PAGE_SIZE)
				return 0;
		}
	}
	return pgio->pg_ops->pg_test(pgio, prev, req);
}
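/*
 * Example of the contiguity rule above (illustrative, assuming 4K pages):
 * a request covering bytes 0-2047 of a page (wb_pgbase 0, wb_bytes 2048)
 * can be followed by one starting at byte 2048 of the same page, since
 * req_offset(req) == req_offset(prev) + prev->wb_bytes. A request on the
 * next page coalesces only if prev filled its page out to PAGE_SIZE and
 * req starts at wb_pgbase 0.
 */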
/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * If the request 'req' was successfully coalesced into the existing list
 * of pages 'desc', it returns the size of req.
 */
static unsigned int
nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
		struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	struct nfs_page *prev = NULL;
	unsigned int size;

	if (mirror->pg_count != 0) {
		prev = nfs_list_entry(mirror->pg_list.prev);
	} else {
		if (desc->pg_ops->pg_init)
			desc->pg_ops->pg_init(desc, req);
		if (desc->pg_error < 0)
			return 0;
		mirror->pg_base = req->wb_pgbase;
	}

	if (desc->pg_maxretrans && req->wb_nio > desc->pg_maxretrans) {
		if (NFS_SERVER(desc->pg_inode)->flags & NFS_MOUNT_SOFTERR)
			desc->pg_error = -ETIMEDOUT;
		else
			desc->pg_error = -EIO;
		return 0;
	}

	size = nfs_coalesce_size(prev, req, desc);
	if (size < req->wb_bytes)
		return size;
	nfs_list_move_request(req, &mirror->pg_list);
	mirror->pg_count += req->wb_bytes;
	return req->wb_bytes;
}
/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!list_empty(&mirror->pg_list)) {
		int error = desc->pg_ops->pg_doio(desc);
		if (error < 0)
			desc->pg_error = error;
		else
			mirror->pg_bytes_written += mirror->pg_count;
	}
	if (list_empty(&mirror->pg_list)) {
		mirror->pg_count = 0;
		mirror->pg_base = 0;
	}
}
static void
nfs_pageio_cleanup_request(struct nfs_pageio_descriptor *desc,
		struct nfs_page *req)
{
	LIST_HEAD(head);

	nfs_list_move_request(req, &head);
	desc->pg_completion_ops->error_cleanup(&head, desc->pg_error);
}
/**
 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * This may split a request into subrequests which are all part of the
 * same page group. If so, it will submit @req as the last one, to ensure
 * the pointer to @req is still valid in case of failure.
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	struct nfs_page *subreq;
	unsigned int size, subreq_size;

	nfs_page_group_lock(req);

	subreq = req;
	subreq_size = subreq->wb_bytes;
	for(;;) {
		size = nfs_pageio_do_add_request(desc, subreq);
		if (size == subreq_size) {
			/* We successfully submitted a request */
			if (subreq == req)
				break;
			req->wb_pgbase += size;
			req->wb_bytes -= size;
			req->wb_offset += size;
			subreq_size = req->wb_bytes;
			subreq = req;
			continue;
		}
		if (WARN_ON_ONCE(subreq != req)) {
			nfs_page_group_unlock(req);
			nfs_pageio_cleanup_request(desc, subreq);
			subreq = req;
			subreq_size = req->wb_bytes;
			nfs_page_group_lock(req);
		}
		if (!size) {
			/* Can't coalesce any more, so do I/O */
			nfs_page_group_unlock(req);
			desc->pg_moreio = 1;
			nfs_pageio_doio(desc);
			if (desc->pg_error < 0 || mirror->pg_recoalesce)
				return 0;
			/* retry add_request for this subreq */
			nfs_page_group_lock(req);
			continue;
		}
		subreq = nfs_create_subreq(req, req->wb_pgbase,
				req->wb_offset, size);
		if (IS_ERR(subreq))
			goto err_ptr;
		subreq_size = size;
	}

	nfs_page_group_unlock(req);
	return 1;
err_ptr:
	desc->pg_error = PTR_ERR(subreq);
	nfs_page_group_unlock(req);
	return 0;
}
static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	LIST_HEAD(head);

	do {
		list_splice_init(&mirror->pg_list, &head);
		mirror->pg_bytes_written -= mirror->pg_count;
		mirror->pg_count = 0;
		mirror->pg_base = 0;
		mirror->pg_recoalesce = 0;

		while (!list_empty(&head)) {
			struct nfs_page *req;

			req = list_first_entry(&head, struct nfs_page, wb_list);
			if (__nfs_pageio_add_request(desc, req))
				continue;
			if (desc->pg_error < 0) {
				list_splice_tail(&head, &mirror->pg_list);
				mirror->pg_recoalesce = 1;
				return 0;
			}
			break;
		}
	} while (mirror->pg_recoalesce);
	return 1;
}
static int nfs_pageio_add_request_mirror(struct nfs_pageio_descriptor *desc,
		struct nfs_page *req)
{
	int ret;

	do {
		ret = __nfs_pageio_add_request(desc, req);
		if (ret)
			break;
		if (desc->pg_error < 0)
			break;
		ret = nfs_do_recoalesce(desc);
	} while (ret);

	return ret;
}
static void nfs_pageio_error_cleanup(struct nfs_pageio_descriptor *desc)
{
	u32 midx;
	struct nfs_pgio_mirror *mirror;

	if (!desc->pg_error)
		return;

	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
		mirror = &desc->pg_mirrors[midx];
		desc->pg_completion_ops->error_cleanup(&mirror->pg_list,
				desc->pg_error);
	}
}
int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	u32 midx;
	unsigned int pgbase, offset, bytes;
	struct nfs_page *dupreq;

	pgbase = req->wb_pgbase;
	offset = req->wb_offset;
	bytes = req->wb_bytes;

	nfs_pageio_setup_mirroring(desc, req);
	if (desc->pg_error < 0)
		goto out_failed;

	/* Create the mirror instances first, and fire them off */
	for (midx = 1; midx < desc->pg_mirror_count; midx++) {
		nfs_page_group_lock(req);

		dupreq = nfs_create_subreq(req,
				pgbase, offset, bytes);

		nfs_page_group_unlock(req);
		if (IS_ERR(dupreq)) {
			desc->pg_error = PTR_ERR(dupreq);
			goto out_failed;
		}

		desc->pg_mirror_idx = midx;
		if (!nfs_pageio_add_request_mirror(desc, dupreq))
			goto out_cleanup_subreq;
	}

	desc->pg_mirror_idx = 0;
	if (!nfs_pageio_add_request_mirror(desc, req))
		goto out_failed;

	return 1;

out_cleanup_subreq:
	nfs_pageio_cleanup_request(desc, dupreq);
out_failed:
	nfs_pageio_error_cleanup(desc);
	return 0;
}
/*
 * nfs_pageio_complete_mirror - Complete I/O on the current mirror of an
 *				nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 * @mirror_idx: index of the mirror to complete
 */
static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
				       u32 mirror_idx)
{
	struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[mirror_idx];
	u32 restore_idx = desc->pg_mirror_idx;

	desc->pg_mirror_idx = mirror_idx;
	for (;;) {
		nfs_pageio_doio(desc);
		if (desc->pg_error < 0 || !mirror->pg_recoalesce)
			break;
		if (!nfs_do_recoalesce(desc))
			break;
	}
	desc->pg_mirror_idx = restore_idx;
}
/*
 * nfs_pageio_resend - Transfer requests to new descriptor and resend
 * @hdr - the pgio header to move requests from
 * @desc - the pageio descriptor to add requests to
 *
 * Try to move each request (nfs_page) from @hdr to @desc then attempt
 * to send them.
 *
 * Returns 0 on success and < 0 on error.
 */
int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
		      struct nfs_pgio_header *hdr)
{
	LIST_HEAD(pages);

	desc->pg_io_completion = hdr->io_completion;
	desc->pg_dreq = hdr->dreq;
	list_splice_init(&hdr->pages, &pages);
	while (!list_empty(&pages)) {
		struct nfs_page *req = nfs_list_entry(pages.next);

		if (!nfs_pageio_add_request(desc, req))
			break;
	}
	nfs_pageio_complete(desc);
	if (!list_empty(&pages)) {
		int err = desc->pg_error < 0 ? desc->pg_error : -EIO;
		hdr->completion_ops->error_cleanup(&pages, err);
		nfs_set_pgio_error(hdr, err, hdr->io_start);
		return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_pageio_resend);
/**
 * nfs_pageio_complete - Complete I/O then cleanup an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	u32 midx;

	for (midx = 0; midx < desc->pg_mirror_count; midx++)
		nfs_pageio_complete_mirror(desc, midx);

	if (desc->pg_error < 0)
		nfs_pageio_error_cleanup(desc);
	if (desc->pg_ops->pg_cleanup)
		desc->pg_ops->pg_cleanup(desc);
	nfs_pageio_cleanup_mirroring(desc);
}
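/*
 * Example (illustrative sketch, not part of the original file): the usual
 * life cycle of a descriptor is init, add requests, then complete:
 *
 *	nfs_pageio_init(&pgio, ...);
 *	while ((req = next_request()) != NULL)	(next_request() is hypothetical)
 *		if (!nfs_pageio_add_request(&pgio, req))
 *			break;
 *	nfs_pageio_complete(&pgio);
 */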
/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	struct nfs_pgio_mirror *mirror;
	struct nfs_page *prev;
	u32 midx;

	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
		mirror = &desc->pg_mirrors[midx];
		if (!list_empty(&mirror->pg_list)) {
			prev = nfs_list_entry(mirror->pg_list.prev);
			if (index != prev->wb_index + 1) {
				nfs_pageio_complete(desc);
				break;
			}
		}
	}
}
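/*
 * Example (illustrative): if the descriptor currently holds pages 5-8 and
 * the caller is about to wait on a request for page 12, calling
 * nfs_pageio_cond_complete(desc, 12) flushes pages 5-8 first, since 12 is
 * not contiguous with 8; waiting while holding a non-contiguous range
 * could otherwise deadlock.
 */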
/*
 * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1)
 */
void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio)
{
	nfs_pageio_complete(pgio);
}
int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}
void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}
static const struct rpc_call_ops nfs_pgio_common_ops = {
	.rpc_call_prepare = nfs_pgio_prepare,
	.rpc_call_done = nfs_pgio_result,
	.rpc_release = nfs_pgio_release,
};
const struct nfs_pageio_ops nfs_pgio_rw_ops = {
	.pg_test = nfs_generic_pg_test,
	.pg_doio = nfs_generic_pg_pgios,
};