// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>
#include <linux/filelock.h>

#include "internal.h"
#include "pnfs.h"
#include "nfstrace.h"
#include "fscache.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE
static struct kmem_cache *nfs_page_cachep;
static const struct rpc_call_ops nfs_pgio_common_ops;

struct nfs_page_iter_page {
	const struct nfs_page *req;
	size_t count;
};
static void nfs_page_iter_page_init(struct nfs_page_iter_page *i,
				    const struct nfs_page *req)
{
	i->req = req;
	i->count = 0;
}

static void nfs_page_iter_page_advance(struct nfs_page_iter_page *i, size_t sz)
{
	const struct nfs_page *req = i->req;
	size_t tmp = i->count + sz;

	i->count = (tmp < req->wb_bytes) ? tmp : req->wb_bytes;
}

static struct page *nfs_page_iter_page_get(struct nfs_page_iter_page *i)
{
	const struct nfs_page *req = i->req;
	struct page *page = NULL;

	if (i->count != req->wb_bytes) {
		size_t base = i->count + req->wb_pgbase;
		size_t len = PAGE_SIZE - offset_in_page(base);

		page = nfs_page_to_page(req, base);
		nfs_page_iter_page_advance(i, len);
	}
	return page;
}
static struct nfs_pgio_mirror *
nfs_pgio_get_mirror(struct nfs_pageio_descriptor *desc, u32 idx)
{
	if (desc->pg_ops->pg_get_mirror)
		return desc->pg_ops->pg_get_mirror(desc, idx);
	return &desc->pg_mirrors[0];
}

struct nfs_pgio_mirror *
nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc)
{
	return nfs_pgio_get_mirror(desc, desc->pg_mirror_idx);
}
EXPORT_SYMBOL_GPL(nfs_pgio_current_mirror);

static u32
nfs_pgio_set_current_mirror(struct nfs_pageio_descriptor *desc, u32 idx)
{
	if (desc->pg_ops->pg_set_mirror)
		return desc->pg_ops->pg_set_mirror(desc, idx);
	return desc->pg_mirror_idx;
}
void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
		       struct nfs_pgio_header *hdr,
		       void (*release)(struct nfs_pgio_header *hdr))
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	hdr->req = nfs_list_entry(mirror->pg_list.next);
	hdr->inode = desc->pg_inode;
	hdr->cred = nfs_req_openctx(hdr->req)->cred;
	hdr->io_start = req_offset(hdr->req);
	hdr->good_bytes = mirror->pg_count;
	hdr->io_completion = desc->pg_io_completion;
	hdr->dreq = desc->pg_dreq;
	nfs_netfs_set_pgio_header(hdr, desc);
	hdr->release = release;
	hdr->completion_ops = desc->pg_completion_ops;
	if (hdr->completion_ops->init_hdr)
		hdr->completion_ops->init_hdr(hdr);

	hdr->pgio_mirror_idx = desc->pg_mirror_idx;
}
EXPORT_SYMBOL_GPL(nfs_pgheader_init);
void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
	unsigned int new = pos - hdr->io_start;

	trace_nfs_pgio_error(hdr, error, pos);
	if (hdr->good_bytes > new) {
		hdr->good_bytes = new;
		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
		if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags))
			hdr->error = error;
	}
}

static inline struct nfs_page *nfs_page_alloc(void)
{
	struct nfs_page *p =
		kmem_cache_zalloc(nfs_page_cachep, nfs_io_gfp_mask());
	if (p)
		INIT_LIST_HEAD(&p->wb_list);
	return p;
}
static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_iocounter_wait - wait for i/o to complete
 * @l_ctx: nfs_lock_context with io_counter to use
 *
 * returns -ERESTARTSYS if interrupted by a fatal signal.
 * Otherwise returns 0 once the io_count hits 0.
 */
int
nfs_iocounter_wait(struct nfs_lock_context *l_ctx)
{
	return wait_var_event_killable(&l_ctx->io_count,
				       !atomic_read(&l_ctx->io_count));
}
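
/*
 * The counter waited on above is raised by nfs_page_create() via
 * atomic_inc(&l_ctx->io_count) and dropped in nfs_clear_request(),
 * whose atomic_dec_and_test() pairs with the wake_up_var() that
 * terminates this wait.
 */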
/**
 * nfs_async_iocounter_wait - wait on a rpc_waitqueue for I/O
 * to complete
 * @task: the rpc_task that should wait
 * @l_ctx: nfs_lock_context with io_counter to check
 *
 * Returns true if there is outstanding I/O to wait on and the
 * task has been put to sleep.
 */
bool
nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
{
	struct inode *inode = d_inode(l_ctx->open_context->dentry);
	bool ret = false;

	if (atomic_read(&l_ctx->io_count) > 0) {
		rpc_sleep_on(&NFS_SERVER(inode)->uoc_rpcwaitq, task, NULL);
		ret = true;
	}

	if (atomic_read(&l_ctx->io_count) == 0) {
		rpc_wake_up_queued_task(&NFS_SERVER(inode)->uoc_rpcwaitq, task);
		ret = false;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);
/*
 * nfs_page_set_headlock - set the request PG_HEADLOCK
 * @req: request that is to be locked
 *
 * this lock must be held when modifying req->wb_head
 *
 * return 0 on success, < 0 on error
 */
int
nfs_page_set_headlock(struct nfs_page *req)
{
	if (!test_and_set_bit(PG_HEADLOCK, &req->wb_flags))
		return 0;

	set_bit(PG_CONTENDED1, &req->wb_flags);
	smp_mb__after_atomic();
	return wait_on_bit_lock(&req->wb_flags, PG_HEADLOCK,
				TASK_UNINTERRUPTIBLE);
}

/*
 * nfs_page_clear_headlock - clear the request PG_HEADLOCK
 * @req: request that is to be unlocked
 */
void
nfs_page_clear_headlock(struct nfs_page *req)
{
	clear_bit_unlock(PG_HEADLOCK, &req->wb_flags);
	smp_mb__after_atomic();
	if (!test_bit(PG_CONTENDED1, &req->wb_flags))
		return;
	wake_up_bit(&req->wb_flags, PG_HEADLOCK);
}
/*
 * nfs_page_group_lock - lock the head of the page group
 * @req: request in group that is to be locked
 *
 * this lock must be held when traversing or modifying the page
 * group list
 *
 * return 0 on success, < 0 on error
 */
int
nfs_page_group_lock(struct nfs_page *req)
{
	int ret;

	ret = nfs_page_set_headlock(req);
	if (ret || req->wb_head == req)
		return ret;
	return nfs_page_set_headlock(req->wb_head);
}

/*
 * nfs_page_group_unlock - unlock the head of the page group
 * @req: request in group that is to be unlocked
 */
void
nfs_page_group_unlock(struct nfs_page *req)
{
	if (req != req->wb_head)
		nfs_page_clear_headlock(req->wb_head);
	nfs_page_clear_headlock(req);
}
/*
 * nfs_page_group_sync_on_bit_locked
 *
 * must be called with page group lock held
 */
static bool
nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
{
	struct nfs_page *head = req->wb_head;
	struct nfs_page *tmp;

	WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_flags));
	WARN_ON_ONCE(test_and_set_bit(bit, &req->wb_flags));

	tmp = req->wb_this_page;
	while (tmp != req) {
		if (!test_bit(bit, &tmp->wb_flags))
			return false;
		tmp = tmp->wb_this_page;
	}

	/* true! reset all bits */
	tmp = req;
	do {
		clear_bit(bit, &tmp->wb_flags);
		tmp = tmp->wb_this_page;
	} while (tmp != req);

	return true;
}

/*
 * nfs_page_group_sync_on_bit - set bit on current request, but only
 *   return true if the bit is set for all requests in page group
 * @req - request in page group
 * @bit - PG_* bit that is used to sync page group
 */
bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
{
	bool ret;

	nfs_page_group_lock(req);
	ret = nfs_page_group_sync_on_bit_locked(req, bit);
	nfs_page_group_unlock(req);

	return ret;
}
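
/*
 * Example: nfs_page_group_destroy() below calls
 * nfs_page_group_sync_on_bit(req, PG_TEARDOWN). Only the last group
 * member to set PG_TEARDOWN sees "true" and goes on to free the whole
 * group, so teardown runs exactly once per page group.
 */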
/*
 * nfs_page_group_init - Initialize the page group linkage for @req
 * @req - a new nfs request
 * @prev - the previous request in page group, or NULL if @req is the first
 *         or only request in the group (the head).
 */
static inline void
nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
{
	struct inode *inode;

	WARN_ON_ONCE(prev == req);

	if (!prev) {
		/* a head request */
		req->wb_head = req;
		req->wb_this_page = req;
	} else {
		/* a subrequest */
		WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
		WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
		req->wb_head = prev->wb_head;
		req->wb_this_page = prev->wb_this_page;
		prev->wb_this_page = req;

		/* All subrequests take a ref on the head request until
		 * nfs_page_group_destroy is called */
		kref_get(&req->wb_head->wb_kref);

		/* grab extra ref and bump the request count if head request
		 * has extra ref from the write/commit path to handle handoff
		 * between write and commit lists. */
		if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
			inode = nfs_page_to_inode(req);
			set_bit(PG_INODE_REF, &req->wb_flags);
			kref_get(&req->wb_kref);
			atomic_long_inc(&NFS_I(inode)->nrequests);
		}
	}
}
/*
 * nfs_page_group_destroy - sync the destruction of page groups
 * @req - request that no longer needs the page group
 *
 * releases the page group reference from each member once all
 * members have called this function.
 */
static void
nfs_page_group_destroy(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
	struct nfs_page *head = req->wb_head;
	struct nfs_page *tmp, *next;

	if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
		goto out;

	tmp = req;
	do {
		next = tmp->wb_this_page;
		/* unlink and free */
		tmp->wb_this_page = tmp;
		tmp->wb_head = tmp;
		nfs_free_request(tmp);
		tmp = next;
	} while (tmp != req);
out:
	/* subrequests must release the ref on the head request */
	if (head != req)
		nfs_release_request(head);
}
static struct nfs_page *nfs_page_create(struct nfs_lock_context *l_ctx,
					unsigned int pgbase, pgoff_t index,
					unsigned int offset, unsigned int count)
{
	struct nfs_page *req;
	struct nfs_open_context *ctx = l_ctx->open_context;

	if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
		return ERR_PTR(-EBADF);
	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	req->wb_lock_context = l_ctx;
	refcount_inc(&l_ctx->count);
	atomic_inc(&l_ctx->io_count);

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_pgbase = pgbase;
	req->wb_index = index;
	req->wb_offset = offset;
	req->wb_bytes = count;
	kref_init(&req->wb_kref);
	return req;
}
static void nfs_page_assign_folio(struct nfs_page *req, struct folio *folio)
{
	if (folio != NULL) {
		req->wb_folio = folio;
		folio_get(folio);
		set_bit(PG_FOLIO, &req->wb_flags);
	}
}

static void nfs_page_assign_page(struct nfs_page *req, struct page *page)
{
	if (page != NULL) {
		req->wb_page = page;
		get_page(page);
	}
}
/**
 * nfs_page_create_from_page - Create an NFS read/write request.
 * @ctx: open context to use
 * @page: page to write
 * @pgbase: starting offset within the page for the write
 * @offset: file offset for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * User should ensure it is safe to sleep in this function.
 */
struct nfs_page *nfs_page_create_from_page(struct nfs_open_context *ctx,
					   struct page *page,
					   unsigned int pgbase, loff_t offset,
					   unsigned int count)
{
	struct nfs_lock_context *l_ctx = nfs_get_lock_context(ctx);
	struct nfs_page *ret;

	if (IS_ERR(l_ctx))
		return ERR_CAST(l_ctx);
	ret = nfs_page_create(l_ctx, pgbase, offset >> PAGE_SHIFT,
			      offset_in_page(offset), count);
	if (!IS_ERR(ret)) {
		nfs_page_assign_page(ret, page);
		nfs_page_group_init(ret, NULL);
	}
	nfs_put_lock_context(l_ctx);
	return ret;
}
/**
 * nfs_page_create_from_folio - Create an NFS read/write request.
 * @ctx: open context to use
 * @folio: folio to write
 * @offset: starting offset within the folio for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * User should ensure it is safe to sleep in this function.
 */
struct nfs_page *nfs_page_create_from_folio(struct nfs_open_context *ctx,
					    struct folio *folio,
					    unsigned int offset,
					    unsigned int count)
{
	struct nfs_lock_context *l_ctx = nfs_get_lock_context(ctx);
	struct nfs_page *ret;

	if (IS_ERR(l_ctx))
		return ERR_CAST(l_ctx);
	ret = nfs_page_create(l_ctx, offset, folio->index, offset, count);
	if (!IS_ERR(ret)) {
		nfs_page_assign_folio(ret, folio);
		nfs_page_group_init(ret, NULL);
	}
	nfs_put_lock_context(l_ctx);
	return ret;
}
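
/*
 * Illustrative sketch (not upstream code): a minimal caller creating a
 * request for a locked folio. The open context and the error handling
 * policy are assumed to be supplied by the read/write paths.
 */
static struct nfs_page * __maybe_unused
nfs_page_create_from_folio_example(struct nfs_open_context *ctx,
				   struct folio *folio)
{
	/* Request covering the whole folio; the caller holds the folio lock. */
	return nfs_page_create_from_folio(ctx, folio, 0, folio_size(folio));
}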
static struct nfs_page *
nfs_create_subreq(struct nfs_page *req,
		  unsigned int pgbase,
		  unsigned int offset,
		  unsigned int count)
{
	struct nfs_page *last;
	struct nfs_page *ret;
	struct folio *folio = nfs_page_to_folio(req);
	struct page *page = nfs_page_to_page(req, pgbase);

	ret = nfs_page_create(req->wb_lock_context, pgbase, req->wb_index,
			      offset, count);
	if (!IS_ERR(ret)) {
		if (folio)
			nfs_page_assign_folio(ret, folio);
		else
			nfs_page_assign_page(ret, page);
		/* find the last request */
		for (last = req->wb_head;
		     last->wb_this_page != req->wb_head;
		     last = last->wb_this_page)
			;

		nfs_lock_request(ret);
		nfs_page_group_init(ret, last);
		ret->wb_nio = req->wb_nio;
	}
	return ret;
}
/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: pointer to request
 */
void nfs_unlock_request(struct nfs_page *req)
{
	clear_bit_unlock(PG_BUSY, &req->wb_flags);
	smp_mb__after_atomic();
	if (!test_bit(PG_CONTENDED2, &req->wb_flags))
		return;
	wake_up_bit(&req->wb_flags, PG_BUSY);
}

/**
 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 * @req: pointer to request
 */
void nfs_unlock_and_release_request(struct nfs_page *req)
{
	nfs_unlock_request(req);
	nfs_release_request(req);
}
/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clean up
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
	struct folio *folio = nfs_page_to_folio(req);
	struct page *page = req->wb_page;
	struct nfs_lock_context *l_ctx = req->wb_lock_context;
	struct nfs_open_context *ctx;

	if (folio != NULL) {
		folio_put(folio);
		req->wb_folio = NULL;
		clear_bit(PG_FOLIO, &req->wb_flags);
	} else if (page != NULL) {
		put_page(page);
		req->wb_page = NULL;
	}
	if (l_ctx != NULL) {
		if (atomic_dec_and_test(&l_ctx->io_count)) {
			wake_up_var(&l_ctx->io_count);
			ctx = l_ctx->open_context;
			if (test_bit(NFS_CONTEXT_UNLOCK, &ctx->flags))
				rpc_wake_up(&NFS_SERVER(d_inode(ctx->dentry))->uoc_rpcwaitq);
		}
		nfs_put_lock_context(l_ctx);
		req->wb_lock_context = NULL;
	}
}
/**
 * nfs_free_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
void nfs_free_request(struct nfs_page *req)
{
	WARN_ON_ONCE(req->wb_this_page != req);

	/* extra debug: make sure no sync bits are still set */
	WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags));

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}

void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_page_group_destroy);
}
EXPORT_SYMBOL_GPL(nfs_release_request);
/**
 * nfs_generic_pg_test - determine if requests can be coalesced
 * @desc: pointer to descriptor
 * @prev: previous request in desc, or NULL
 * @req: this request
 *
 * Returns zero if @req cannot be coalesced into @desc, otherwise it returns
 * the size of the request.
 */
size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *prev, struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (mirror->pg_count > mirror->pg_bsize) {
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}

	/*
	 * Limit the request size so that we can still allocate a page array
	 * for it without upsetting the slab allocator.
	 */
	if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
			sizeof(struct page *) > PAGE_SIZE)
		return 0;

	return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes);
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
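
/*
 * Worked example for the slab limit above, assuming 4 KiB pages and
 * 8-byte pointers: the page array for one RPC must fit in a single page,
 * i.e. PAGE_SIZE / sizeof(struct page *) = 512 entries, which caps a
 * coalesced request at 512 * 4 KiB = 2 MiB regardless of pg_bsize.
 */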
struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
{
	struct nfs_pgio_header *hdr = ops->rw_alloc_header();

	if (hdr) {
		INIT_LIST_HEAD(&hdr->pages);
		hdr->rw_ops = ops;
	}
	return hdr;
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);
/**
 * nfs_pgio_data_destroy - make @hdr suitable for reuse
 *
 * Frees memory and releases refs from nfs_generic_pgio, so that it may
 * be called again.
 *
 * @hdr: A header that has had nfs_generic_pgio called
 */
static void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
{
	if (hdr->args.context)
		put_nfs_open_context(hdr->args.context);
	if (hdr->page_array.pagevec != hdr->page_array.page_array)
		kfree(hdr->page_array.pagevec);
}

/**
 * nfs_pgio_header_free - Free a read or write header
 * @hdr: The header to free
 */
void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
{
	nfs_pgio_data_destroy(hdr);
	hdr->rw_ops->rw_free_header(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
/**
 * nfs_pgio_rpcsetup - Set up arguments for a pageio call
 * @hdr: The pageio hdr
 * @pgbase: base offset of the data within the first page
 * @count: Number of bytes to read
 * @how: How to commit data (writes only)
 * @cinfo: Commit information for the call (writes only)
 */
static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr, unsigned int pgbase,
			      unsigned int count, int how,
			      struct nfs_commit_info *cinfo)
{
	struct nfs_page *req = hdr->req;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with hdr->commit et al. */

	hdr->args.fh = NFS_FH(hdr->inode);
	hdr->args.offset = req_offset(req);
	/* pnfs_set_layoutcommit needs this */
	hdr->mds_offset = hdr->args.offset;
	hdr->args.pgbase = pgbase;
	hdr->args.pages = hdr->page_array.pagevec;
	hdr->args.count = count;
	hdr->args.context = get_nfs_open_context(nfs_req_openctx(req));
	hdr->args.lock_context = req->wb_lock_context;
	hdr->args.stable = NFS_UNSTABLE;
	switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
	case 0:
		break;
	case FLUSH_COND_STABLE:
		if (nfs_reqs_to_commit(cinfo))
			break;
		fallthrough;
	default:
		hdr->args.stable = NFS_FILE_SYNC;
	}

	hdr->res.fattr = &hdr->fattr;
	hdr->res.count = count;
	hdr->res.eof = 0;
	hdr->res.verf = &hdr->verf;
	nfs_fattr_init(&hdr->fattr);
}
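
/*
 * Example of how the switch above resolves: a FLUSH_COND_STABLE write
 * with commits still pending stays NFS_UNSTABLE (a COMMIT will follow
 * anyway), while one with nothing left to commit is upgraded to
 * NFS_FILE_SYNC so the separate COMMIT round trip can be skipped.
 */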
/**
 * nfs_pgio_prepare - Prepare pageio hdr to go over the wire
 * @task: The current task
 * @calldata: pageio header to prepare
 */
static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	int err;

	err = NFS_PROTO(hdr->inode)->pgio_rpc_prepare(task, hdr);
	if (err)
		rpc_exit(task, err);
}
int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
		      const struct cred *cred, const struct nfs_rpc_ops *rpc_ops,
		      const struct rpc_call_ops *call_ops, int how, int flags,
		      struct nfsd_file *localio)
{
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &hdr->args,
		.rpc_resp = &hdr->res,
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.task = &hdr->task,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = hdr,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | flags,
	};

	if (nfs_server_capable(hdr->inode, NFS_CAP_MOVEABLE))
		task_setup_data.flags |= RPC_TASK_MOVEABLE;

	hdr->rw_ops->rw_initiate(hdr, &msg, rpc_ops, &task_setup_data, how);

	dprintk("NFS: initiated pgio call "
		"(req %s/%llu, %u bytes @ offset %llu)\n",
		hdr->inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(hdr->inode),
		hdr->args.count,
		(unsigned long long)hdr->args.offset);

	if (localio)
		return nfs_local_doio(NFS_SERVER(hdr->inode)->nfs_client,
				      localio, hdr, call_ops);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_initiate_pgio);
/**
 * nfs_pgio_error - Clean up from a pageio error
 * @hdr: pageio header
 */
static void nfs_pgio_error(struct nfs_pgio_header *hdr)
{
	set_bit(NFS_IOHDR_REDO, &hdr->flags);
	hdr->completion_ops->completion(hdr);
}

/**
 * nfs_pgio_release - Release pageio data
 * @calldata: The pageio header to release
 */
static void nfs_pgio_release(void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;

	hdr->completion_ops->completion(hdr);
}
static void nfs_pageio_mirror_init(struct nfs_pgio_mirror *mirror,
				   size_t bsize)
{
	INIT_LIST_HEAD(&mirror->pg_list);
	mirror->pg_bytes_written = 0;
	mirror->pg_count = 0;
	mirror->pg_bsize = bsize;
	mirror->pg_base = 0;
	mirror->pg_recoalesce = 0;
}
/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @compl_ops: pointer to pageio completion operations
 * @rw_ops: pointer to nfs read/write operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     const struct nfs_pageio_ops *pg_ops,
		     const struct nfs_pgio_completion_ops *compl_ops,
		     const struct nfs_rw_ops *rw_ops,
		     size_t bsize,
		     int io_flags)
{
	desc->pg_moreio = 0;
	desc->pg_inode = inode;
	desc->pg_ops = pg_ops;
	desc->pg_completion_ops = compl_ops;
	desc->pg_rw_ops = rw_ops;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
	desc->pg_lseg = NULL;
	desc->pg_io_completion = NULL;
	desc->pg_dreq = NULL;
	nfs_netfs_reset_pageio_descriptor(desc);
	desc->pg_bsize = bsize;

	desc->pg_mirror_count = 1;
	desc->pg_mirror_idx = 0;

	desc->pg_mirrors_dynamic = NULL;
	desc->pg_mirrors = desc->pg_mirrors_static;
	nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
	desc->pg_maxretrans = 0;
}
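
/*
 * Illustrative sketch (not upstream code): the usual descriptor life
 * cycle as driven by the read/write paths. The ops tables, block size,
 * and request are assumed to come from the caller.
 */
static void __maybe_unused
nfs_pageio_example(struct inode *inode,
		   const struct nfs_pageio_ops *pg_ops,
		   const struct nfs_pgio_completion_ops *compl_ops,
		   const struct nfs_rw_ops *rw_ops,
		   struct nfs_page *req)
{
	struct nfs_pageio_descriptor desc;

	/* One on-stack descriptor per logical I/O. */
	nfs_pageio_init(&desc, inode, pg_ops, compl_ops, rw_ops,
			NFS_SERVER(inode)->rsize, 0);
	/*
	 * Coalesce requests one by one; on failure the descriptor has
	 * already run its error cleanup, so only completion is needed
	 * on the success path.
	 */
	if (nfs_pageio_add_request(&desc, req))
		nfs_pageio_complete(&desc);
}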
/**
 * nfs_pgio_result - Basic pageio error handling
 * @task: The task that ran
 * @calldata: Pageio header to check
 */
static void nfs_pgio_result(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	struct inode *inode = hdr->inode;

	if (hdr->rw_ops->rw_done(task, hdr, inode) != 0)
		return;
	if (task->tk_status < 0)
		nfs_set_pgio_error(hdr, task->tk_status, hdr->args.offset);
	else
		hdr->rw_ops->rw_result(task, hdr);
}
/*
 * Create an RPC task for the given read or write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
		     struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	struct nfs_page *req;
	struct page **pages,
		    *last_page;
	struct list_head *head = &mirror->pg_list;
	struct nfs_commit_info cinfo;
	struct nfs_page_array *pg_array = &hdr->page_array;
	unsigned int pagecount, pageused;
	unsigned int pg_base = offset_in_page(mirror->pg_base);
	gfp_t gfp_flags = nfs_io_gfp_mask();

	pagecount = nfs_page_array_len(pg_base, mirror->pg_count);
	pg_array->npages = pagecount;

	if (pagecount <= ARRAY_SIZE(pg_array->page_array))
		pg_array->pagevec = pg_array->page_array;
	else {
		pg_array->pagevec = kcalloc(pagecount, sizeof(struct page *), gfp_flags);
		if (!pg_array->pagevec) {
			pg_array->npages = 0;
			nfs_pgio_error(hdr);
			desc->pg_error = -ENOMEM;
			return desc->pg_error;
		}
	}

	nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
	pages = hdr->page_array.pagevec;
	last_page = NULL;
	pageused = 0;
	while (!list_empty(head)) {
		struct nfs_page_iter_page i;
		struct page *page;

		req = nfs_list_entry(head->next);
		nfs_list_move_request(req, &hdr->pages);

		if (req->wb_pgbase == 0)
			last_page = NULL;

		nfs_page_iter_page_init(&i, req);
		while ((page = nfs_page_iter_page_get(&i)) != NULL) {
			if (last_page != page) {
				pageused++;
				if (pageused > pagecount)
					goto full;
				*pages++ = last_page = page;
			}
		}
	}
full:
	if (WARN_ON_ONCE(pageused != pagecount)) {
		nfs_pgio_error(hdr);
		desc->pg_error = -EINVAL;
		return desc->pg_error;
	}

	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
	    (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
		desc->pg_ioflags &= ~FLUSH_COND_STABLE;

	/* Set up the argument struct */
	nfs_pgio_rpcsetup(hdr, pg_base, mirror->pg_count, desc->pg_ioflags,
			  &cinfo);
	desc->pg_rpc_callops = &nfs_pgio_common_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_generic_pgio);
static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;
	unsigned short task_flags = 0;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
	ret = nfs_generic_pgio(desc, hdr);
	if (ret == 0) {
		struct nfs_client *clp = NFS_SERVER(hdr->inode)->nfs_client;

		struct nfsd_file *localio =
			nfs_local_open_fh(clp, hdr->cred,
					  hdr->args.fh, hdr->args.context->mode);

		if (NFS_SERVER(hdr->inode)->nfs_client->cl_minorversion)
			task_flags = RPC_TASK_MOVEABLE;
		ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
					hdr,
					hdr->cred,
					NFS_PROTO(hdr->inode),
					desc->pg_rpc_callops,
					desc->pg_ioflags,
					RPC_TASK_CRED_NOREF | task_flags,
					localio);
	}
	return ret;
}
static struct nfs_pgio_mirror *
nfs_pageio_alloc_mirrors(struct nfs_pageio_descriptor *desc,
			 unsigned int mirror_count)
{
	struct nfs_pgio_mirror *ret;
	u32 i;

	kfree(desc->pg_mirrors_dynamic);
	desc->pg_mirrors_dynamic = NULL;
	if (mirror_count == 1)
		return desc->pg_mirrors_static;
	ret = kmalloc_array(mirror_count, sizeof(*ret), nfs_io_gfp_mask());
	if (ret != NULL) {
		for (i = 0; i < mirror_count; i++)
			nfs_pageio_mirror_init(&ret[i], desc->pg_bsize);
		desc->pg_mirrors_dynamic = ret;
	}
	return ret;
}
/*
 * nfs_pageio_setup_mirroring - determine if mirroring is to be used
 *				by calling the pg_get_mirror_count op
 */
static void nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
				       struct nfs_page *req)
{
	unsigned int mirror_count = 1;

	if (pgio->pg_ops->pg_get_mirror_count)
		mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
	if (mirror_count == pgio->pg_mirror_count || pgio->pg_error < 0)
		return;

	if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX) {
		pgio->pg_error = -EINVAL;
		return;
	}

	pgio->pg_mirrors = nfs_pageio_alloc_mirrors(pgio, mirror_count);
	if (pgio->pg_mirrors == NULL) {
		pgio->pg_error = -ENOMEM;
		pgio->pg_mirrors = pgio->pg_mirrors_static;
		return;
	}
	pgio->pg_mirror_count = mirror_count;
}

static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio)
{
	pgio->pg_mirror_count = 1;
	pgio->pg_mirror_idx = 0;
	pgio->pg_mirrors = pgio->pg_mirrors_static;
	kfree(pgio->pg_mirrors_dynamic);
	pgio->pg_mirrors_dynamic = NULL;
}
static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
				   const struct nfs_lock_context *l2)
{
	return l1->lockowner == l2->lockowner;
}
static bool nfs_page_is_contiguous(const struct nfs_page *prev,
				   const struct nfs_page *req)
{
	size_t prev_end = prev->wb_pgbase + prev->wb_bytes;

	if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
		return false;
	if (req->wb_pgbase == 0)
		return prev_end == nfs_page_max_length(prev);
	if (req->wb_pgbase == prev_end) {
		struct folio *folio = nfs_page_to_folio(req);

		if (folio)
			return folio == nfs_page_to_folio(prev);
		return req->wb_page == prev->wb_page;
	}
	return false;
}
/**
 * nfs_coalesce_size - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 * @pgio: pointer to nfs_pagio_descriptor
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Returns size of the request that can be coalesced
 */
static unsigned int nfs_coalesce_size(struct nfs_page *prev,
				      struct nfs_page *req,
				      struct nfs_pageio_descriptor *pgio)
{
	struct file_lock_context *flctx;

	if (prev) {
		if (!nfs_match_open_context(nfs_req_openctx(req), nfs_req_openctx(prev)))
			return 0;
		flctx = locks_inode_context(d_inode(nfs_req_openctx(req)->dentry));
		if (flctx != NULL &&
		    !(list_empty_careful(&flctx->flc_posix) &&
		      list_empty_careful(&flctx->flc_flock)) &&
		    !nfs_match_lock_context(req->wb_lock_context,
					    prev->wb_lock_context))
			return 0;
		if (!nfs_page_is_contiguous(prev, req))
			return 0;
	}
	return pgio->pg_ops->pg_test(pgio, prev, req);
}
/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * If the request 'req' was successfully coalesced into the existing list
 * of pages 'desc', it returns the size of req.
 */
static unsigned int
nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
			  struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	struct nfs_page *prev = NULL;
	unsigned int size;

	if (list_empty(&mirror->pg_list)) {
		if (desc->pg_ops->pg_init)
			desc->pg_ops->pg_init(desc, req);
		if (desc->pg_error < 0)
			return 0;
		mirror->pg_base = req->wb_pgbase;
		mirror->pg_count = 0;
		mirror->pg_recoalesce = 0;
	} else
		prev = nfs_list_entry(mirror->pg_list.prev);

	if (desc->pg_maxretrans && req->wb_nio > desc->pg_maxretrans) {
		if (NFS_SERVER(desc->pg_inode)->flags & NFS_MOUNT_SOFTERR)
			desc->pg_error = -ETIMEDOUT;
		else
			desc->pg_error = -EIO;
		return 0;
	}

	size = nfs_coalesce_size(prev, req, desc);
	if (size < req->wb_bytes)
		return size;
	nfs_list_move_request(req, &mirror->pg_list);
	mirror->pg_count += req->wb_bytes;
	return req->wb_bytes;
}
/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!list_empty(&mirror->pg_list)) {
		int error = desc->pg_ops->pg_doio(desc);

		if (error < 0)
			desc->pg_error = error;
		if (list_empty(&mirror->pg_list))
			mirror->pg_bytes_written += mirror->pg_count;
	}
}

static void
nfs_pageio_cleanup_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	LIST_HEAD(head);

	nfs_list_move_request(req, &head);
	desc->pg_completion_ops->error_cleanup(&head, desc->pg_error);
}
/**
 * __nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * This may split a request into subrequests which are all part of the
 * same page group. If so, it will submit @req as the last one, to ensure
 * the pointer to @req is still valid in case of failure.
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
				    struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	struct nfs_page *subreq;
	unsigned int size, subreq_size;

	nfs_page_group_lock(req);

	subreq = req;
	subreq_size = subreq->wb_bytes;
	for (;;) {
		size = nfs_pageio_do_add_request(desc, subreq);
		if (size == subreq_size) {
			/* We successfully submitted a request */
			if (subreq == req)
				break;
			req->wb_pgbase += size;
			req->wb_bytes -= size;
			req->wb_offset += size;
			subreq_size = req->wb_bytes;
			subreq = req;
			continue;
		}
		if (WARN_ON_ONCE(subreq != req)) {
			nfs_page_group_unlock(req);
			nfs_pageio_cleanup_request(desc, subreq);
			subreq = req;
			subreq_size = req->wb_bytes;
			nfs_page_group_lock(req);
		}
		if (!size) {
			/* Can't coalesce any more, so do I/O */
			nfs_page_group_unlock(req);
			desc->pg_moreio = 1;
			nfs_pageio_doio(desc);
			if (desc->pg_error < 0 || mirror->pg_recoalesce)
				return 0;
			/* retry add_request for this subreq */
			nfs_page_group_lock(req);
			continue;
		}
		subreq = nfs_create_subreq(req, req->wb_pgbase,
					   req->wb_offset, size);
		if (IS_ERR(subreq))
			goto err_ptr;
		subreq_size = size;
	}

	nfs_page_group_unlock(req);
	return 1;
err_ptr:
	desc->pg_error = PTR_ERR(subreq);
	nfs_page_group_unlock(req);
	return 0;
}
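
/*
 * Worked example for the loop above: with 12 KiB of pg_bsize remaining,
 * a 16 KiB request coalesces only partially, so a 12 KiB subrequest is
 * created and submitted, nfs_pageio_doio() fires the RPC, and the loop
 * retries with the remaining 4 KiB of @req.
 */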
static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	LIST_HEAD(head);

	do {
		list_splice_init(&mirror->pg_list, &head);
		mirror->pg_recoalesce = 0;

		while (!list_empty(&head)) {
			struct nfs_page *req;

			req = list_first_entry(&head, struct nfs_page, wb_list);
			if (__nfs_pageio_add_request(desc, req))
				continue;
			if (desc->pg_error < 0) {
				list_splice_tail(&head, &mirror->pg_list);
				mirror->pg_recoalesce = 1;
				return 0;
			}
			break;
		}
	} while (mirror->pg_recoalesce);
	return 1;
}

static int nfs_pageio_add_request_mirror(struct nfs_pageio_descriptor *desc,
					 struct nfs_page *req)
{
	int ret;

	do {
		ret = __nfs_pageio_add_request(desc, req);
		if (ret)
			break;
		if (desc->pg_error < 0)
			break;
		ret = nfs_do_recoalesce(desc);
	} while (ret);

	return ret;
}
static void nfs_pageio_error_cleanup(struct nfs_pageio_descriptor *desc)
{
	u32 midx;
	struct nfs_pgio_mirror *mirror;

	if (!desc->pg_error)
		return;

	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
		mirror = nfs_pgio_get_mirror(desc, midx);
		desc->pg_completion_ops->error_cleanup(&mirror->pg_list,
						       desc->pg_error);
	}
}
int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	u32 midx;
	unsigned int pgbase, offset, bytes;
	struct nfs_page *dupreq;

	pgbase = req->wb_pgbase;
	offset = req->wb_offset;
	bytes = req->wb_bytes;

	nfs_pageio_setup_mirroring(desc, req);
	if (desc->pg_error < 0)
		goto out_failed;

	/* Create the mirror instances first, and fire them off */
	for (midx = 1; midx < desc->pg_mirror_count; midx++) {
		nfs_page_group_lock(req);

		dupreq = nfs_create_subreq(req,
					   pgbase, offset, bytes);

		nfs_page_group_unlock(req);
		if (IS_ERR(dupreq)) {
			desc->pg_error = PTR_ERR(dupreq);
			goto out_failed;
		}

		nfs_pgio_set_current_mirror(desc, midx);
		if (!nfs_pageio_add_request_mirror(desc, dupreq))
			goto out_cleanup_subreq;
	}

	nfs_pgio_set_current_mirror(desc, 0);
	if (!nfs_pageio_add_request_mirror(desc, req))
		goto out_failed;

	return 1;

out_cleanup_subreq:
	nfs_pageio_cleanup_request(desc, dupreq);
out_failed:
	nfs_pageio_error_cleanup(desc);
	return 0;
}
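
/*
 * Note on ordering: each extra mirror receives a duplicate subrequest
 * created under the page group lock, and mirror 0 is handed the original
 * @req last, so the caller's pointer remains valid even if a later
 * mirror fails and triggers cleanup.
 */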
/*
 * nfs_pageio_complete_mirror - Complete I/O on the current mirror of an
 *				nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 * @mirror_idx: pointer to mirror index
 */
static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
				       u32 mirror_idx)
{
	struct nfs_pgio_mirror *mirror;
	u32 restore_idx;

	restore_idx = nfs_pgio_set_current_mirror(desc, mirror_idx);
	mirror = nfs_pgio_current_mirror(desc);

	for (;;) {
		nfs_pageio_doio(desc);
		if (desc->pg_error < 0 || !mirror->pg_recoalesce)
			break;
		if (!nfs_do_recoalesce(desc))
			break;
	}
	nfs_pgio_set_current_mirror(desc, restore_idx);
}
/*
 * nfs_pageio_resend - Transfer requests to new descriptor and resend
 * @hdr - the pgio header to move request from
 * @desc - the pageio descriptor to add requests to
 *
 * Try to move each request (nfs_page) from @hdr to @desc then attempt
 * to send them.
 *
 * Returns 0 on success and < 0 on error.
 */
int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
		      struct nfs_pgio_header *hdr)
{
	LIST_HEAD(pages);

	desc->pg_io_completion = hdr->io_completion;
	desc->pg_dreq = hdr->dreq;
	nfs_netfs_set_pageio_descriptor(desc, hdr);
	list_splice_init(&hdr->pages, &pages);
	while (!list_empty(&pages)) {
		struct nfs_page *req = nfs_list_entry(pages.next);

		if (!nfs_pageio_add_request(desc, req))
			break;
	}
	nfs_pageio_complete(desc);
	if (!list_empty(&pages)) {
		int err = desc->pg_error < 0 ? desc->pg_error : -EIO;

		hdr->completion_ops->error_cleanup(&pages, err);
		nfs_set_pgio_error(hdr, err, hdr->io_start);
		return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_pageio_resend);
/**
 * nfs_pageio_complete - Complete I/O then cleanup an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	u32 midx;

	for (midx = 0; midx < desc->pg_mirror_count; midx++)
		nfs_pageio_complete_mirror(desc, midx);

	if (desc->pg_error < 0)
		nfs_pageio_error_cleanup(desc);
	if (desc->pg_ops->pg_cleanup)
		desc->pg_ops->pg_cleanup(desc);
	nfs_pageio_cleanup_mirroring(desc);
}
/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	struct nfs_pgio_mirror *mirror;
	struct nfs_page *prev;
	struct folio *folio;
	u32 midx;

	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
		mirror = nfs_pgio_get_mirror(desc, midx);
		if (!list_empty(&mirror->pg_list)) {
			prev = nfs_list_entry(mirror->pg_list.prev);
			folio = nfs_page_to_folio(prev);
			if (folio) {
				if (index == folio_next_index(folio))
					continue;
			} else if (index == prev->wb_index + 1)
				continue;
			/*
			 * We will submit more requests after these. Indicate
			 * this to the underlying layers.
			 */
			desc->pg_moreio = 1;
			nfs_pageio_complete(desc);
			break;
		}
	}
}
/*
 * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1)
 */
void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio)
{
	nfs_pageio_complete(pgio);
}

int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}

static const struct rpc_call_ops nfs_pgio_common_ops = {
	.rpc_call_prepare = nfs_pgio_prepare,
	.rpc_call_done = nfs_pgio_result,
	.rpc_release = nfs_pgio_release,
};

const struct nfs_pageio_ops nfs_pgio_rw_ops = {
	.pg_test = nfs_generic_pg_test,
	.pg_doio = nfs_generic_pg_pgios,
};