// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"
#include "nfstrace.h"
#include "delegation.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;
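
/*
 * Allocate a zeroed pageio header from the slab cache and mark it
 * for reading; returns NULL if the allocation fails.
 */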
static struct nfs_pgio_header *nfs_readhdr_alloc(void)
{
	struct nfs_pgio_header *p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);

	if (p)
		p->rw_mode = FMODE_READ;
	return p;
}
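
/* Free the scratch buffer (if any) along with the header itself. */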
static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
	kfree(rhdr->res.scratch);
	kmem_cache_free(nfs_rdata_cachep, rhdr);
}
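
/*
 * Complete a read of a folio that lies entirely beyond EOF without
 * any wire I/O: zero it, mark it uptodate and unlock it.
 */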
static int nfs_return_empty_folio(struct folio *folio)
{
	folio_zero_segment(folio, 0, folio_size(folio));
	folio_mark_uptodate(folio);
	folio_unlock(folio);
	return 0;
}
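
/*
 * Set up a pageio descriptor for reads.  When a pNFS layout driver is
 * active (and force_mds is not set), its read ops are used instead of
 * the plain RPC-based ops so reads can be sent to the data servers.
 */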
void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
			  struct inode *inode, bool force_mds,
			  const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
			server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);
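
/*
 * Flush out any remaining requests and account the bytes and pages
 * that were read.  Reads are never mirrored, so exactly one mirror
 * is expected.
 */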
void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *pgm;
	unsigned long npages;

	nfs_pageio_complete(pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	pgm = &pgio->pg_mirrors[0];
	NFS_I(pgio->pg_inode)->read_io += pgm->pg_bytes_written;
	npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nfs_add_stats(pgio->pg_inode, NFSIOS_READPAGES, npages);
}
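
/*
 * Fall back from a pNFS layout driver to plain RPC-based reads through
 * the MDS: clean up the layout state, restore the default ops and reset
 * the block size to the server's rsize.
 */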
void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	/* read path should never have more than one mirror */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
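
/*
 * Allocate a scratch buffer for decoding the read reply; it is freed
 * together with the header in nfs_readhdr_free().
 */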
bool nfs_read_alloc_scratch(struct nfs_pgio_header *hdr, size_t size)
{
	WARN_ON(hdr->res.scratch != NULL);
	hdr->res.scratch = kmalloc(size, GFP_KERNEL);
	return hdr->res.scratch != NULL;
}
EXPORT_SYMBOL_GPL(nfs_read_alloc_scratch);
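
/*
 * Release a read request, unlocking the folio once the last request in
 * the page group completes, unless netfs (fscache) owns the unlock.
 */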
static void nfs_readpage_release(struct nfs_page *req, int error)
{
	struct folio *folio = nfs_page_to_folio(req);

	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE))
		if (nfs_netfs_folio_unlock(folio))
			folio_unlock(folio);

	nfs_release_request(req);
}
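
/*
 * Mark the folio uptodate once every request in the page group has
 * completed successfully.
 */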
static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
		folio_mark_uptodate(nfs_page_to_folio(req));
}
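
/*
 * Per-header completion: walk the request list, zero any ranges past
 * the server's EOF or beyond the good bytes of a failed read, mark
 * fully-read page groups uptodate, and release each request.
 */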
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	int error;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct folio *folio = nfs_page_to_folio(req);
		unsigned long start = req->wb_pgbase;
		unsigned long end = req->wb_pgbase + req->wb_bytes;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			/* note: regions of the page not covered by a
			 * request are zeroed in nfs_read_add_folio
			 */
			if (bytes > hdr->good_bytes) {
				/* nothing in this request was good, so zero
				 * the full extent of the request */
				folio_zero_segment(folio, start, end);

			} else if (hdr->good_bytes - bytes < req->wb_bytes) {
				/* part of this request has good bytes, but
				 * not all. zero the bad bytes */
				start += hdr->good_bytes - bytes;
				WARN_ON(start < req->wb_pgbase);
				folio_zero_segment(folio, start, end);
			}
		}
		error = 0;
		bytes += req->wb_bytes;
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
			if (bytes <= hdr->good_bytes)
				nfs_page_group_set_uptodate(req);
			else {
				error = hdr->error;
				xchg(&nfs_req_openctx(req)->error, error);
			}
		} else
			nfs_page_group_set_uptodate(req);
		nfs_list_remove_request(req);
		nfs_readpage_release(req, error);
	}
	nfs_netfs_read_completion(hdr);

out:
	hdr->release(hdr);
}
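
/*
 * Set up the RPC read message for this header and emit the
 * corresponding netfs and tracing hooks.
 */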
static void nfs_initiate_read(struct nfs_pgio_header *hdr,
			      struct rpc_message *msg,
			      const struct nfs_rpc_ops *rpc_ops,
			      struct rpc_task_setup *task_setup_data, int how)
{
	rpc_ops->read_setup(hdr, msg);
	nfs_netfs_initiate_read(hdr);
	trace_nfs_initiate_read(hdr);
}
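
/* Release every request on @head with the given error. */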
static void
nfs_async_read_error(struct list_head *head, int error)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_readpage_release(req, error);
	}
}

const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
	.error_cleanup = nfs_async_read_error,
	.completion = nfs_read_completion,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task,
			     struct nfs_pgio_header *hdr,
			     struct inode *inode)
{
	int status = NFS_PROTO(inode)->read_done(task, hdr);

	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);
	trace_nfs_readpage_done(task, hdr);

	if (task->tk_status == -ESTALE) {
		nfs_set_inode_stale(inode);
		nfs_mark_for_revalidate(inode);
	}
	return 0;
}
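
/*
 * Handle a short read: if the server made no progress at all, fail the
 * request; otherwise advance the arguments past the bytes already
 * received and restart the RPC call.
 */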
static void nfs_readpage_retry(struct rpc_task *task,
			       struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res  *resp = &hdr->res;

	/* This is a short read! */
	nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
	trace_nfs_readpage_short(task, hdr);

	/* Has the server at least made some progress? */
	if (resp->count == 0) {
		nfs_set_pgio_error(hdr, -EIO, argp->offset);
		return;
	}

	/* For non rpc-based layout drivers, retry-through-MDS */
	if (!task->tk_ops) {
		hdr->pnfs_error = -EAGAIN;
		return;
	}

	/* Yes, so retry the read at the end of the hdr */
	hdr->mds_offset += resp->count;
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	resp->count = 0;
	resp->eof = 0;
	rpc_restart_call_prepare(task);
}
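
/*
 * Post-process the result of a read: clamp good_bytes when the server
 * reported EOF, or retry when the reply was shorter than requested.
 */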
static void nfs_readpage_result(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	if (hdr->res.eof) {
		loff_t pos = hdr->args.offset + hdr->res.count;
		unsigned int new = pos - hdr->io_start;

		if (hdr->good_bytes > new) {
			hdr->good_bytes = new;
			set_bit(NFS_IOHDR_EOF, &hdr->flags);
			clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
		}
	} else if (hdr->res.count < hdr->args.count)
		nfs_readpage_retry(task, hdr);
}
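
/*
 * Create a read request for @folio and queue it on the pageio
 * descriptor.  The request length is rounded up to the rsize (capped
 * at the folio size), and any tail beyond the valid file data is
 * zeroed here rather than read from the server.
 */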
int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio,
		       struct nfs_open_context *ctx,
		       struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct nfs_server *server = NFS_SERVER(inode);
	size_t fsize = folio_size(folio);
	unsigned int rsize = server->rsize;
	struct nfs_page *new;
	unsigned int len, aligned_len;
	int error;

	len = nfs_folio_length(folio);
	if (len == 0)
		return nfs_return_empty_folio(folio);

	aligned_len = min_t(unsigned int, ALIGN(len, rsize), fsize);

	new = nfs_page_create_from_folio(ctx, folio, 0, aligned_len);
	if (IS_ERR(new)) {
		error = PTR_ERR(new);
		if (nfs_netfs_folio_unlock(folio))
			folio_unlock(folio);
		goto out;
	}

	if (len < fsize)
		folio_zero_segment(folio, len, fsize);
	if (!nfs_pageio_add_request(pgio, new)) {
		nfs_list_remove_request(new);
		error = pgio->pg_error;
		nfs_readpage_release(new, error);
		goto out;
	}
	return 0;
out:
	return error;
}

/*
 * Actually read a folio over the wire.
 */
static int nfs_do_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = file_inode(file);
	struct nfs_pageio_descriptor pgio;
	struct nfs_open_context *ctx;
	int ret;

	ctx = get_nfs_open_context(nfs_file_open_context(file));

	xchg(&ctx->error, 0);
	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	ret = nfs_read_add_folio(&pgio, ctx, folio);
	if (ret)
		goto out_put;

	nfs_pageio_complete_read(&pgio);
	nfs_update_delegated_atime(inode);
	if (pgio.pg_error < 0) {
		ret = pgio.pg_error;
		goto out_put;
	}

	ret = folio_wait_locked_killable(folio);
	if (!folio_test_uptodate(folio) && !ret)
		ret = xchg(&ctx->error, 0);
out_put:
	put_nfs_open_context(ctx);
	return ret;
}

/*
 * Synchronously read a folio.
 *
 * This is not heavily used as most users try an asynchronous
 * large read through ->readahead first.
 */
int nfs_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = file_inode(file);
	loff_t pos = folio_pos(folio);
	size_t len = folio_size(folio);
	int ret;

	trace_nfs_aop_readpage(inode, pos, len);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	task_io_account_read(len);

	/*
	 * Try to flush any pending writes to the file..
	 *
	 * NOTE! Because we own the folio lock, there cannot
	 * be any new pending writes generated at this point
	 * for this folio (other folios can be written to).
	 */
	ret = nfs_wb_folio(inode, folio);
	if (ret)
		goto out_unlock;
	if (folio_test_uptodate(folio))
		goto out_unlock;

	ret = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	ret = nfs_netfs_read_folio(file, folio);
	if (ret)
		ret = nfs_do_read_folio(file, folio);
out:
	trace_nfs_aop_readpage_done(inode, pos, len, ret);
	return ret;
out_unlock:
	folio_unlock(folio);
	goto out;
}
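
/*
 * Asynchronously populate the folios queued on the readahead control.
 * The netfs (fscache) path is tried first; otherwise the folios are
 * batched onto a pageio descriptor and read over the wire.
 */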
void nfs_readahead(struct readahead_control *ractl)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_open_context *ctx;
	unsigned int nr_pages = readahead_count(ractl);
	struct file *file = ractl->file;
	struct inode *inode = ractl->mapping->host;
	struct folio *folio;
	int ret;

	trace_nfs_aop_readahead(inode, readahead_pos(ractl), nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);
	task_io_account_read(readahead_length(ractl));

	ret = -ESTALE;
	if (NFS_STALE(inode))
		goto out;

	ret = nfs_netfs_readahead(ractl);
	if (!ret)
		goto out;

	if (file == NULL) {
		ret = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	while ((folio = readahead_folio(ractl)) != NULL) {
		ret = nfs_read_add_folio(&pgio, ctx, folio);
		if (ret)
			break;
	}

	nfs_pageio_complete_read(&pgio);
	nfs_update_delegated_atime(inode);

	put_nfs_open_context(ctx);
out:
	trace_nfs_aop_readahead_done(inode, nr_pages, ret);
}
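
/* Create the slab cache used for read pageio headers. */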
int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	kmem_cache_destroy(nfs_rdata_cachep);
}
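
/* Read-side hooks plugged into the generic NFS pageio machinery. */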
static const struct nfs_rw_ops nfs_rw_read_ops = {
	.rw_alloc_header	= nfs_readhdr_alloc,
	.rw_free_header		= nfs_readhdr_free,
	.rw_done		= nfs_readpage_done,
	.rw_result		= nfs_readpage_result,
	.rw_initiate		= nfs_initiate_read,
};