/*
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include <asm/system.h>
#include "pnfs.h"

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE
static const struct nfs_pageio_ops nfs_pageio_read_ops;
static const struct rpc_call_ops nfs_read_partial_ops;
static const struct rpc_call_ops nfs_read_full_ops;

static struct kmem_cache *nfs_rdata_cachep;
struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
{
        struct nfs_read_data *p;

        p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
        if (p) {
                INIT_LIST_HEAD(&p->pages);
                p->npages = pagecount;
                /* Small requests fit in the embedded page_array; larger
                 * ones need a separately allocated page vector. */
                if (pagecount <= ARRAY_SIZE(p->page_array))
                        p->pagevec = p->page_array;
                else {
                        p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
                        if (!p->pagevec) {
                                kmem_cache_free(nfs_rdata_cachep, p);
                                p = NULL;
                        }
                }
        }
        return p;
}
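/*
 * Sizing note (illustrative): nfs_pagein_multi() below always calls
 * nfs_readdata_alloc(1), so the split-read path never takes the
 * kcalloc() fallback; only large coalesced reads from nfs_pagein_one()
 * can exceed the embedded page_array and require a separately
 * allocated page vector.
 */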
void nfs_readdata_free(struct nfs_read_data *p)
{
        /* Free the page vector only if it was allocated separately. */
        if (p && (p->pagevec != &p->page_array[0]))
                kfree(p->pagevec);
        kmem_cache_free(nfs_rdata_cachep, p);
}
void nfs_readdata_release(struct nfs_read_data *rdata)
{
        put_lseg(rdata->lseg);
        put_nfs_open_context(rdata->args.context);
        nfs_readdata_free(rdata);
}
static int nfs_return_empty_page(struct page *page)
{
        zero_user(page, 0, PAGE_CACHE_SIZE);
        SetPageUptodate(page);
        unlock_page(page);
        return 0;
}
static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
{
        unsigned int remainder = data->args.count - data->res.count;
        unsigned int base = data->args.pgbase + data->res.count;
        unsigned int pglen;
        struct page **pages;

        if (data->res.eof == 0 || remainder == 0)
                return;
        /*
         * Note: "remainder" can never be negative, since we check for
         * this in the XDR code.
         */
        pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
        base &= ~PAGE_CACHE_MASK;
        pglen = PAGE_CACHE_SIZE - base;
        for (;;) {
                if (remainder <= pglen) {
                        zero_user(*pages, base, remainder);
                        break;
                }
                zero_user(*pages, base, pglen);
                pages++;
                remainder -= pglen;
                pglen = PAGE_CACHE_SIZE;
                base = 0;
        }
}
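/*
 * Worked example (illustrative, assuming PAGE_CACHE_SIZE == 4096):
 * args.count == 8192, res.count == 5000, res.eof set, args.pgbase == 0.
 * Then remainder == 3192 and base == 5000, so pages points at the
 * second page, base becomes 904 (5000 & 4095) and pglen == 3192;
 * zero_user() clears bytes 904..4095 of that page, which is exactly
 * the tail the server never sent.
 */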
void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
                struct inode *inode)
{
        nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops,
                        NFS_SERVER(inode)->rsize, 0);
}
void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
        pgio->pg_ops = &nfs_pageio_read_ops;
        pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
static void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
                struct inode *inode)
{
        /* Fall back to plain MDS I/O if pNFS can't handle this inode. */
        if (!pnfs_pageio_init_read(pgio, inode))
                nfs_pageio_init_read_mds(pgio, inode);
}
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
                struct page *page)
{
        struct nfs_page *new;
        unsigned int len;
        struct nfs_pageio_descriptor pgio;

        len = nfs_page_length(page);
        if (len == 0)
                return nfs_return_empty_page(page);
        new = nfs_create_request(ctx, inode, page, 0, len);
        if (IS_ERR(new)) {
                unlock_page(page);
                return PTR_ERR(new);
        }
        if (len < PAGE_CACHE_SIZE)
                zero_user_segment(page, len, PAGE_CACHE_SIZE);

        nfs_pageio_init_read(&pgio, inode);
        nfs_pageio_add_request(&pgio, new);
        nfs_pageio_complete(&pgio);
        return 0;
}
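/*
 * For reference, the descriptor usage above follows the generic
 * three-step pageio pattern: nfs_pageio_init_read() selects the MDS or
 * pNFS ops and I/O size, nfs_pageio_add_request() coalesces requests
 * into pg_list, and nfs_pageio_complete() flushes them out through
 * ->pg_doio (nfs_generic_pg_readpages() below).
 */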
static void nfs_readpage_release(struct nfs_page *req)
{
        struct inode *d_inode = req->wb_context->dentry->d_inode;

        if (PageUptodate(req->wb_page))
                nfs_readpage_to_fscache(d_inode, req->wb_page, 0);

        unlock_page(req->wb_page);

        dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
                        req->wb_context->dentry->d_inode->i_sb->s_id,
                        (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
                        req->wb_bytes,
                        (long long)req_offset(req));
        nfs_release_request(req);
}
int nfs_initiate_read(struct nfs_read_data *data, struct rpc_clnt *clnt,
                      const struct rpc_call_ops *call_ops)
{
        struct inode *inode = data->inode;
        int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;
        struct rpc_task *task;
        struct rpc_message msg = {
                .rpc_argp = &data->args,
                .rpc_resp = &data->res,
                .rpc_cred = data->cred,
        };
        struct rpc_task_setup task_setup_data = {
                .task = &data->task,
                .rpc_client = clnt,
                .rpc_message = &msg,
                .callback_ops = call_ops,
                .callback_data = data,
                .workqueue = nfsiod_workqueue,
                .flags = RPC_TASK_ASYNC | swap_flags,
        };

        /* Set up the initial task struct. */
        NFS_PROTO(inode)->read_setup(data, &msg);

        dprintk("NFS: %5u initiated read call (req %s/%lld, %u bytes @ "
                        "offset %llu)\n",
                        data->task.tk_pid,
                        inode->i_sb->s_id,
                        (long long)NFS_FILEID(inode),
                        data->args.count,
                        (unsigned long long)data->args.offset);

        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task))
                return PTR_ERR(task);
        rpc_put_task(task);
        return 0;
}
EXPORT_SYMBOL_GPL(nfs_initiate_read);
/*
 * Set up the NFS read request struct
 */
static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
                unsigned int count, unsigned int offset)
{
        struct inode *inode = req->wb_context->dentry->d_inode;

        data->req         = req;
        data->inode       = inode;
        data->cred        = req->wb_context->cred;

        data->args.fh     = NFS_FH(inode);
        data->args.offset = req_offset(req) + offset;
        data->args.pgbase = req->wb_pgbase + offset;
        data->args.pages  = data->pagevec;
        data->args.count  = count;
        data->args.context = get_nfs_open_context(req->wb_context);
        data->args.lock_context = req->wb_lock_context;

        data->res.fattr   = &data->fattr;
        data->res.count   = count;
        data->res.eof     = 0;
        nfs_fattr_init(&data->fattr);
}
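/*
 * Illustrative field arithmetic: for a request covering page index 3
 * (req_offset(req) == 12288 with 4096-byte pages, wb_pgbase == 0),
 * calling nfs_read_rpcsetup(req, data, 1024, 1024) yields
 * args.offset == 13312 and args.pgbase == 1024: read 1024 bytes of
 * file data into the page, starting 1024 bytes into it.
 */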
static int nfs_do_read(struct nfs_read_data *data,
                const struct rpc_call_ops *call_ops)
{
        struct inode *inode = data->args.context->dentry->d_inode;

        return nfs_initiate_read(data, NFS_CLIENT(inode), call_ops);
}
static int
nfs_do_multiple_reads(struct list_head *head,
                const struct rpc_call_ops *call_ops)
{
        struct nfs_read_data *data;
        int ret = 0;

        while (!list_empty(head)) {
                int ret2;

                data = list_entry(head->next, struct nfs_read_data, list);
                list_del_init(&data->list);

                ret2 = nfs_do_read(data, call_ops);
                /* Remember the first error, but keep submitting. */
                if (ret == 0)
                        ret = ret2;
        }
        return ret;
}
static void
nfs_async_read_error(struct list_head *head)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_readpage_release(req);
        }
}
/*
 * Generate multiple requests to fill a single page.
 *
 * We optimize to reduce the number of read operations on the wire.  If we
 * detect that we're reading a page, or an area of a page, that is past the
 * end of file, we do not generate NFS read operations but just clear the
 * parts of the page that would have come back zero from the server anyway.
 *
 * We rely on the cached value of i_size to make this determination; another
 * client can fill pages on the server past our cached end-of-file, but we
 * won't see the new data until our attribute cache is updated.  This is more
 * or less conventional NFS client behavior.
 */
static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc, struct list_head *res)
{
        struct nfs_page *req = nfs_list_entry(desc->pg_list.next);
        struct page *page = req->wb_page;
        struct nfs_read_data *data;
        size_t rsize = desc->pg_bsize, nbytes;
        unsigned int offset;
        int requests = 0;
        int ret = 0;

        nfs_list_remove_request(req);

        offset = 0;
        nbytes = desc->pg_count;
        do {
                size_t len = min(nbytes, rsize);

                data = nfs_readdata_alloc(1);
                if (data == NULL)
                        goto out_bad;
                data->pagevec[0] = page;
                nfs_read_rpcsetup(req, data, len, offset);
                list_add(&data->list, res);
                requests++;
                nbytes -= len;
                offset += len;
        } while (nbytes != 0);
        atomic_set(&req->wb_complete, requests);
        desc->pg_rpc_callops = &nfs_read_partial_ops;
        return ret;

out_bad:
        while (!list_empty(res)) {
                data = list_entry(res->next, struct nfs_read_data, list);
                list_del(&data->list);
                nfs_readdata_free(data);
        }
        nfs_readpage_release(req);
        return -ENOMEM;
}
static int nfs_pagein_one(struct nfs_pageio_descriptor *desc, struct list_head *res)
{
        struct nfs_page         *req;
        struct page             **pages;
        struct nfs_read_data    *data;
        struct list_head *head = &desc->pg_list;
        int ret = 0;

        data = nfs_readdata_alloc(nfs_page_array_len(desc->pg_base,
                                                     desc->pg_count));
        if (!data) {
                nfs_async_read_error(head);
                ret = -ENOMEM;
                goto out;
        }

        pages = data->pagevec;
        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_list_add_request(req, &data->pages);
                *pages++ = req->wb_page;
        }
        req = nfs_list_entry(data->pages.next);

        nfs_read_rpcsetup(req, data, desc->pg_count, 0);
        list_add(&data->list, res);
        desc->pg_rpc_callops = &nfs_read_full_ops;
out:
        return ret;
}
int nfs_generic_pagein(struct nfs_pageio_descriptor *desc, struct list_head *head)
{
        if (desc->pg_bsize < PAGE_CACHE_SIZE)
                return nfs_pagein_multi(desc, head);
        return nfs_pagein_one(desc, head);
}
static int nfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
        LIST_HEAD(head);
        int ret;

        ret = nfs_generic_pagein(desc, &head);
        if (ret == 0)
                ret = nfs_do_multiple_reads(&head, desc->pg_rpc_callops);
        return ret;
}
static const struct nfs_pageio_ops nfs_pageio_read_ops = {
        .pg_test = nfs_generic_pg_test,
        .pg_doio = nfs_generic_pg_readpages,
};
/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
{
        int status;

        dprintk("NFS: %s: %5u, (status %d)\n", __func__, task->tk_pid,
                        task->tk_status);

        status = NFS_PROTO(data->inode)->read_done(task, data);
        if (status != 0)
                return status;

        nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, data->res.count);

        if (task->tk_status == -ESTALE) {
                set_bit(NFS_INO_STALE, &NFS_I(data->inode)->flags);
                nfs_mark_for_revalidate(data->inode);
        }
        return 0;
}
static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data)
{
        struct nfs_readargs *argp = &data->args;
        struct nfs_readres *resp = &data->res;

        if (resp->eof || resp->count == argp->count)
                return;

        /* This is a short read! */
        nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
        /* Has the server at least made some progress? */
        if (resp->count == 0)
                return;

        /* Yes, so retry the read at the end of the data */
        data->mds_offset += resp->count;
        argp->offset += resp->count;
        argp->pgbase += resp->count;
        argp->count -= resp->count;
        rpc_restart_call_prepare(task);
}
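/*
 * Short-read arithmetic (illustrative): if argp->count == 16384 but
 * the server returned resp->count == 4096 with eof clear, the
 * arguments are advanced by 4096 (offset, pgbase) and shrunk to
 * count == 12288, and the same RPC task is restarted to fetch the
 * remainder.
 */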
/*
 * Handle a read reply that fills part of a page.
 */
static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
{
        struct nfs_read_data *data = calldata;

        if (nfs_readpage_result(task, data) != 0)
                return;
        if (task->tk_status < 0)
                return;

        nfs_readpage_truncate_uninitialised_page(data);
        nfs_readpage_retry(task, data);
}
static void nfs_readpage_release_partial(void *calldata)
{
        struct nfs_read_data *data = calldata;
        struct nfs_page *req = data->req;
        struct page *page = req->wb_page;
        int status = data->task.tk_status;

        if (status < 0)
                set_bit(PG_PARTIAL_READ_FAILED, &req->wb_flags);

        if (atomic_dec_and_test(&req->wb_complete)) {
                if (!test_bit(PG_PARTIAL_READ_FAILED, &req->wb_flags))
                        SetPageUptodate(page);
                nfs_readpage_release(req);
        }
        nfs_readdata_release(calldata);
}
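/*
 * Note: every sub-read created by nfs_pagein_multi() ends up here, so
 * wb_complete acts as a per-page completion count: the page is marked
 * uptodate and unlocked only by the last reply, and only if no
 * sub-read recorded PG_PARTIAL_READ_FAILED.
 */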
#if defined(CONFIG_NFS_V4_1)
void nfs_read_prepare(struct rpc_task *task, void *calldata)
{
        struct nfs_read_data *data = calldata;

        /* NFSv4.1 must reserve a session slot before the call proceeds. */
        if (nfs4_setup_sequence(NFS_SERVER(data->inode),
                                &data->args.seq_args, &data->res.seq_res,
                                0, task))
                return;
        rpc_call_start(task);
}
#endif /* CONFIG_NFS_V4_1 */
static const struct rpc_call_ops nfs_read_partial_ops = {
#if defined(CONFIG_NFS_V4_1)
        .rpc_call_prepare = nfs_read_prepare,
#endif /* CONFIG_NFS_V4_1 */
        .rpc_call_done = nfs_readpage_result_partial,
        .rpc_release = nfs_readpage_release_partial,
};
static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
{
        unsigned int count = data->res.count;
        unsigned int base = data->args.pgbase;
        struct page **pages;

        if (data->res.eof)
                count = data->args.count;
        if (unlikely(count == 0))
                return;
        pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
        base &= ~PAGE_CACHE_MASK;
        count += base;
        for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
                SetPageUptodate(*pages);
        if (count == 0)
                return;
        /* Was this a short read? */
        if (data->res.eof || data->res.count == data->args.count)
                SetPageUptodate(*pages);
}
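/*
 * Illustrative case (4096-byte pages): pgbase == 0, args.count == 8192,
 * res.count == 5000, eof clear.  The loop marks only the first page
 * uptodate (5000 >= 4096); the second page stays not-uptodate because
 * the read was short and not at EOF, and nfs_readpage_retry() will
 * fetch the remaining bytes before the page is released.
 */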
/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
{
        struct nfs_read_data *data = calldata;

        if (nfs_readpage_result(task, data) != 0)
                return;
        if (task->tk_status < 0)
                return;
        /*
         * Note: nfs_readpage_retry may change the values of
         * data->args. In the multi-page case, we therefore need
         * to ensure that we call nfs_readpage_set_pages_uptodate()
         * first!
         */
        nfs_readpage_truncate_uninitialised_page(data);
        nfs_readpage_set_pages_uptodate(data);
        nfs_readpage_retry(task, data);
}
static void nfs_readpage_release_full(void *calldata)
{
        struct nfs_read_data *data = calldata;

        while (!list_empty(&data->pages)) {
                struct nfs_page *req = nfs_list_entry(data->pages.next);

                nfs_list_remove_request(req);
                nfs_readpage_release(req);
        }
        nfs_readdata_release(calldata);
}
static const struct rpc_call_ops nfs_read_full_ops = {
#if defined(CONFIG_NFS_V4_1)
        .rpc_call_prepare = nfs_read_prepare,
#endif /* CONFIG_NFS_V4_1 */
        .rpc_call_done = nfs_readpage_result_full,
        .rpc_release = nfs_readpage_release_full,
};
/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
        struct nfs_open_context *ctx;
        struct inode *inode = page->mapping->host;
        int             error;

        dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
                page, PAGE_CACHE_SIZE, page->index);
        nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
        nfs_add_stats(inode, NFSIOS_READPAGES, 1);

        /*
         * Try to flush any pending writes to the file..
         *
         * NOTE! Because we own the page lock, there cannot
         * be any new pending writes generated at this point
         * for this page (other pages can be written to).
         */
        error = nfs_wb_page(inode, page);
        if (error)
                goto out_unlock;
        if (PageUptodate(page))
                goto out_unlock;

        error = -ESTALE;
        if (NFS_STALE(inode))
                goto out_unlock;

        if (file == NULL) {
                error = -EBADF;
                ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
                if (ctx == NULL)
                        goto out_unlock;
        } else
                ctx = get_nfs_open_context(nfs_file_open_context(file));

        if (!IS_SYNC(inode)) {
                error = nfs_readpage_from_fscache(ctx, inode, page);
                if (error == 0)
                        goto out;
        }

        error = nfs_readpage_async(ctx, inode, page);

out:
        put_nfs_open_context(ctx);
        return error;
out_unlock:
        unlock_page(page);
        return error;
}
struct nfs_readdesc {
        struct nfs_pageio_descriptor *pgio;
        struct nfs_open_context *ctx;
};
static int
readpage_async_filler(void *data, struct page *page)
{
        struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
        struct inode *inode = page->mapping->host;
        struct nfs_page *new;
        unsigned int len;
        int error;

        len = nfs_page_length(page);
        if (len == 0)
                return nfs_return_empty_page(page);

        new = nfs_create_request(desc->ctx, inode, page, 0, len);
        if (IS_ERR(new))
                goto out_error;

        if (len < PAGE_CACHE_SIZE)
                zero_user_segment(page, len, PAGE_CACHE_SIZE);
        if (!nfs_pageio_add_request(desc->pgio, new)) {
                error = desc->pgio->pg_error;
                goto out_unlock;
        }
        return 0;
out_error:
        error = PTR_ERR(new);
out_unlock:
        unlock_page(page);
        return error;
}
int nfs_readpages(struct file *filp, struct address_space *mapping,
                struct list_head *pages, unsigned nr_pages)
{
        struct nfs_pageio_descriptor pgio;
        struct nfs_readdesc desc = {
                .pgio = &pgio,
        };
        struct inode *inode = mapping->host;
        unsigned long npages;
        int ret = -ESTALE;

        dprintk("NFS: nfs_readpages (%s/%Ld %d)\n",
                        inode->i_sb->s_id,
                        (long long)NFS_FILEID(inode),
                        nr_pages);
        nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

        if (NFS_STALE(inode))
                goto out;

        if (filp == NULL) {
                desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
                if (desc.ctx == NULL)
                        return -EBADF;
        } else
                desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));

        /* attempt to read as many of the pages as possible from the cache
         * - this returns -ENOBUFS immediately if the cookie is negative
         */
        ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
                                         pages, &nr_pages);
        if (ret == 0)
                goto read_complete; /* all pages were read */

        nfs_pageio_init_read(&pgio, inode);

        ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);

        nfs_pageio_complete(&pgio);
        npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
        put_nfs_open_context(desc.ctx);
out:
        return ret;
}
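/*
 * For reference: these two entry points are wired into the VFS through
 * the NFS address_space_operations (nfs_file_aops in fs/nfs/file.c),
 * roughly:
 *
 *	.readpage	= nfs_readpage,
 *	.readpages	= nfs_readpages,
 */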
int __init nfs_init_readpagecache(void)
{
        nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
                                             sizeof(struct nfs_read_data),
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL);
        if (nfs_rdata_cachep == NULL)
                return -ENOMEM;

        return 0;
}
void nfs_destroy_readpagecache(void)
{
        kmem_cache_destroy(nfs_rdata_cachep);
}