/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;
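
/*
 * Read headers come out of a dedicated slab cache; the two helpers
 * below are the header alloc/free hooks wired into nfs_rw_read_ops.
 */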
static struct nfs_pgio_header *nfs_readhdr_alloc(void)
{
	return kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
}

static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
	kmem_cache_free(nfs_rdata_cachep, rhdr);
}
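
/*
 * Nothing to read (the page lies entirely beyond EOF): zero it,
 * mark it uptodate and release the page lock.
 */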
static
int nfs_return_empty_page(struct page *page)
{
	zero_user(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}
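
/*
 * Initialize a pageio descriptor for reads.  On a pNFS mount the
 * layout driver's read ops are used unless the caller forces I/O
 * through the MDS.
 */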
void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
			  struct inode *inode, bool force_mds,
			  const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
			server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);
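
/*
 * Redirect a pageio descriptor back to the MDS: restore the default
 * r/w ops and clamp the mirror's block size to the server's rsize.
 */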
void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	pgio->pg_ops = &nfs_pgio_rw_ops;

	/* read path should never have more than one mirror */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
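
/*
 * Read a single page asynchronously: build one nfs_page request,
 * feed it through a fresh pageio descriptor and fire off the RPC.
 */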
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		       struct page *page)
{
	struct nfs_page	*new;
	unsigned int len;
	struct nfs_pageio_descriptor pgio;
	struct nfs_pgio_mirror *pgm;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(ctx, page, NULL, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);
	nfs_pageio_add_request(&pgio, new);
	nfs_pageio_complete(&pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio.pg_mirror_count != 1);

	pgm = &pgio.pg_mirrors[0];
	NFS_I(inode)->read_io += pgm->pg_bytes_written;

	return 0;
}
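
/*
 * Drop a completed read request: once every subrequest in the page
 * group is done, push an uptodate page into fscache and unlock it,
 * then release the request itself.
 */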
static void nfs_readpage_release(struct nfs_page *req)
{
	struct inode *d_inode = req->wb_context->dentry->d_inode;

	dprintk("NFS: read done (%s/%llu %d@%lld)\n", d_inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(d_inode), req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
		if (PageUptodate(req->wb_page))
			nfs_readpage_to_fscache(d_inode, req->wb_page, 0);

		unlock_page(req->wb_page);
	}
	nfs_release_request(req);
}

static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
		SetPageUptodate(req->wb_page);
}
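
/*
 * Completion callback: walk the request list attached to the header,
 * zero any ranges past EOF or past the good byte count, mark page
 * groups uptodate where appropriate and release every request.
 */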
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;
		unsigned long start = req->wb_pgbase;
		unsigned long end = req->wb_pgbase + req->wb_bytes;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			/* note: regions of the page not covered by a
			 * request are zeroed in nfs_readpage_async /
			 * readpage_async_filler */
			if (bytes > hdr->good_bytes) {
				/* nothing in this request was good, so zero
				 * the full extent of the request */
				zero_user_segment(page, start, end);

			} else if (hdr->good_bytes - bytes < req->wb_bytes) {
				/* part of this request has good bytes, but
				 * not all. zero the bad bytes */
				start += hdr->good_bytes - bytes;
				WARN_ON(start < req->wb_pgbase);
				zero_user_segment(page, start, end);
			}
		}
		bytes += req->wb_bytes;
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
			if (bytes <= hdr->good_bytes)
				nfs_page_group_set_uptodate(req);
		} else
			nfs_page_group_set_uptodate(req);
		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
out:
	hdr->release(hdr);
}
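
/*
 * Set up the RPC message for a read.  Reads against a swapfile must
 * carry the swap-safe RPC flags so the transmit path cannot deadlock
 * under memory reclaim.
 */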
static void nfs_initiate_read(struct nfs_pgio_header *hdr,
			      struct rpc_message *msg,
			      const struct nfs_rpc_ops *rpc_ops,
			      struct rpc_task_setup *task_setup_data, int how)
{
	struct inode *inode = hdr->inode;
	int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;

	task_setup_data->flags |= swap_flags;
	rpc_ops->read_setup(hdr, msg);
}
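
/*
 * Error cleanup: unwind a list of read requests that will never be
 * sent, releasing each one (which also unlocks its page).
 */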
static void
nfs_async_read_error(struct list_head *head)
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
}

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
	.error_cleanup = nfs_async_read_error,
	.completion = nfs_read_completion,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task,
			     struct nfs_pgio_header *hdr,
			     struct inode *inode)
{
	int status = NFS_PROTO(inode)->read_done(task, hdr);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);

	if (task->tk_status == -ESTALE) {
		set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
		nfs_mark_for_revalidate(inode);
	}
	return 0;
}
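
/*
 * Handle a short read: if the server returned no data at all, fail
 * the header with -EIO; otherwise advance the arguments past the
 * bytes already received and restart the RPC for the remainder.
 */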
static void nfs_readpage_retry(struct rpc_task *task,
			       struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res  *resp = &hdr->res;

	/* This is a short read! */
	nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
	/* Has the server at least made some progress? */
	if (resp->count == 0) {
		nfs_set_pgio_error(hdr, -EIO, argp->offset);
		return;
	}

	/* Yes, so retry the read at the end of the hdr */
	hdr->mds_offset += resp->count;
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	rpc_restart_call_prepare(task);
}
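
/*
 * Per-RPC result handling: trim good_bytes when the server reports
 * EOF inside the range we asked for, otherwise retry short reads.
 */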
static void nfs_readpage_result(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	if (hdr->res.eof) {
		loff_t bound;

		bound = hdr->args.offset + hdr->res.count;
		spin_lock(&hdr->lock);
		if (bound < hdr->io_start + hdr->good_bytes) {
			set_bit(NFS_IOHDR_EOF, &hdr->flags);
			clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
			hdr->good_bytes = bound - hdr->io_start;
		}
		spin_unlock(&hdr->lock);
	} else if (hdr->res.count != hdr->args.count)
		nfs_readpage_retry(task, hdr);
}

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page_file_mapping(page)->host;
	int		error;

	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
		page, PAGE_CACHE_SIZE, page_file_index(page));
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	nfs_inc_stats(inode, NFSIOS_READPAGES);

	/*
	 * Try to flush any pending writes to the file..
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	error = nfs_wb_page(inode, page);
	if (error)
		goto out_unlock;
	if (PageUptodate(page))
		goto out_unlock;

	error = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	if (file == NULL) {
		error = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out_unlock;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	if (!IS_SYNC(inode)) {
		error = nfs_readpage_from_fscache(ctx, inode, page);
		if (error == 0)
			goto out;
	}

	error = nfs_readpage_async(ctx, inode, page);

out:
	put_nfs_open_context(ctx);
	return error;
out_unlock:
	unlock_page(page);
	return error;
}
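
/*
 * Context handed to readpage_async_filler by read_cache_pages() when
 * servicing a readahead batch.
 */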
struct nfs_readdesc {
	struct nfs_pageio_descriptor *pgio;
	struct nfs_open_context *ctx;
};

static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	struct nfs_page *new;
	unsigned int len;
	int error;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);

	new = nfs_create_request(desc->ctx, page, NULL, 0, len);
	if (IS_ERR(new))
		goto out_error;

	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);
	if (!nfs_pageio_add_request(desc->pgio, new)) {
		error = desc->pgio->pg_error;
		goto out_unlock;
	}
	return 0;
out_error:
	error = PTR_ERR(new);
out_unlock:
	unlock_page(page);
	return error;
}
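
/*
 * ->readpages() entry point: try to satisfy the batch from fscache
 * first, then feed the remaining pages through a pageio descriptor.
 */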
int nfs_readpages(struct file *filp, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_pgio_mirror *pgm;
	struct nfs_readdesc desc = {
		.pgio = &pgio,
	};
	struct inode *inode = mapping->host;
	unsigned long npages;
	int ret = -ESTALE;

	dprintk("NFS: nfs_readpages (%s/%Lu %d)\n",
			inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(inode),
			nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	if (NFS_STALE(inode))
		goto out;

	if (filp == NULL) {
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			return -EBADF;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));

	/* attempt to read as many of the pages as possible from the cache
	 * - this returns -ENOBUFS immediately if the cookie is negative
	 */
	ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
					 pages, &nr_pages);
	if (ret == 0)
		goto read_complete; /* all pages were read */

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
	nfs_pageio_complete(&pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio.pg_mirror_count != 1);

	pgm = &pgio.pg_mirrors[0];
	NFS_I(inode)->read_io += pgm->pg_bytes_written;
	npages = (pgm->pg_bytes_written + PAGE_CACHE_SIZE - 1) >>
		 PAGE_CACHE_SHIFT;
	nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
	put_nfs_open_context(desc.ctx);
out:
	return ret;
}
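
/*
 * Set up / tear down the slab cache backing nfs_readhdr_alloc().
 */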
int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	kmem_cache_destroy(nfs_rdata_cachep);
}
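
/* Glue the read-side hooks into the generic pageio machinery. */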
static const struct nfs_rw_ops nfs_rw_read_ops = {
	.rw_mode		= FMODE_READ,
	.rw_alloc_header	= nfs_readhdr_alloc,
	.rw_free_header		= nfs_readhdr_free,
	.rw_done		= nfs_readpage_done,
	.rw_result		= nfs_readpage_result,
	.rw_initiate		= nfs_initiate_read,
};