/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/smp_lock.h>

#include <asm/system.h>

#include "internal.h"
#include "iostat.h"
#define NFSDBG_FACILITY        NFSDBG_PAGECACHE

static int nfs_pagein_multi(struct inode *, struct list_head *, unsigned int, size_t, int);
static int nfs_pagein_one(struct inode *, struct list_head *, unsigned int, size_t, int);
static const struct rpc_call_ops nfs_read_partial_ops;
static const struct rpc_call_ops nfs_read_full_ops;

static struct kmem_cache *nfs_rdata_cachep;
static mempool_t *nfs_rdata_mempool;

#define MIN_POOL_READ    (32)
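
/*
 * Allocate an nfs_read_data struct from the mempool.  Small requests use
 * the embedded page_array as the page vector; requests larger than
 * ARRAY_SIZE(p->page_array) get a separately allocated vector instead.
 */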
struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
{
        struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_NOFS);

        if (p) {
                memset(p, 0, sizeof(*p));
                INIT_LIST_HEAD(&p->pages);
                p->npages = pagecount;
                if (pagecount <= ARRAY_SIZE(p->page_array))
                        p->pagevec = p->page_array;
                else {
                        p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
                        if (!p->pagevec) {
                                mempool_free(p, nfs_rdata_mempool);
                                p = NULL;
                        }
                }
        }
        return p;
}
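
/*
 * Note: the read data is not freed directly but via call_rcu_bh() (see
 * nfs_readdata_free() below), presumably so that the embedded rpc_task
 * stays valid for any code still examining it under the RCU read lock.
 */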
static void nfs_readdata_rcu_free(struct rcu_head *head)
{
        struct nfs_read_data *p = container_of(head, struct nfs_read_data, task.u.tk_rcu);
        if (p && (p->pagevec != &p->page_array[0]))
                kfree(p->pagevec);
        mempool_free(p, nfs_rdata_mempool);
}
static void nfs_readdata_free(struct nfs_read_data *rdata)
{
        call_rcu_bh(&rdata->task.u.tk_rcu, nfs_readdata_rcu_free);
}

void nfs_readdata_release(void *data)
{
        nfs_readdata_free(data);
}
static
int nfs_return_empty_page(struct page *page)
{
        zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
        SetPageUptodate(page);
        unlock_page(page);
        return 0;
}
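
/*
 * The server signalled eof and returned fewer bytes than we asked for:
 * zero the uninitialised tail of the request so that stale page contents
 * are never exposed.  Does nothing unless eof was set and data is missing.
 */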
static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
{
        unsigned int remainder = data->args.count - data->res.count;
        unsigned int base = data->args.pgbase + data->res.count;
        unsigned int pglen;
        struct page **pages;

        if (data->res.eof == 0 || remainder == 0)
                return;
        /*
         * Note: "remainder" can never be negative, since we check for
         * this in the XDR code.
         */
        pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
        base &= ~PAGE_CACHE_MASK;
        pglen = PAGE_CACHE_SIZE - base;
        for (;;) {
                if (remainder <= pglen) {
                        zero_user_page(*pages, base, remainder, KM_USER0);
                        break;
                }
                zero_user_page(*pages, base, pglen, KM_USER0);
                pages++;
                remainder -= pglen;
                pglen = PAGE_CACHE_SIZE;
                base = 0;
        }
}
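
/*
 * Start an asynchronous read of a single page: build one nfs_page request
 * and hand it to nfs_pagein_multi() or nfs_pagein_one(), depending on
 * whether the server's rsize is smaller than a page.
 */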
static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
                struct page *page)
{
        LIST_HEAD(one_request);
        struct nfs_page *new;
        unsigned int len;

        len = nfs_page_length(page);
        if (len == 0)
                return nfs_return_empty_page(page);
        new = nfs_create_request(ctx, inode, page, 0, len);
        if (IS_ERR(new)) {
                unlock_page(page);
                return PTR_ERR(new);
        }
        if (len < PAGE_CACHE_SIZE)
                zero_user_page(page, len, PAGE_CACHE_SIZE - len, KM_USER0);

        nfs_list_add_request(new, &one_request);
        if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
                nfs_pagein_multi(inode, &one_request, 1, len, 0);
        else
                nfs_pagein_one(inode, &one_request, 1, len, 0);
        return 0;
}
static void nfs_readpage_release(struct nfs_page *req)
{
        unlock_page(req->wb_page);

        dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
                        req->wb_context->path.dentry->d_inode->i_sb->s_id,
                        (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
                        req->wb_bytes,
                        (long long)req_offset(req));
        nfs_clear_request(req);
        nfs_release_request(req);
}
/*
 * Set up the NFS read request struct
 */
static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
                const struct rpc_call_ops *call_ops,
                unsigned int count, unsigned int offset)
{
        struct inode *inode = req->wb_context->path.dentry->d_inode;
        int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;
        struct rpc_task *task;
        struct rpc_message msg = {
                .rpc_argp = &data->args,
                .rpc_resp = &data->res,
                .rpc_cred = req->wb_context->cred,
        };
        struct rpc_task_setup task_setup_data = {
                .task = &data->task,
                .rpc_client = NFS_CLIENT(inode),
                .rpc_message = &msg,
                .callback_ops = call_ops,
                .callback_data = data,
                .flags = RPC_TASK_ASYNC | swap_flags,
        };

        data->req = req;
        data->inode = inode;
        data->cred = msg.rpc_cred;

        data->args.fh = NFS_FH(inode);
        data->args.offset = req_offset(req) + offset;
        data->args.pgbase = req->wb_pgbase + offset;
        data->args.pages = data->pagevec;
        data->args.count = count;
        data->args.context = req->wb_context;

        data->res.fattr = &data->fattr;
        data->res.count = count;
        data->res.eof = 0;
        nfs_fattr_init(&data->fattr);

        /* Set up the initial task struct. */
        NFS_PROTO(inode)->read_setup(data, &msg);

        dprintk("NFS: %5u initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
                        data->task.tk_pid,
                        inode->i_sb->s_id,
                        (long long)NFS_FILEID(inode),
                        count,
                        (unsigned long long)data->args.offset);

        task = rpc_run_task(&task_setup_data);
        if (!IS_ERR(task))
                rpc_put_task(task);
}
static void
nfs_async_read_error(struct list_head *head)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                SetPageError(req->wb_page);
                nfs_readpage_release(req);
        }
}
/*
 * Generate multiple requests to fill a single page.
 *
 * We optimize to reduce the number of read operations on the wire.  If we
 * detect that we're reading a page, or an area of a page, that is past the
 * end of file, we do not generate NFS read operations but just clear the
 * parts of the page that would have come back zero from the server anyway.
 *
 * We rely on the cached value of i_size to make this determination; another
 * client can fill pages on the server past our cached end-of-file, but we
 * won't see the new data until our attribute cache is updated.  This is more
 * or less conventional NFS client behavior.
 */
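/*
 * For illustration only (values assumed, not taken from this tree): with
 * PAGE_CACHE_SIZE == 4096 and rsize == 1024, a full-page read is split
 * below into four READ calls of 1024 bytes each, at page offsets 0, 1024,
 * 2048 and 3072, all sharing the same struct page.
 */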
static int nfs_pagein_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int flags)
{
        struct nfs_page *req = nfs_list_entry(head->next);
        struct page *page = req->wb_page;
        struct nfs_read_data *data;
        size_t rsize = NFS_SERVER(inode)->rsize, nbytes;
        unsigned int offset;
        int requests = 0;
        LIST_HEAD(list);

        nfs_list_remove_request(req);

        nbytes = count;
        do {
                size_t len = min(nbytes, rsize);

                data = nfs_readdata_alloc(1);
                if (!data)
                        goto out_bad;
                INIT_LIST_HEAD(&data->pages);
                list_add(&data->pages, &list);
                requests++;
                nbytes -= len;
        } while (nbytes != 0);
        atomic_set(&req->wb_complete, requests);

        ClearPageError(page);
        offset = 0;
        nbytes = count;
        do {
                data = list_entry(list.next, struct nfs_read_data, pages);
                list_del_init(&data->pages);

                data->pagevec[0] = page;

                if (nbytes < rsize)
                        rsize = nbytes;
                nfs_read_rpcsetup(req, data, &nfs_read_partial_ops,
                                rsize, offset);
                offset += rsize;
                nbytes -= rsize;
        } while (nbytes != 0);

        return 0;

out_bad:
        while (!list_empty(&list)) {
                data = list_entry(list.next, struct nfs_read_data, pages);
                list_del(&data->pages);
                nfs_readdata_free(data);
        }
        SetPageError(page);
        nfs_readpage_release(req);
        return -ENOMEM;
}
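
/*
 * Coalesce all the requests on @head into a single READ call covering
 * @count bytes, gathering each request's page into one page vector.
 */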
static int nfs_pagein_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int flags)
{
        struct nfs_page *req;
        struct page **pages;
        struct nfs_read_data *data;

        data = nfs_readdata_alloc(npages);
        if (!data)
                goto out_bad;

        INIT_LIST_HEAD(&data->pages);
        pages = data->pagevec;
        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_list_add_request(req, &data->pages);
                ClearPageError(req->wb_page);
                *pages++ = req->wb_page;
        }
        req = nfs_list_entry(data->pages.next);

        nfs_read_rpcsetup(req, data, &nfs_read_full_ops, count, 0);
        return 0;
out_bad:
        nfs_async_read_error(head);
        return -ENOMEM;
}
/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
{
        int status;

        dprintk("NFS: %s: %5u, (status %d)\n", __FUNCTION__, task->tk_pid,
                        task->tk_status);

        status = NFS_PROTO(data->inode)->read_done(task, data);
        if (status != 0)
                return status;

        nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, data->res.count);

        if (task->tk_status == -ESTALE) {
                set_bit(NFS_INO_STALE, &NFS_I(data->inode)->flags);
                nfs_mark_for_revalidate(data->inode);
        }
        return 0;
}
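
/*
 * Short read handling: the server returned fewer bytes than requested but
 * did not hit eof, so advance the arguments past the data already received
 * and restart the RPC call.  Returns -EAGAIN when a retry was scheduled,
 * 0 otherwise.
 */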
static int nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data)
{
        struct nfs_readargs *argp = &data->args;
        struct nfs_readres *resp = &data->res;

        if (resp->eof || resp->count == argp->count)
                return 0;

        /* This is a short read! */
        nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
        /* Has the server at least made some progress? */
        if (resp->count == 0)
                return 0;

        /* Yes, so retry the read at the end of the data */
        argp->offset += resp->count;
        argp->pgbase += resp->count;
        argp->count -= resp->count;
        rpc_restart_call(task);
        return -EAGAIN;
}
/*
 * Handle a read reply that fills part of a page.
 */
static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
{
        struct nfs_read_data *data = calldata;
        struct nfs_page *req = data->req;
        struct page *page = req->wb_page;

        if (nfs_readpage_result(task, data) != 0)
                return;

        if (likely(task->tk_status >= 0)) {
                nfs_readpage_truncate_uninitialised_page(data);
                if (nfs_readpage_retry(task, data) != 0)
                        return;
        }
        if (unlikely(task->tk_status < 0))
                SetPageError(page);
        if (atomic_dec_and_test(&req->wb_complete)) {
                if (!PageError(page))
                        SetPageUptodate(page);
                nfs_readpage_release(req);
        }
}
static const struct rpc_call_ops nfs_read_partial_ops = {
        .rpc_call_done = nfs_readpage_result_partial,
        .rpc_release = nfs_readdata_release,
};
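
/*
 * Mark as up to date every page that was completely covered by the reply
 * (or lies beyond eof).  A trailing, partially filled page is only marked
 * up to date when the read was not short.
 */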
static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
{
        unsigned int count = data->res.count;
        unsigned int base = data->args.pgbase;
        struct page **pages;

        if (data->res.eof)
                count = data->args.count;
        if (unlikely(count == 0))
                return;
        pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
        base &= ~PAGE_CACHE_MASK;
        count += base;
        for (; count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
                SetPageUptodate(*pages);
        if (count == 0)
                return;
        /* Was this a short read? */
        if (data->res.eof || data->res.count == data->args.count)
                SetPageUptodate(*pages);
}
/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
{
        struct nfs_read_data *data = calldata;

        if (nfs_readpage_result(task, data) != 0)
                return;
        /*
         * Note: nfs_readpage_retry may change the values of
         * data->args. In the multi-page case, we therefore need
         * to ensure that we call nfs_readpage_set_pages_uptodate()
         * first.
         */
        if (likely(task->tk_status >= 0)) {
                nfs_readpage_truncate_uninitialised_page(data);
                nfs_readpage_set_pages_uptodate(data);
                if (nfs_readpage_retry(task, data) != 0)
                        return;
        }
        while (!list_empty(&data->pages)) {
                struct nfs_page *req = nfs_list_entry(data->pages.next);

                nfs_list_remove_request(req);
                nfs_readpage_release(req);
        }
}
static const struct rpc_call_ops nfs_read_full_ops = {
        .rpc_call_done = nfs_readpage_result_full,
        .rpc_release = nfs_readdata_release,
};
/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
        struct nfs_open_context *ctx;
        struct inode *inode = page->mapping->host;
        int error;

        dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
                page, PAGE_CACHE_SIZE, page->index);
        nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
        nfs_add_stats(inode, NFSIOS_READPAGES, 1);

        /*
         * Try to flush any pending writes to the file..
         *
         * NOTE! Because we own the page lock, there cannot
         * be any new pending writes generated at this point
         * for this page (other pages can be written to).
         */
        error = nfs_wb_page(inode, page);
        if (error)
                goto out_unlock;
        if (PageUptodate(page))
                goto out_unlock;

        error = -ESTALE;
        if (NFS_STALE(inode))
                goto out_unlock;

        if (file == NULL) {
                error = -EBADF;
                ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
                if (ctx == NULL)
                        goto out_unlock;
        } else
                ctx = get_nfs_open_context(nfs_file_open_context(file));

        error = nfs_readpage_async(ctx, inode, page);

        put_nfs_open_context(ctx);
        return error;
out_unlock:
        unlock_page(page);
        return error;
}
struct nfs_readdesc {
        struct nfs_pageio_descriptor *pgio;
        struct nfs_open_context *ctx;
};
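
/*
 * Per-page callback handed to read_cache_pages() by nfs_readpages(): flush
 * any pending write to the page, then build an nfs_page request for it and
 * add that request to the pageio descriptor.
 */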
static int
readpage_async_filler(void *data, struct page *page)
{
        struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
        struct inode *inode = page->mapping->host;
        struct nfs_page *new;
        unsigned int len;
        int error;

        error = nfs_wb_page(inode, page);
        if (error)
                goto out_unlock;
        if (PageUptodate(page))
                goto out_unlock;

        len = nfs_page_length(page);
        if (len == 0)
                return nfs_return_empty_page(page);

        new = nfs_create_request(desc->ctx, inode, page, 0, len);
        if (IS_ERR(new))
                goto out_error;

        if (len < PAGE_CACHE_SIZE)
                zero_user_page(page, len, PAGE_CACHE_SIZE - len, KM_USER0);
        nfs_pageio_add_request(desc->pgio, new);
        return 0;
out_error:
        error = PTR_ERR(new);
        SetPageError(page);
out_unlock:
        unlock_page(page);
        return error;
}
int nfs_readpages(struct file *filp, struct address_space *mapping,
                struct list_head *pages, unsigned nr_pages)
{
        struct nfs_pageio_descriptor pgio;
        struct nfs_readdesc desc = {
                .pgio = &pgio,
        };
        struct inode *inode = mapping->host;
        struct nfs_server *server = NFS_SERVER(inode);
        size_t rsize = server->rsize;
        unsigned long npages;
        int ret = -ESTALE;

        dprintk("NFS: nfs_readpages (%s/%Ld %d)\n",
                        inode->i_sb->s_id,
                        (long long)NFS_FILEID(inode),
                        nr_pages);
        nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

        if (NFS_STALE(inode))
                goto out;

        if (filp == NULL) {
                desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
                if (desc.ctx == NULL)
                        return -EBADF;
        } else
                desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));
        if (rsize < PAGE_CACHE_SIZE)
                nfs_pageio_init(&pgio, inode, nfs_pagein_multi, rsize, 0);
        else
                nfs_pageio_init(&pgio, inode, nfs_pagein_one, rsize, 0);

        ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);

        nfs_pageio_complete(&pgio);
        npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        nfs_add_stats(inode, NFSIOS_READPAGES, npages);
        put_nfs_open_context(desc.ctx);
out:
        return ret;
}
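
/*
 * Module setup and teardown for the read path: create/destroy the
 * nfs_read_data slab cache and its mempool of MIN_POOL_READ entries.
 */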
int __init nfs_init_readpagecache(void)
{
        nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
                                             sizeof(struct nfs_read_data),
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL);
        if (nfs_rdata_cachep == NULL)
                return -ENOMEM;

        nfs_rdata_mempool = mempool_create_slab_pool(MIN_POOL_READ,
                                                     nfs_rdata_cachep);
        if (nfs_rdata_mempool == NULL)
                return -ENOMEM;

        return 0;
}
void nfs_destroy_readpagecache(void)
{
        mempool_destroy(nfs_rdata_mempool);
        kmem_cache_destroy(nfs_rdata_cachep);
}