/* NFS filesystem cache interface
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_fs_sb.h>
#include <linux/in6.h>
#include <linux/seq_file.h>

#include "internal.h"
#include "iostat.h"
#include "fscache.h"

#define NFSDBG_FACILITY		NFSDBG_FSCACHE
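
/*
 * The tree of superblock cache keys, used to detect a new superblock whose
 * key would not be unique (caching is then denied for that superblock).
 */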
static struct rb_root nfs_fscache_keys = RB_ROOT;
static DEFINE_SPINLOCK(nfs_fscache_keys_lock);
/*
 * Get the per-client index cookie for an NFS client if the appropriate mount
 * flag was set
 * - We always try and get an index cookie for the client, but get filehandle
 *   cookies on a per-superblock basis, depending on the mount flags
 */
void nfs_fscache_get_client_cookie(struct nfs_client *clp)
{
	/* create a cache index for looking up filehandles */
	clp->fscache = fscache_acquire_cookie(nfs_fscache_netfs.primary_index,
					      &nfs_fscache_server_index_def,
					      clp);
	dfprintk(FSCACHE, "NFS: get client cookie (0x%p/0x%p)\n",
		 clp, clp->fscache);
}

/*
 * Dispose of a per-client cookie
 */
void nfs_fscache_release_client_cookie(struct nfs_client *clp)
{
	dfprintk(FSCACHE, "NFS: releasing client cookie (0x%p/0x%p)\n",
		 clp, clp->fscache);

	fscache_relinquish_cookie(clp->fscache, 0);
	clp->fscache = NULL;
}
/*
 * Get the cache cookie for an NFS superblock.  We have to handle
 * uniquification here because the cache doesn't do it for us.
 *
 * The default uniquifier is just an empty string, but it may be overridden
 * either by the 'fsc=xxx' option to mount, or by inheriting it from the parent
 * superblock across an automount point of some nature.
 */
void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq,
				  struct nfs_clone_mount *mntdata)
{
	struct nfs_fscache_key *key, *xkey;
	struct nfs_server *nfss = NFS_SB(sb);
	struct rb_node **p, *parent;
	int diff, ulen;

	if (uniq) {
		ulen = strlen(uniq);
	} else if (mntdata) {
		struct nfs_server *mnt_s = NFS_SB(mntdata->sb);
		if (mnt_s->fscache_key) {
			uniq = mnt_s->fscache_key->key.uniquifier;
			ulen = mnt_s->fscache_key->key.uniq_len;
		}
	}
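
	/* with no explicit or inherited uniquifier, fall back to a single
	 * NUL byte */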
	if (!uniq) {
		uniq = "";
		ulen = 1;
	}

	key = kzalloc(sizeof(*key) + ulen, GFP_KERNEL);
	if (!key)
		return;

	key->nfs_client = nfss->nfs_client;
	key->key.super.s_flags = sb->s_flags & NFS_MS_MASK;
	key->key.nfs_server.flags = nfss->flags;
	key->key.nfs_server.rsize = nfss->rsize;
	key->key.nfs_server.wsize = nfss->wsize;
	key->key.nfs_server.acregmin = nfss->acregmin;
	key->key.nfs_server.acregmax = nfss->acregmax;
	key->key.nfs_server.acdirmin = nfss->acdirmin;
	key->key.nfs_server.acdirmax = nfss->acdirmax;
	key->key.nfs_server.fsid = nfss->fsid;
	key->key.rpc_auth.au_flavor = nfss->client->cl_auth->au_flavor;

	key->key.uniq_len = ulen;
	memcpy(key->key.uniquifier, uniq, ulen);

	spin_lock(&nfs_fscache_keys_lock);
	p = &nfs_fscache_keys.rb_node;
	parent = NULL;
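	/*
	 * Walk the existing keys: order by nfs_client pointer first, then by
	 * the fixed mount/server parameters, then by the uniquifier.  A full
	 * match means the new superblock's key is not unique.
	 */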
	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct nfs_fscache_key, node);

		if (key->nfs_client < xkey->nfs_client)
			goto go_left;
		if (key->nfs_client > xkey->nfs_client)
			goto go_right;

		diff = memcmp(&key->key, &xkey->key, sizeof(key->key));
		if (diff < 0)
			goto go_left;
		if (diff > 0)
			goto go_right;

		if (key->key.uniq_len == 0)
			goto non_unique;
		diff = memcmp(key->key.uniquifier,
			      xkey->key.uniquifier,
			      key->key.uniq_len);
		if (diff < 0)
			goto go_left;
		if (diff > 0)
			goto go_right;
		goto non_unique;

	go_left:
		p = &(*p)->rb_left;
		continue;
	go_right:
		p = &(*p)->rb_right;
	}

	rb_link_node(&key->node, parent, p);
	rb_insert_color(&key->node, &nfs_fscache_keys);
	spin_unlock(&nfs_fscache_keys_lock);
	nfss->fscache_key = key;

	/* create a cache index for looking up filehandles */
	nfss->fscache = fscache_acquire_cookie(nfss->nfs_client->fscache,
					       &nfs_fscache_super_index_def,
					       nfss);
	dfprintk(FSCACHE, "NFS: get superblock cookie (0x%p/0x%p)\n",
		 nfss, nfss->fscache);
	return;

non_unique:
	spin_unlock(&nfs_fscache_keys_lock);
	kfree(key);
	nfss->fscache_key = NULL;
	nfss->fscache = NULL;
	printk(KERN_WARNING "NFS:"
	       " Cache request denied due to non-unique superblock keys\n");
}
/*
 * release a per-superblock cookie
 */
void nfs_fscache_release_super_cookie(struct super_block *sb)
{
	struct nfs_server *nfss = NFS_SB(sb);

	dfprintk(FSCACHE, "NFS: releasing superblock cookie (0x%p/0x%p)\n",
		 nfss, nfss->fscache);

	fscache_relinquish_cookie(nfss->fscache, 0);
	nfss->fscache = NULL;

	if (nfss->fscache_key) {
		spin_lock(&nfs_fscache_keys_lock);
		rb_erase(&nfss->fscache_key->node, &nfs_fscache_keys);
		spin_unlock(&nfs_fscache_keys_lock);
		kfree(nfss->fscache_key);
		nfss->fscache_key = NULL;
	}
}

/*
 * Initialise the per-inode cache cookie pointer for an NFS inode.
 */
void nfs_fscache_init_inode_cookie(struct inode *inode)
{
	NFS_I(inode)->fscache = NULL;
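	/* only regular files are marked as cacheable */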
	if (S_ISREG(inode->i_mode))
		set_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
}

/*
 * Get the per-inode cache cookie for an NFS inode.
 */
static void nfs_fscache_enable_inode_cookie(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct nfs_inode *nfsi = NFS_I(inode);

	if (nfsi->fscache || !NFS_FSCACHE(inode))
		return;

	if ((NFS_SB(sb)->options & NFS_OPTION_FSCACHE)) {
		nfsi->fscache = fscache_acquire_cookie(
			NFS_SB(sb)->fscache,
			&nfs_fscache_inode_object_def,
			nfsi);

		dfprintk(FSCACHE, "NFS: get FH cookie (0x%p/0x%p/0x%p)\n",
			 sb, nfsi, nfsi->fscache);
	}
}
/*
 * Release a per-inode cookie.
 */
void nfs_fscache_release_inode_cookie(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	dfprintk(FSCACHE, "NFS: clear cookie (0x%p/0x%p)\n",
		 nfsi, nfsi->fscache);

	fscache_relinquish_cookie(nfsi->fscache, 0);
	nfsi->fscache = NULL;
}

/*
 * Retire a per-inode cookie, destroying the data attached to it.
 */
void nfs_fscache_zap_inode_cookie(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	dfprintk(FSCACHE, "NFS: zapping cookie (0x%p/0x%p)\n",
		 nfsi, nfsi->fscache);

	fscache_relinquish_cookie(nfsi->fscache, 1);
	nfsi->fscache = NULL;
}

/*
 * Turn off the cache with regard to a per-inode cookie if opened for writing,
 * invalidating all the pages in the page cache relating to the associated
 * inode to clear the per-page caching.
 */
static void nfs_fscache_disable_inode_cookie(struct inode *inode)
{
	clear_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);

	if (NFS_I(inode)->fscache) {
		dfprintk(FSCACHE,
			 "NFS: nfsi 0x%p turning cache off\n", NFS_I(inode));

		/* Need to invalidate any mapped pages that were read in before
		 * turning off the cache.
		 */
		if (inode->i_mapping && inode->i_mapping->nrpages)
			invalidate_inode_pages2(inode->i_mapping);

		nfs_fscache_zap_inode_cookie(inode);
	}
}
/*
 * wait_on_bit() sleep function for uninterruptible waiting
 */
static int nfs_fscache_wait_bit(void *flags)
{
	schedule();
	return 0;
}

/*
 * Lock against someone else trying to also acquire or relinquish a cookie
 */
static inline void nfs_fscache_inode_lock(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	while (test_and_set_bit(NFS_INO_FSCACHE_LOCK, &nfsi->flags))
		wait_on_bit(&nfsi->flags, NFS_INO_FSCACHE_LOCK,
			    nfs_fscache_wait_bit, TASK_UNINTERRUPTIBLE);
}

/*
 * Unlock cookie management lock
 */
static inline void nfs_fscache_inode_unlock(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	smp_mb__before_clear_bit();
	clear_bit(NFS_INO_FSCACHE_LOCK, &nfsi->flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&nfsi->flags, NFS_INO_FSCACHE_LOCK);
}

/*
 * Decide if we should enable or disable local caching for this inode.
 * - For now, with NFS, only regular files that are open read-only will be able
 *   to use the cache.
 * - May be invoked multiple times in parallel by parallel nfs_open() functions.
 */
void nfs_fscache_set_inode_cookie(struct inode *inode, struct file *filp)
{
	if (NFS_FSCACHE(inode)) {
		nfs_fscache_inode_lock(inode);
		if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
			nfs_fscache_disable_inode_cookie(inode);
		else
			nfs_fscache_enable_inode_cookie(inode);
		nfs_fscache_inode_unlock(inode);
	}
}
/*
 * Replace a per-inode cookie due to revalidation detecting a file having
 * changed on the server.
 */
void nfs_fscache_reset_inode_cookie(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_server *nfss = NFS_SERVER(inode);
	struct fscache_cookie *old = nfsi->fscache;

	nfs_fscache_inode_lock(inode);
	if (nfsi->fscache) {
		/* retire the current fscache cache and get a new one */
		fscache_relinquish_cookie(nfsi->fscache, 1);

		nfsi->fscache = fscache_acquire_cookie(
			nfss->nfs_client->fscache,
			&nfs_fscache_inode_object_def,
			nfsi);

		dfprintk(FSCACHE,
			 "NFS: revalidation new cookie (0x%p/0x%p/0x%p/0x%p)\n",
			 nfss, nfsi, old, nfsi->fscache);
	}
	nfs_fscache_inode_unlock(inode);
}

/*
 * Release the caching state associated with a page, if the page isn't busy
 * interacting with the cache.
 * - Returns true (can release page) or false (page busy).
 */
int nfs_fscache_release_page(struct page *page, gfp_t gfp)
{
	struct nfs_inode *nfsi = NFS_I(page->mapping->host);
	struct fscache_cookie *cookie = nfsi->fscache;

	BUG_ON(!cookie);
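
	/* if the cache is still writing this page out, either wait for it to
	 * finish or tell the caller the page is busy */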
	if (fscache_check_page_write(cookie, page)) {
		if (!(gfp & __GFP_WAIT))
			return 0;
		fscache_wait_on_page_write(cookie, page);
	}

	if (PageFsCache(page)) {
		dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
			 cookie, page, nfsi);

		fscache_uncache_page(cookie, page);
		nfs_add_fscache_stats(page->mapping->host,
				      NFSIOS_FSCACHE_PAGES_UNCACHED, 1);
	}

	return 1;
}

/*
 * Release the caching state associated with a page if undergoing complete page
 * invalidation.
 */
void __nfs_fscache_invalidate_page(struct page *page, struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct fscache_cookie *cookie = nfsi->fscache;

	BUG_ON(!cookie);

	dfprintk(FSCACHE, "NFS: fscache invalidatepage (0x%p/0x%p/0x%p)\n",
		 cookie, page, nfsi);
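
	/* wait for any write to the cache to complete before discarding the
	 * page's caching state */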
	fscache_wait_on_page_write(cookie, page);

	BUG_ON(!PageLocked(page));
	fscache_uncache_page(cookie, page);
	nfs_add_fscache_stats(page->mapping->host,
			      NFSIOS_FSCACHE_PAGES_UNCACHED, 1);
}

/*
 * Handle completion of a page being read from the cache.
 * - Called in process (keventd) context.
 */
static void nfs_readpage_from_fscache_complete(struct page *page,
					       void *context,
					       int error)
{
	dfprintk(FSCACHE,
		 "NFS: readpage_from_fscache_complete (0x%p/0x%p/%d)\n",
		 page, context, error);

	/* if the read completes with an error, we just unlock the page and let
	 * the VM reissue the readpage */
	if (!error) {
		SetPageUptodate(page);
		unlock_page(page);
	} else {
		error = nfs_readpage_async(context, page->mapping->host, page);
		if (error)
			unlock_page(page);
	}
}

/*
 * Retrieve a page from fscache
 */
int __nfs_readpage_from_fscache(struct nfs_open_context *ctx,
				struct inode *inode, struct page *page)
{
	int ret;

	dfprintk(FSCACHE,
		 "NFS: readpage_from_fscache(fsc:%p/p:%p(i:%lx f:%lx)/0x%p)\n",
		 NFS_I(inode)->fscache, page, page->index, page->flags, inode);

	ret = fscache_read_or_alloc_page(NFS_I(inode)->fscache,
					 page,
					 nfs_readpage_from_fscache_complete,
					 ctx,
					 GFP_KERNEL);
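
	/* 0 means a read has been started and the completion handler will
	 * unlock the page; -ENOBUFS/-ENODATA mean the data isn't in the
	 * cache, so the caller should read the page from the server */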
	switch (ret) {
	case 0: /* read BIO submitted (page in fscache) */
		dfprintk(FSCACHE,
			 "NFS: readpage_from_fscache: BIO submitted\n");
		nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK, 1);
		return ret;

	case -ENOBUFS: /* inode not in cache */
	case -ENODATA: /* page not in cache */
		nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL, 1);
		dfprintk(FSCACHE,
			 "NFS: readpage_from_fscache %d\n", ret);
		return 1;

	default:
		dfprintk(FSCACHE, "NFS: readpage_from_fscache %d\n", ret);
		nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL, 1);
	}
	return ret;
}
/*
 * Retrieve a set of pages from fscache
 */
int __nfs_readpages_from_fscache(struct nfs_open_context *ctx,
				 struct inode *inode,
				 struct address_space *mapping,
				 struct list_head *pages,
				 unsigned *nr_pages)
{
	int ret, npages = *nr_pages;

	dfprintk(FSCACHE, "NFS: nfs_getpages_from_fscache (0x%p/%u/0x%p)\n",
		 NFS_I(inode)->fscache, npages, inode);

	ret = fscache_read_or_alloc_pages(NFS_I(inode)->fscache,
					  mapping, pages, nr_pages,
					  nfs_readpage_from_fscache_complete,
					  ctx,
					  mapping_gfp_mask(mapping));
	if (*nr_pages < npages)
		nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK,
				      npages);
	if (*nr_pages > 0)
		nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL,
				      *nr_pages);

	switch (ret) {
	case 0: /* read submitted to the cache for all pages */
		BUG_ON(!list_empty(pages));
		BUG_ON(*nr_pages != 0);
		dfprintk(FSCACHE,
			 "NFS: nfs_getpages_from_fscache: submitted\n");

		return ret;

	case -ENOBUFS: /* some pages aren't cached and can't be */
	case -ENODATA: /* some pages aren't cached */
		dfprintk(FSCACHE,
			 "NFS: nfs_getpages_from_fscache: no page: %d\n", ret);
		return 1;

	default:
		dfprintk(FSCACHE,
			 "NFS: nfs_getpages_from_fscache: ret %d\n", ret);
	}

	return ret;
}
/*
 * Store a newly fetched page in fscache
 * - PG_fscache must be set on the page
 */
void __nfs_readpage_to_fscache(struct inode *inode, struct page *page, int sync)
{
	int ret;

	dfprintk(FSCACHE,
		 "NFS: readpage_to_fscache(fsc:%p/p:%p(i:%lx f:%lx)/%d)\n",
		 NFS_I(inode)->fscache, page, page->index, page->flags, sync);

	ret = fscache_write_page(NFS_I(inode)->fscache, page, GFP_KERNEL);
	dfprintk(FSCACHE,
		 "NFS: readpage_to_fscache: p:%p(i:%lu f:%lx) ret %d\n",
		 page, page->index, page->flags, ret);
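
	/* if the cache rejected the page, end its caching state so the page
	 * isn't left marked as being cached */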
	if (ret != 0) {
		fscache_uncache_page(NFS_I(inode)->fscache, page);
		nfs_add_fscache_stats(inode,
				      NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL, 1);
		nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_UNCACHED, 1);
	} else {
		nfs_add_fscache_stats(inode,
				      NFSIOS_FSCACHE_PAGES_WRITTEN_OK, 1);
	}
}