// SPDX-License-Identifier: GPL-2.0-or-later
/* NFS filesystem cache interface
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_fs_sb.h>
#include <linux/in6.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/iversion.h>
#include <linux/xarray.h>
#include <linux/fscache.h>
#include <linux/netfs.h>

#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "nfstrace.h"

#define NFS_MAX_KEY_LEN 1000

static bool nfs_append_int(char *key, int *_len, unsigned long long x)
{
	if (*_len > NFS_MAX_KEY_LEN)
		return false;
	if (x == 0)
		key[(*_len)++] = ',';
	else
		*_len += sprintf(key + *_len, ",%llx", x);
	return true;
}
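
/*
 * Usage sketch (illustrative only, values made up):
 *
 *	char key[NFS_MAX_KEY_LEN + 24];
 *	int len = 0;
 *
 *	nfs_append_int(key, &len, 0x2049);	// key now starts ",2049"
 *
 * A zero value appends a bare "," so empty fields still act as delimiters,
 * and the bool result lets callers chain appends and bail out on overflow.
 */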

/*
 * Get the per-client index cookie for an NFS client if the appropriate mount
 * flag was set
 * - We always try to get an index cookie for the client, but get filehandle
 *   cookies on a per-superblock basis, depending on the mount flags
 */
static bool nfs_fscache_get_client_key(struct nfs_client *clp,
				       char *key, int *_len)
{
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) &clp->cl_addr;
	const struct sockaddr_in *sin = (struct sockaddr_in *) &clp->cl_addr;

	*_len += snprintf(key + *_len, NFS_MAX_KEY_LEN - *_len,
			  ",%u.%u,%x",
			  clp->rpc_ops->version,
			  clp->cl_minorversion,
			  clp->cl_addr.ss_family);

	switch (clp->cl_addr.ss_family) {
	case AF_INET:
		if (!nfs_append_int(key, _len, sin->sin_port) ||
		    !nfs_append_int(key, _len, sin->sin_addr.s_addr))
			return false;
		return true;

	case AF_INET6:
		if (!nfs_append_int(key, _len, sin6->sin6_port) ||
		    !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[0]) ||
		    !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[1]) ||
		    !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[2]) ||
		    !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[3]))
			return false;
		return true;

	default:
		printk(KERN_WARNING "NFS: Unknown network family '%d'\n",
		       clp->cl_addr.ss_family);
		return false;
	}
}
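
/*
 * Illustratively (values made up), an NFSv4.1 client over IPv4 contributes
 * a fragment of the form ",4.1,2,<port>,<addr>", with the port and address
 * rendered in hex by nfs_append_int() above.
 */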

/*
 * Get the cache cookie for an NFS superblock.
 *
 * The default uniquifier is just an empty string, but it may be overridden
 * either by the 'fsc=xxx' option to mount, or by inheriting it from the parent
 * superblock across an automount point of some nature.
 */
int nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int ulen)
{
	struct fscache_volume *vcookie;
	struct nfs_server *nfss = NFS_SB(sb);
	unsigned int len = 3;
	char *key;

	if (uniq) {
		nfss->fscache_uniq = kmemdup_nul(uniq, ulen, GFP_KERNEL);
		if (!nfss->fscache_uniq)
			return -ENOMEM;
	}

	key = kmalloc(NFS_MAX_KEY_LEN + 24, GFP_KERNEL);
	if (!key)
		return -ENOMEM;

	memcpy(key, "nfs", 3);
	if (!nfs_fscache_get_client_key(nfss->nfs_client, key, &len) ||
	    !nfs_append_int(key, &len, nfss->fsid.major) ||
	    !nfs_append_int(key, &len, nfss->fsid.minor) ||
	    !nfs_append_int(key, &len, sb->s_flags & NFS_SB_MASK) ||
	    !nfs_append_int(key, &len, nfss->flags) ||
	    !nfs_append_int(key, &len, nfss->rsize) ||
	    !nfs_append_int(key, &len, nfss->wsize) ||
	    !nfs_append_int(key, &len, nfss->acregmin) ||
	    !nfs_append_int(key, &len, nfss->acregmax) ||
	    !nfs_append_int(key, &len, nfss->acdirmin) ||
	    !nfs_append_int(key, &len, nfss->acdirmax) ||
	    !nfs_append_int(key, &len, nfss->client->cl_auth->au_flavor))
		goto out;

	if (ulen > 0) {
		if (ulen > NFS_MAX_KEY_LEN - len)
			goto out;
		key[len++] = ',';
		memcpy(key + len, uniq, ulen);
		len += ulen;
	}
	key[len] = 0;

	/* create a cache index for looking up filehandles */
	vcookie = fscache_acquire_volume(key,
					 NULL, /* preferred_cache */
					 NULL, 0 /* coherency_data */);
	if (IS_ERR(vcookie)) {
		if (vcookie != ERR_PTR(-EBUSY)) {
			kfree(key);
			return PTR_ERR(vcookie);
		}
		pr_err("NFS: Cache volume key already in use (%s)\n", key);
		vcookie = NULL;
	}
	nfss->fscache = vcookie;

out:
	kfree(key);
	return 0;
}
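
/*
 * The finished volume key is "nfs" followed by the fields appended above,
 * e.g. (illustrative) "nfs,4.1,2,<port>,<addr>,<fsid>,...,<uniquifier>".
 * Every mount parameter that changes I/O behaviour (flags, rsize/wsize,
 * attribute-cache timeouts, auth flavor) is folded in, so two superblocks
 * share a cache volume only if they would behave identically.
 */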

/*
 * Release a per-superblock cookie.
 */
void nfs_fscache_release_super_cookie(struct super_block *sb)
{
	struct nfs_server *nfss = NFS_SB(sb);

	fscache_relinquish_volume(nfss->fscache, NULL, false);
	nfss->fscache = NULL;
	kfree(nfss->fscache_uniq);
}

/*
 * Initialise the per-inode cache cookie pointer for an NFS inode.
 */
void nfs_fscache_init_inode(struct inode *inode)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct nfs_server *nfss = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);

	netfs_inode(inode)->cache = NULL;
	if (!(nfss->fscache && S_ISREG(inode->i_mode)))
		return;

	nfs_fscache_update_auxdata(&auxdata, inode);

	netfs_inode(inode)->cache = fscache_acquire_cookie(
					       nfss->fscache,
					       0,
					       nfsi->fh.data, /* index_key */
					       nfsi->fh.size,
					       &auxdata,      /* aux_data */
					       sizeof(auxdata),
					       i_size_read(inode));

	if (netfs_inode(inode)->cache)
		mapping_set_release_always(inode->i_mapping);
}
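
/*
 * The auxdata captured above is the coherency datum fscache stores with the
 * cookie: on later reuse, fscache compares it against the current value from
 * nfs_fscache_update_auxdata() and junks the cached data on mismatch, so a
 * file changed on the server is not served stale from the local cache.
 */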

/*
 * Release a per-inode cookie.
 */
void nfs_fscache_clear_inode(struct inode *inode)
{
	fscache_relinquish_cookie(netfs_i_cookie(netfs_inode(inode)), false);
	netfs_inode(inode)->cache = NULL;
}

/*
 * Enable or disable caching for a file that is being opened as appropriate.
 * The cookie is allocated when the inode is initialised, but is not enabled at
 * that time.  Enablement is deferred to file-open time to avoid stat() and
 * access() thrashing the cache.
 *
 * For now, with NFS, only regular files that are open read-only will be able
 * to use the cache.
 *
 * We enable the cache for an inode if we open it read-only and it isn't
 * currently open for writing.  We disable the cache if the inode is open
 * write-only.
 *
 * The caller uses the file struct to pin i_writecount on the inode before
 * calling us when a file is opened for writing, so we can make use of that.
 *
 * Note that this may be invoked multiple times in parallel by parallel
 * nfs_open() functions.
 */
void nfs_fscache_open_file(struct inode *inode, struct file *filp)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));
	bool open_for_write = inode_is_open_for_write(inode);

	if (!fscache_cookie_valid(cookie))
		return;

	fscache_use_cookie(cookie, open_for_write);
	if (open_for_write) {
		nfs_fscache_update_auxdata(&auxdata, inode);
		fscache_invalidate(cookie, &auxdata, i_size_read(inode),
				   FSCACHE_INVAL_DIO_WRITE);
	}
}
EXPORT_SYMBOL_GPL(nfs_fscache_open_file);
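
/*
 * Each nfs_fscache_open_file() is balanced by nfs_fscache_release_file()
 * below: fscache_use_cookie() pins the cookie for I/O and
 * fscache_unuse_cookie() drops that pin, handing the latest auxdata and
 * file size back to the cache.
 */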

void nfs_fscache_release_file(struct inode *inode, struct file *filp)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));
	loff_t i_size = i_size_read(inode);

	nfs_fscache_update_auxdata(&auxdata, inode);
	fscache_unuse_cookie(cookie, &auxdata, &i_size);
}

int nfs_netfs_read_folio(struct file *file, struct folio *folio)
{
	if (!netfs_inode(folio_inode(folio))->cache)
		return -ENOBUFS;

	return netfs_read_folio(file, folio);
}
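
/*
 * -ENOBUFS here means "no cache attached"; the NFS read path takes it as
 * the cue to fall back to an ordinary uncached read rather than treating
 * it as an error.
 */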

int nfs_netfs_readahead(struct readahead_control *ractl)
{
	struct inode *inode = ractl->mapping->host;

	if (!netfs_inode(inode)->cache)
		return -ENOBUFS;

	netfs_readahead(ractl);
	return 0;
}

static atomic_t nfs_netfs_debug_id;
static int nfs_netfs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	if (!file) {
		if (WARN_ON_ONCE(rreq->origin != NETFS_PGPRIV2_COPY_TO_CACHE))
			return -EIO;
		return 0;
	}

	rreq->netfs_priv = get_nfs_open_context(nfs_file_open_context(file));
	rreq->debug_id = atomic_inc_return(&nfs_netfs_debug_id);
	/* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */
	__set_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags);
	rreq->io_streams[0].sreq_max_len = NFS_SB(rreq->inode->i_sb)->rsize;

	return 0;
}
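
/*
 * Capping sreq_max_len at the mount's rsize keeps each netfs subrequest
 * within what a single NFS READ can return, so subrequests line up with
 * RPC-sized chunks.
 */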

static void nfs_netfs_free_request(struct netfs_io_request *rreq)
{
	if (rreq->netfs_priv)
		put_nfs_open_context(rreq->netfs_priv);
}

static struct nfs_netfs_io_data *nfs_netfs_alloc(struct netfs_io_subrequest *sreq)
{
	struct nfs_netfs_io_data *netfs;

	netfs = kzalloc(sizeof(*netfs), GFP_KERNEL_ACCOUNT);
	if (!netfs)
		return NULL;
	netfs->sreq = sreq;
	refcount_set(&netfs->refcount, 1);
	return netfs;
}

static void nfs_netfs_issue_read(struct netfs_io_subrequest *sreq)
{
	struct nfs_netfs_io_data *netfs;
	struct nfs_pageio_descriptor pgio;
	struct inode *inode = sreq->rreq->inode;
	struct nfs_open_context *ctx = sreq->rreq->netfs_priv;
	struct page *page;
	unsigned long idx;
	pgoff_t start, last;
	int err;

	start = (sreq->start + sreq->transferred) >> PAGE_SHIFT;
	last = ((sreq->start + sreq->len - sreq->transferred - 1) >> PAGE_SHIFT);

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	netfs = nfs_netfs_alloc(sreq);
	if (!netfs)
		return netfs_read_subreq_terminated(sreq, -ENOMEM, false);

	pgio.pg_netfs = netfs; /* used in completion */

	xa_for_each_range(&sreq->rreq->mapping->i_pages, idx, page, start, last) {
		/* nfs_read_add_folio() may schedule() due to pNFS layout and other RPCs */
		err = nfs_read_add_folio(&pgio, ctx, page_folio(page));
		if (err < 0) {
			netfs->error = err;
			goto out;
		}
	}
out:
	nfs_pageio_complete_read(&pgio);
	nfs_netfs_put(netfs);
}
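
/*
 * Note that the subrequest is not terminated here: the reads queued above
 * complete asynchronously, and completion is signalled to netfs only when
 * the last reference on the nfs_netfs_io_data goes away in nfs_netfs_put()
 * (see nfs_netfs_read_completion() below).
 */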

void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr)
{
	struct nfs_netfs_io_data *netfs = hdr->netfs;

	if (!netfs)
		return;

	nfs_netfs_get(netfs);
}

int nfs_netfs_folio_unlock(struct folio *folio)
{
	struct inode *inode = folio->mapping->host;

	/*
	 * If fscache is enabled, netfs will unlock pages.
	 */
	if (netfs_inode(inode)->cache)
		return 0;

	return 1;
}

void nfs_netfs_read_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_netfs_io_data *netfs = hdr->netfs;
	struct netfs_io_subrequest *sreq;

	if (!netfs)
		return;

	sreq = netfs->sreq;
	if (test_bit(NFS_IOHDR_EOF, &hdr->flags) &&
	    sreq->rreq->origin != NETFS_DIO_READ)
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &sreq->flags);

	if (hdr->error)
		netfs->error = hdr->error;
	else
		atomic64_add(hdr->res.count, &netfs->transferred);

	nfs_netfs_put(netfs);
	hdr->netfs = NULL;
}

const struct netfs_request_ops nfs_netfs_ops = {
	.init_request		= nfs_netfs_init_request,
	.free_request		= nfs_netfs_free_request,
	.issue_read		= nfs_netfs_issue_read,
};
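
/*
 * These are the hooks netfs calls back into for reads handed over via
 * netfs_read_folio()/netfs_readahead() above: init_request attaches the
 * NFS open context, issue_read feeds the request's pages into the NFS
 * pageio engine, and free_request puts the context again.
 */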