// SPDX-License-Identifier: GPL-2.0
/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"

#define NFSDDBG_FACILITY        NFSDDBG_REPCACHE

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE      64

struct nfsd_drc_bucket {
        struct rb_root rb_head;
        struct list_head lru_head;
        spinlock_t cache_lock;
};

static int      nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
                                            struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
                                           struct shrink_control *sc);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
 *
 * XXX: these limits are per-container, so memory used will increase
 * linearly with number of containers. Maybe that's OK.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
        unsigned int limit;
        unsigned long low_pages = totalram_pages() - totalhigh_pages();

        limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
        return min_t(unsigned int, limit, 256*1024);
}

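/*
 * A worked example of the math above, assuming 4 KiB pages: 1GB of low
 * memory is 262144 pages, int_sqrt(262144) = 512, and (16 * 512) << 2
 * = 32768 entries, matching the 1GB row in the table.
 */
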
/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
        return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}

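/*
 * E.g. the 256k-entry hard cap yields 262144 / 64 = 4096 buckets (already
 * a power of two), so maskbits below would come out to ilog2(4096) = 12.
 */
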
static u32
nfsd_cache_hash(__be32 xid, struct nfsd_net *nn)
{
        return hash_32(be32_to_cpu(xid), nn->maskbits);
}

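/*
 * Allocate a fresh DRC entry and prime its lookup key from the request:
 * XID, procedure number, client address and port, transport protocol,
 * NFS version, argument length, and the payload checksum.
 */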
static struct svc_cacherep *
nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum,
                        struct nfsd_net *nn)
{
        struct svc_cacherep *rp;

        rp = kmem_cache_alloc(nn->drc_slab, GFP_KERNEL);
        if (rp) {
                rp->c_state = RC_UNUSED;
                rp->c_type = RC_NOCACHE;
                RB_CLEAR_NODE(&rp->c_node);
                INIT_LIST_HEAD(&rp->c_lru);

                memset(&rp->c_key, 0, sizeof(rp->c_key));
                rp->c_key.k_xid = rqstp->rq_xid;
                rp->c_key.k_proc = rqstp->rq_proc;
                rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp));
                rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp)));
                rp->c_key.k_prot = rqstp->rq_prot;
                rp->c_key.k_vers = rqstp->rq_vers;
                rp->c_key.k_len = rqstp->rq_arg.len;
                rp->c_key.k_csum = csum;
        }
        return rp;
}

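/*
 * Free a cache entry with the bucket's cache_lock held: release any cached
 * reply buffer, unhash the entry if it was ever inserted, adjust the stats,
 * and return the entry to the slab.
 */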
static void
nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
                                struct nfsd_net *nn)
{
        if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
                nn->drc_mem_usage -= rp->c_replvec.iov_len;
                kfree(rp->c_replvec.iov_base);
        }
        if (rp->c_state != RC_UNUSED) {
                rb_erase(&rp->c_node, &b->rb_head);
                list_del(&rp->c_lru);
                atomic_dec(&nn->num_drc_entries);
                nn->drc_mem_usage -= sizeof(*rp);
        }
        kmem_cache_free(nn->drc_slab, rp);
}

static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
                        struct nfsd_net *nn)
{
        spin_lock(&b->cache_lock);
        nfsd_reply_cache_free_locked(b, rp, nn);
        spin_unlock(&b->cache_lock);
}

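/*
 * Set up the per-net DRC: compute the size limits, register the shrinker,
 * create the entry slab, and allocate the bucket hash table (falling back
 * from kcalloc() to vzalloc() when the table is too large for kmalloc).
 */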
int nfsd_reply_cache_init(struct nfsd_net *nn)
{
        unsigned int hashsize;
        unsigned int i;
        int status = 0;

        nn->max_drc_entries = nfsd_cache_size_limit();
        atomic_set(&nn->num_drc_entries, 0);
        hashsize = nfsd_hashsize(nn->max_drc_entries);
        nn->maskbits = ilog2(hashsize);

        nn->nfsd_reply_cache_shrinker.scan_objects = nfsd_reply_cache_scan;
        nn->nfsd_reply_cache_shrinker.count_objects = nfsd_reply_cache_count;
        nn->nfsd_reply_cache_shrinker.seeks = 1;
        status = register_shrinker(&nn->nfsd_reply_cache_shrinker);
        if (status)
                return status;

        nn->drc_slab = kmem_cache_create("nfsd_drc",
                                sizeof(struct svc_cacherep), 0, 0, NULL);
        if (!nn->drc_slab)
                goto out_shrinker;

        nn->drc_hashtbl = kcalloc(hashsize,
                                sizeof(*nn->drc_hashtbl), GFP_KERNEL);
        if (!nn->drc_hashtbl) {
                nn->drc_hashtbl = vzalloc(array_size(hashsize,
                                                sizeof(*nn->drc_hashtbl)));
                if (!nn->drc_hashtbl)
                        goto out_slab;
        }

        for (i = 0; i < hashsize; i++) {
                INIT_LIST_HEAD(&nn->drc_hashtbl[i].lru_head);
                spin_lock_init(&nn->drc_hashtbl[i].cache_lock);
        }
        nn->drc_hashsize = hashsize;

        return 0;
out_slab:
        kmem_cache_destroy(nn->drc_slab);
out_shrinker:
        unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
        printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
        return -ENOMEM;
}

void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
{
        struct svc_cacherep *rp;
        unsigned int i;

        unregister_shrinker(&nn->nfsd_reply_cache_shrinker);

        for (i = 0; i < nn->drc_hashsize; i++) {
                struct list_head *head = &nn->drc_hashtbl[i].lru_head;

                while (!list_empty(head)) {
                        rp = list_first_entry(head, struct svc_cacherep, c_lru);
                        nfsd_reply_cache_free_locked(&nn->drc_hashtbl[i],
                                                        rp, nn);
                }
        }

        kvfree(nn->drc_hashtbl);
        nn->drc_hashtbl = NULL;
        nn->drc_hashsize = 0;

        kmem_cache_destroy(nn->drc_slab);
        nn->drc_slab = NULL;
}

/*
 * Move cache entry to end of LRU list, updating its timestamp.
 */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
        rp->c_timestamp = jiffies;
        list_move_tail(&rp->c_lru, &b->lru_head);
}

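/*
 * Prune one bucket's LRU list with the bucket lock held. Entries are freed
 * once they are older than RC_EXPIRE (120 seconds at the time of writing),
 * or unconditionally while the cache is over nn->max_drc_entries;
 * in-progress entries are always skipped. Returns the number freed.
 */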
static long
prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn)
{
        struct svc_cacherep *rp, *tmp;
        long freed = 0;

        list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
                /*
                 * Don't free entries attached to calls that are still
                 * in-progress, but do keep scanning the list.
                 */
                if (rp->c_state == RC_INPROG)
                        continue;
                if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
                    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
                        break;
                nfsd_reply_cache_free_locked(b, rp, nn);
                freed++;
        }
        return freed;
}

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static long
prune_cache_entries(struct nfsd_net *nn)
{
        unsigned int i;
        long freed = 0;

        for (i = 0; i < nn->drc_hashsize; i++) {
                struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];

                if (list_empty(&b->lru_head))
                        continue;
                spin_lock(&b->cache_lock);
                freed += prune_bucket(b, nn);
                spin_unlock(&b->cache_lock);
        }
        return freed;
}

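/*
 * The two functions below implement the memory-shrinker contract:
 * ->count_objects reports how many entries could be freed and
 * ->scan_objects does the freeing. The low .seeks value of 1 (half of
 * DEFAULT_SEEKS) tells the VM that DRC entries are comparatively cheap
 * to give up under memory pressure.
 */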
static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
        struct nfsd_net *nn = container_of(shrink,
                                struct nfsd_net, nfsd_reply_cache_shrinker);

        return atomic_read(&nn->num_drc_entries);
}

static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        struct nfsd_net *nn = container_of(shrink,
                                struct nfsd_net, nfsd_reply_cache_shrinker);

        return prune_cache_entries(nn);
}

/*
 * Walk an xdr_buf and get a checksum of at most the first RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
        int idx;
        unsigned int base;
        __wsum csum;
        struct xdr_buf *buf = &rqstp->rq_arg;
        const unsigned char *p = buf->head[0].iov_base;
        size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
                                RC_CSUMLEN);
        size_t len = min(buf->head[0].iov_len, csum_len);

        /* rq_arg.head first */
        csum = csum_partial(p, len, 0);
        csum_len -= len;

        /* Continue into page array */
        idx = buf->page_base / PAGE_SIZE;
        base = buf->page_base & ~PAGE_MASK;
        while (csum_len) {
                p = page_address(buf->pages[idx]) + base;
                len = min_t(size_t, PAGE_SIZE - base, csum_len);
                csum = csum_partial(p, len, csum);
                csum_len -= len;
                base = 0;
                ++idx;
        }
        return csum;
}

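/*
 * Compare a lookup key against an existing entry. The total ordering comes
 * from memcmp() over the packed key. As a side effect, tally a "payload
 * miss" when the XIDs match but the checksums differ, since that suggests
 * the client reused an XID for a different request.
 */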
static int
nfsd_cache_key_cmp(const struct svc_cacherep *key,
                        const struct svc_cacherep *rp, struct nfsd_net *nn)
{
        if (key->c_key.k_xid == rp->c_key.k_xid &&
            key->c_key.k_csum != rp->c_key.k_csum)
                ++nn->payload_misses;

        return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
}

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the matching entry if one
 * is found; otherwise inserts the given key and returns it.
 */
static struct svc_cacherep *
nfsd_cache_insert(struct nfsd_drc_bucket *b, struct svc_cacherep *key,
                        struct nfsd_net *nn)
{
        struct svc_cacherep *rp, *ret = key;
        struct rb_node **p = &b->rb_head.rb_node,
                       *parent = NULL;
        unsigned int entries = 0;
        int cmp;

        while (*p != NULL) {
                ++entries;
                parent = *p;
                rp = rb_entry(parent, struct svc_cacherep, c_node);

                cmp = nfsd_cache_key_cmp(key, rp, nn);
                if (cmp < 0)
                        p = &parent->rb_left;
                else if (cmp > 0)
                        p = &parent->rb_right;
                else {
                        ret = rp;
                        goto out;
                }
        }
        rb_link_node(&key->c_node, parent, p);
        rb_insert_color(&key->c_node, &b->rb_head);
out:
        /* tally hash chain length stats */
        if (entries > nn->longest_chain) {
                nn->longest_chain = entries;
                nn->longest_chain_cachesize = atomic_read(&nn->num_drc_entries);
        } else if (entries == nn->longest_chain) {
                /* prefer to keep the smallest cachesize possible here */
                nn->longest_chain_cachesize = min_t(unsigned int,
                                nn->longest_chain_cachesize,
                                atomic_read(&nn->num_drc_entries));
        }

        lru_put_end(b, ret);
        return ret;
}

/*
 * Try to find an entry matching the current call in the cache. When none
 * is found, we try to grab the oldest expired entry off the LRU list. If
 * a suitable one isn't there, then drop the cache_lock and allocate a
 * new one, then search again in case one got inserted while this thread
 * didn't hold the lock.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
        struct svc_cacherep *rp, *found;
        __be32 xid = rqstp->rq_xid;
        __wsum csum;
        u32 hash = nfsd_cache_hash(xid, nn);
        struct nfsd_drc_bucket *b = &nn->drc_hashtbl[hash];
        int type = rqstp->rq_cachetype;
        int rtn = RC_DOIT;

        rqstp->rq_cacherep = NULL;
        if (type == RC_NOCACHE) {
                nfsdstats.rcnocache++;
                return rtn;
        }

        csum = nfsd_cache_csum(rqstp);

        /*
         * Since the common case is a cache miss followed by an insert,
         * preallocate an entry.
         */
        rp = nfsd_reply_cache_alloc(rqstp, csum, nn);
        if (!rp) {
                dprintk("nfsd: unable to allocate DRC entry!\n");
                return rtn;
        }

        spin_lock(&b->cache_lock);
        found = nfsd_cache_insert(b, rp, nn);
        if (found != rp) {
                nfsd_reply_cache_free_locked(NULL, rp, nn);
                rp = found;
                goto found_entry;
        }

        nfsdstats.rcmisses++;
        rqstp->rq_cacherep = rp;
        rp->c_state = RC_INPROG;

        atomic_inc(&nn->num_drc_entries);
        nn->drc_mem_usage += sizeof(*rp);

        /* go ahead and prune the cache */
        prune_bucket(b, nn);
out:
        spin_unlock(&b->cache_lock);
        return rtn;

found_entry:
        /* We found a matching entry which is either in progress or done. */
        nfsdstats.rchits++;
        rtn = RC_DROPIT;

        /* Request being processed */
        if (rp->c_state == RC_INPROG)
                goto out;

        /* From the hall of fame of impractical attacks:
         * Is this a user who tries to snoop on the cache? */
        rtn = RC_DOIT;
        if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
                goto out;

        /* Compose RPC reply header */
        switch (rp->c_type) {
        case RC_NOCACHE:
                break;
        case RC_REPLSTAT:
                svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
                rtn = RC_REPLY;
                break;
        case RC_REPLBUFF:
                if (!nfsd_cache_append(rqstp, &rp->c_replvec))
                        goto out;       /* should not happen */
                rtn = RC_REPLY;
                break;
        default:
                printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
                nfsd_reply_cache_free_locked(b, rp, nn);
        }

        goto out;
}

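/*
 * To summarize the return codes above: RC_DOIT tells the caller to execute
 * the request (a miss, now marked RC_INPROG), RC_DROPIT drops a duplicate
 * of a call that is still being processed, and RC_REPLY means a cached
 * reply has already been composed into rq_res.
 */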
/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
        struct svc_cacherep *rp = rqstp->rq_cacherep;
        struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
        u32 hash;
        struct nfsd_drc_bucket *b;
        int len;
        size_t bufsize = 0;

        if (!rp)
                return;

        hash = nfsd_cache_hash(rp->c_key.k_xid, nn);
        b = &nn->drc_hashtbl[hash];

        len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
        len >>= 2;

        /* Don't cache excessive amounts of data and XDR failures */
        if (!statp || len > (256 >> 2)) {
                nfsd_reply_cache_free(b, rp, nn);
                return;
        }

        switch (cachetype) {
        case RC_REPLSTAT:
                if (len != 1)
                        printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
                rp->c_replstat = *statp;
                break;
        case RC_REPLBUFF:
                cachv = &rp->c_replvec;
                bufsize = len << 2;
                cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
                if (!cachv->iov_base) {
                        nfsd_reply_cache_free(b, rp, nn);
                        return;
                }
                cachv->iov_len = bufsize;
                memcpy(cachv->iov_base, statp, bufsize);
                break;
        case RC_NOCACHE:
                nfsd_reply_cache_free(b, rp, nn);
                return;
        }
        spin_lock(&b->cache_lock);
        nn->drc_mem_usage += bufsize;
        lru_put_end(b, rp);
        rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
        rp->c_type = cachetype;
        rp->c_state = RC_DONE;
        spin_unlock(&b->cache_lock);
}

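/*
 * Of the two cacheable reply types above, RC_REPLSTAT stores a single
 * status word inline in the entry, while RC_REPLBUFF kmalloc()s a copy of
 * the encoded reply; the len check caps cached replies at 256 bytes.
 */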
/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
        struct kvec *vec = &rqstp->rq_res.head[0];

        if (vec->iov_len + data->iov_len > PAGE_SIZE) {
                printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
                                data->iov_len);
                return 0;
        }
        memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
        vec->iov_len += data->iov_len;
        return 1;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
        struct nfsd_net *nn = m->private;

        seq_printf(m, "max entries:           %u\n", nn->max_drc_entries);
        seq_printf(m, "num entries:           %u\n",
                        atomic_read(&nn->num_drc_entries));
        seq_printf(m, "hash buckets:          %u\n", 1 << nn->maskbits);
        seq_printf(m, "mem usage:             %u\n", nn->drc_mem_usage);
        seq_printf(m, "cache hits:            %u\n", nfsdstats.rchits);
        seq_printf(m, "cache misses:          %u\n", nfsdstats.rcmisses);
        seq_printf(m, "not cached:            %u\n", nfsdstats.rcnocache);
        seq_printf(m, "payload misses:        %u\n", nn->payload_misses);
        seq_printf(m, "longest chain len:     %u\n", nn->longest_chain);
        seq_printf(m, "cachesize at longest:  %u\n", nn->longest_chain_cachesize);
        return 0;
}

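/*
 * These two hooks back the reply_cache_stats file in the nfsd filesystem
 * (typically mounted at /proc/fs/nfsd), using the usual single_open()
 * seq_file convention: "open" binds the show routine to the per-net data.
 */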
int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
{
        struct nfsd_net *nn = net_generic(file_inode(file)->i_sb->s_fs_info,
                                                nfsd_net_id);

        return single_open(file, nfsd_reply_cache_stats_show, nn);
}