// SPDX-License-Identifier: GPL-2.0
/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

/* Local headers supplying nfsd_net, svc_cacherep and the DRC tracepoints */
#include "nfsd.h"
#include "cache.h"
#include "trace.h"
/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64
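/*
 * Illustrative arithmetic (editorial note, not in the original source):
 * with the 256k-entry hard cap applied in nfsd_cache_size_limit() below,
 * nfsd_hashsize() yields roundup_pow_of_two(262144 / 64) = 4096 buckets,
 * i.e. maskbits == 12.
 */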
struct nfsd_drc_bucket {
	struct rb_root rb_head;
	struct list_head lru_head;
	spinlock_t cache_lock;
};

static struct kmem_cache	*drc_slab;
static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc);
/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so these limits should give a rough max of the amount of memory
 * used in k.
 *
 * XXX: these limits are per-container, so memory used will increase
 * linearly with number of containers.  Maybe that's OK.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
	unsigned int limit;
	unsigned long low_pages = totalram_pages() - totalhigh_pages();

	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
	return min_t(unsigned int, limit, 256*1024);
}
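/*
 * Worked example (editorial, assuming 4 KiB pages so PAGE_SHIFT == 12):
 * 1 GiB of low memory is 262144 pages, int_sqrt() of that is 512, and the
 * limit becomes (16 * 512) << 2 = 32768 entries, well under the 256k cap.
 */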
/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
	return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}
static u32
nfsd_cache_hash(__be32 xid, struct nfsd_net *nn)
{
	return hash_32(be32_to_cpu(xid), nn->maskbits);
}
static struct svc_cacherep *
nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum,
			struct nfsd_net *nn)
{
	struct svc_cacherep	*rp;

	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
	if (rp) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		RB_CLEAR_NODE(&rp->c_node);
		INIT_LIST_HEAD(&rp->c_lru);

		memset(&rp->c_key, 0, sizeof(rp->c_key));
		rp->c_key.k_xid = rqstp->rq_xid;
		rp->c_key.k_proc = rqstp->rq_proc;
		rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp));
		rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp)));
		rp->c_key.k_prot = rqstp->rq_prot;
		rp->c_key.k_vers = rqstp->rq_vers;
		rp->c_key.k_len = rqstp->rq_arg.len;
		rp->c_key.k_csum = csum;
	}
	return rp;
}
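/*
 * Editorial note: the key combines the XID with the client address/port,
 * transport protocol, RPC version, procedure, argument length and payload
 * checksum, so a genuine retransmission matches its original call while a
 * different call that merely reuses an XID is caught by the checksum
 * comparison in nfsd_cache_key_cmp() below.
 */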
static void
nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
				struct nfsd_net *nn)
{
	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
		nn->drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
	}
	if (rp->c_state != RC_UNUSED) {
		rb_erase(&rp->c_node, &b->rb_head);
		list_del(&rp->c_lru);
		atomic_dec(&nn->num_drc_entries);
		nn->drc_mem_usage -= sizeof(*rp);
	}
	kmem_cache_free(drc_slab, rp);
}
static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
			struct nfsd_net *nn)
{
	spin_lock(&b->cache_lock);
	nfsd_reply_cache_free_locked(b, rp, nn);
	spin_unlock(&b->cache_lock);
}
int nfsd_drc_slab_create(void)
{
	drc_slab = kmem_cache_create("nfsd_drc",
				sizeof(struct svc_cacherep), 0, 0, NULL);
	return drc_slab ? 0 : -ENOMEM;
}
void nfsd_drc_slab_free(void)
{
	kmem_cache_destroy(drc_slab);
}
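/*
 * Editorial note: drc_slab is module-global and shared by all network
 * namespaces; only the hash table and its accounting are per-net. The
 * expected pairing is nfsd_drc_slab_create() at module init and
 * nfsd_drc_slab_free() at module exit, bracketing any number of
 * nfsd_reply_cache_init()/nfsd_reply_cache_shutdown() calls.
 */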
int nfsd_reply_cache_init(struct nfsd_net *nn)
{
	unsigned int hashsize;
	unsigned int i;
	int status = 0;

	nn->max_drc_entries = nfsd_cache_size_limit();
	atomic_set(&nn->num_drc_entries, 0);
	hashsize = nfsd_hashsize(nn->max_drc_entries);
	nn->maskbits = ilog2(hashsize);

	nn->nfsd_reply_cache_shrinker.scan_objects = nfsd_reply_cache_scan;
	nn->nfsd_reply_cache_shrinker.count_objects = nfsd_reply_cache_count;
	nn->nfsd_reply_cache_shrinker.seeks = 1;
	status = register_shrinker(&nn->nfsd_reply_cache_shrinker);
	if (status)
		goto out_nomem;

	nn->drc_hashtbl = kvzalloc(array_size(hashsize,
				sizeof(*nn->drc_hashtbl)), GFP_KERNEL);
	if (!nn->drc_hashtbl)
		goto out_shrinker;

	for (i = 0; i < hashsize; i++) {
		INIT_LIST_HEAD(&nn->drc_hashtbl[i].lru_head);
		spin_lock_init(&nn->drc_hashtbl[i].cache_lock);
	}
	nn->drc_hashsize = hashsize;

	return 0;
out_shrinker:
	unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	return -ENOMEM;
}
void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
{
	struct svc_cacherep	*rp;
	unsigned int i;

	unregister_shrinker(&nn->nfsd_reply_cache_shrinker);

	for (i = 0; i < nn->drc_hashsize; i++) {
		struct list_head *head = &nn->drc_hashtbl[i].lru_head;
		while (!list_empty(head)) {
			rp = list_first_entry(head, struct svc_cacherep, c_lru);
			nfsd_reply_cache_free_locked(&nn->drc_hashtbl[i],
						     rp, nn);
		}
	}

	kvfree(nn->drc_hashtbl);
	nn->drc_hashtbl = NULL;
	nn->drc_hashsize = 0;
}
/*
 * Move cache entry to end of LRU list, and set its timestamp.
 */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &b->lru_head);
}
static long
prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn)
{
	struct svc_cacherep *rp, *tmp;
	long freed = 0;

	list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
		/*
		 * Don't free entries attached to calls that are still
		 * in-progress, but do keep scanning the list.
		 */
		if (rp->c_state == RC_INPROG)
			continue;
		if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
		    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
			break;
		nfsd_reply_cache_free_locked(b, rp, nn);
		freed++;
	}
	return freed;
}
/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static long
prune_cache_entries(struct nfsd_net *nn)
{
	unsigned int i;
	long freed = 0;

	for (i = 0; i < nn->drc_hashsize; i++) {
		struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];

		if (list_empty(&b->lru_head))
			continue;
		spin_lock(&b->cache_lock);
		freed += prune_bucket(b, nn);
		spin_unlock(&b->cache_lock);
	}
	return freed;
}
static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct nfsd_net *nn = container_of(shrink,
				struct nfsd_net, nfsd_reply_cache_shrinker);

	return atomic_read(&nn->num_drc_entries);
}
static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct nfsd_net *nn = container_of(shrink,
				struct nfsd_net, nfsd_reply_cache_shrinker);

	return prune_cache_entries(nn);
}
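/*
 * Editorial note: under memory pressure the shrinker core first calls
 * count_objects() to size the problem, then scan_objects() to reclaim.
 * seeks = 1 (set in nfsd_reply_cache_init()) marks these entries as cheap
 * to recreate, so they are reclaimed comparatively aggressively.
 */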
/*
 * Walk an xdr_buf and get a checksum for at most the first RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
	int idx;
	unsigned int base;
	__wsum csum;
	struct xdr_buf *buf = &rqstp->rq_arg;
	const unsigned char *p = buf->head[0].iov_base;
	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
				RC_CSUMLEN);
	size_t len = min(buf->head[0].iov_len, csum_len);

	/* rq_arg.head first */
	csum = csum_partial(p, len, 0);
	csum_len -= len;

	/* Continue into page array */
	idx = buf->page_base / PAGE_SIZE;
	base = buf->page_base & ~PAGE_MASK;
	while (csum_len) {
		p = page_address(buf->pages[idx]) + base;
		len = min_t(size_t, PAGE_SIZE - base, csum_len);
		csum = csum_partial(p, len, csum);
		csum_len -= len;
		base = 0;
		idx++;
	}
	return csum;
}
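/*
 * Editorial note: checksumming only the first RC_CSUMLEN bytes bounds the
 * per-request cost for large WRITE arguments, at the price of a small
 * chance that two distinct requests agreeing in XID, length and this
 * prefix are treated as duplicates.
 */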
static int
nfsd_cache_key_cmp(const struct svc_cacherep *key,
			const struct svc_cacherep *rp, struct nfsd_net *nn)
{
	if (key->c_key.k_xid == rp->c_key.k_xid &&
	    key->c_key.k_csum != rp->c_key.k_csum) {
		++nn->payload_misses;
		trace_nfsd_drc_mismatch(nn, key, rp);
	}

	return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
}
/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the found entry, or
 * inserts the given (empty) key if there is no match.
 */
static struct svc_cacherep *
nfsd_cache_insert(struct nfsd_drc_bucket *b, struct svc_cacherep *key,
			struct nfsd_net *nn)
{
	struct svc_cacherep	*rp, *ret = key;
	struct rb_node		**p = &b->rb_head.rb_node,
				*parent = NULL;
	unsigned int		entries = 0;
	int cmp;

	while (*p != NULL) {
		++entries;
		parent = *p;
		rp = rb_entry(parent, struct svc_cacherep, c_node);

		cmp = nfsd_cache_key_cmp(key, rp, nn);
		if (cmp < 0)
			p = &parent->rb_left;
		else if (cmp > 0)
			p = &parent->rb_right;
		else {
			ret = rp;
			goto out;
		}
	}
	rb_link_node(&key->c_node, parent, p);
	rb_insert_color(&key->c_node, &b->rb_head);
out:
	/* tally hash chain length stats */
	if (entries > nn->longest_chain) {
		nn->longest_chain = entries;
		nn->longest_chain_cachesize = atomic_read(&nn->num_drc_entries);
	} else if (entries == nn->longest_chain) {
		/* prefer to keep the smallest cachesize possible here */
		nn->longest_chain_cachesize = min_t(unsigned int,
				nn->longest_chain_cachesize,
				atomic_read(&nn->num_drc_entries));
	}

	lru_put_end(b, ret);
	return ret;
}
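/*
 * Editorial note: on a hit the existing entry is returned and the caller
 * must free its preallocated key (see nfsd_cache_lookup()); on a miss the
 * key itself is linked into the rbtree and becomes the new entry.
 */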
/**
 * nfsd_cache_lookup - Find an entry in the duplicate reply cache
 * @rqstp: Incoming Call to find
 *
 * Try to find an entry matching the current call in the cache. Since the
 * common case is a cache miss followed by an insert, a new entry is
 * preallocated before the bucket lock is taken; if a matching entry is
 * already present, the preallocated one is freed and the existing entry
 * is used instead.
 *
 * Returns one of:
 * %RC_DOIT: Process the request normally
 * %RC_REPLY: Reply from cache
 * %RC_DROPIT: Do not process the request further
 */
int nfsd_cache_lookup(struct svc_rqst *rqstp)
{
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	struct svc_cacherep	*rp, *found;
	__be32			xid = rqstp->rq_xid;
	__wsum			csum;
	u32 hash = nfsd_cache_hash(xid, nn);
	struct nfsd_drc_bucket *b = &nn->drc_hashtbl[hash];
	int type = rqstp->rq_cachetype;
	int rtn = RC_DOIT;

	rqstp->rq_cacherep = NULL;
	if (type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		goto out;
	}

	csum = nfsd_cache_csum(rqstp);

	/*
	 * Since the common case is a cache miss followed by an insert,
	 * preallocate an entry.
	 */
	rp = nfsd_reply_cache_alloc(rqstp, csum, nn);
	if (!rp)
		goto out;

	spin_lock(&b->cache_lock);
	found = nfsd_cache_insert(b, rp, nn);
	if (found != rp) {
		nfsd_reply_cache_free_locked(NULL, rp, nn);
		rp = found;
		goto found_entry;
	}

	nfsdstats.rcmisses++;
	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;

	atomic_inc(&nn->num_drc_entries);
	nn->drc_mem_usage += sizeof(*rp);

	/* go ahead and prune the cache */
	prune_bucket(b, nn);

out_unlock:
	spin_unlock(&b->cache_lock);
out:
	return rtn;

found_entry:
	/* We found a matching entry which is either in progress or done. */
	nfsdstats.rchits++;
	rtn = RC_DROPIT;

	/* Request being processed */
	if (rp->c_state == RC_INPROG)
		goto out_trace;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
		goto out_trace;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out_unlock; /* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type);
	}

out_trace:
	trace_nfsd_drc_found(nn, rqstp, rtn);
	goto out_unlock;
}
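/*
 * Illustrative caller pattern (editorial sketch, not the verbatim
 * nfsd_dispatch() code):
 *
 *	switch (nfsd_cache_lookup(rqstp)) {
 *	case RC_DOIT:	// execute the procedure, then cache the reply
 *		...;
 *		nfsd_cache_update(rqstp, cachetype, statp);
 *		break;
 *	case RC_REPLY:	// cached reply already composed; just send it
 *		break;
 *	case RC_DROPIT:	// duplicate still in flight; drop silently
 *		break;
 *	}
 */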
/**
 * nfsd_cache_update - Update an entry in the duplicate reply cache.
 * @rqstp: svc_rqst with a finished Reply
 * @cachetype: which cache to update
 * @statp: Reply's status code
 *
 * This is called from nfsd_dispatch when the procedure has been
 * executed and the complete reply is in rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	struct svc_cacherep *rp = rqstp->rq_cacherep;
	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
	u32		hash;
	struct nfsd_drc_bucket *b;
	int		len;
	size_t		bufsize = 0;

	if (!rp)
		return;

	hash = nfsd_cache_hash(rp->c_key.k_xid, nn);
	b = &nn->drc_hashtbl[hash];

	len = resv->iov_len - ((char *)statp - (char *)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		nfsd_reply_cache_free(b, rp, nn);
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		bufsize = len << 2;
		cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
		if (!cachv->iov_base) {
			nfsd_reply_cache_free(b, rp, nn);
			return;
		}
		cachv->iov_len = bufsize;
		memcpy(cachv->iov_base, statp, bufsize);
		break;
	case RC_NOCACHE:
		nfsd_reply_cache_free(b, rp, nn);
		return;
	}
	spin_lock(&b->cache_lock);
	nn->drc_mem_usage += bufsize;
	lru_put_end(b, rp);
	rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&b->cache_lock);
	return;
}
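/*
 * Editorial note: len is converted to 32-bit XDR words (len >>= 2), so the
 * "len > (256 >> 2)" test above caps cached replies at 256 bytes; anything
 * larger is simply not cached and the entry is released instead.
 */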
/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec	*vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char *)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}
/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
	struct nfsd_net *nn = m->private;

	seq_printf(m, "max entries:           %u\n", nn->max_drc_entries);
	seq_printf(m, "num entries:           %u\n",
		   atomic_read(&nn->num_drc_entries));
	seq_printf(m, "hash buckets:          %u\n", 1 << nn->maskbits);
	seq_printf(m, "mem usage:             %u\n", nn->drc_mem_usage);
	seq_printf(m, "cache hits:            %u\n", nfsdstats.rchits);
	seq_printf(m, "cache misses:          %u\n", nfsdstats.rcmisses);
	seq_printf(m, "not cached:            %u\n", nfsdstats.rcnocache);
	seq_printf(m, "payload misses:        %u\n", nn->payload_misses);
	seq_printf(m, "longest chain len:     %u\n", nn->longest_chain);
	seq_printf(m, "cachesize at longest:  %u\n", nn->longest_chain_cachesize);
	return 0;
}
int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
{
	struct nfsd_net *nn = net_generic(file_inode(file)->i_sb->s_fs_info,
					  nfsd_net_id);

	return single_open(file, nfsd_reply_cache_stats_show, nn);
}
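/*
 * Illustrative output (editorial; values are placeholders): reading the
 * stats file, e.g. /proc/fs/nfsd/reply_cache_stats, yields one line per
 * seq_printf() above, e.g. "max entries:" followed by the computed limit
 * and "cache hits:" followed by the running counter.
 */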