/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"

#define NFSDDBG_FACILITY	NFSDDBG_REPCACHE

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64

struct nfsd_drc_bucket {
        struct list_head lru_head;
        spinlock_t cache_lock;
};

static struct nfsd_drc_bucket	*drc_hashtbl;
static struct kmem_cache	*drc_slab;

/* max number of entries allowed in the cache */
static unsigned int	max_drc_entries;

/* number of significant bits in the hash value */
static unsigned int	maskbits;
static unsigned int	drc_hashsize;

/*
 * Stats and other tracking of the duplicate reply cache. All of these and
 * the "rc" fields in nfsdstats are protected by the cache_lock.
 */

/* total number of entries */
static atomic_t		num_drc_entries;

/* cache misses due only to checksum comparison failures */
static unsigned int	payload_misses;

/* amount of memory (in bytes) currently consumed by the DRC */
static unsigned int	drc_mem_usage;

/* longest hash chain seen */
static unsigned int	longest_chain;

/* size of cache when we saw the longest hash chain */
static unsigned int	longest_chain_cachesize;

static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static void	cache_cleaner_func(struct work_struct *unused);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
                                            struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
                                           struct shrink_control *sc);

static struct shrinker nfsd_reply_cache_shrinker = {
        .scan_objects = nfsd_reply_cache_scan,
        .count_objects = nfsd_reply_cache_count,
        .seeks	= 1,
};

/*
 * locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG
 * Otherwise, when accessing _prev or _next, the lock must be held.
 */
static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
        unsigned int limit;
        unsigned long low_pages = totalram_pages - totalhigh_pages;

        limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
        return min_t(unsigned int, limit, 256*1024);
}
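
/*
 * Worked example (illustrative; assumes 4k pages): with 1GB of low
 * memory, low_pages = 262144, int_sqrt() gives 512, and the limit is
 * (16 * 512) << 2 = 32768 entries -- roughly 32MB at ~1k per entry,
 * well under the 256k-entry hard cap.
 */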

/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
        return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}
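
/*
 * E.g. with max_drc_entries = 32768 from the example above, this gives
 * roundup_pow_of_two(32768 / 64) = 512 buckets, so a full cache averages
 * TARGET_BUCKET_SIZE entries per bucket.
 */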

static u32
nfsd_cache_hash(__be32 xid)
{
        return hash_32(be32_to_cpu(xid), maskbits);
}
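
/*
 * hash_32() keeps the top "maskbits" bits of a multiplicative hash, so
 * the result is always a valid index into the (1 << maskbits) buckets
 * of drc_hashtbl.
 */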

static struct svc_cacherep *
nfsd_reply_cache_alloc(void)
{
        struct svc_cacherep	*rp;

        rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
        if (rp) {
                rp->c_state = RC_UNUSED;
                rp->c_type = RC_NOCACHE;
                INIT_LIST_HEAD(&rp->c_lru);
        }
        return rp;
}
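
/* Caller must hold the bucket's cache_lock. */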
static void
nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{
        if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
                drc_mem_usage -= rp->c_replvec.iov_len;
                kfree(rp->c_replvec.iov_base);
        }
        list_del(&rp->c_lru);
        atomic_dec(&num_drc_entries);
        drc_mem_usage -= sizeof(*rp);
        kmem_cache_free(drc_slab, rp);
}

static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
        spin_lock(&b->cache_lock);
        nfsd_reply_cache_free_locked(rp);
        spin_unlock(&b->cache_lock);
}

int nfsd_reply_cache_init(void)
{
        unsigned int hashsize;
        unsigned int i;
        int status = 0;

        max_drc_entries = nfsd_cache_size_limit();
        atomic_set(&num_drc_entries, 0);
        hashsize = nfsd_hashsize(max_drc_entries);
        maskbits = ilog2(hashsize);

        status = register_shrinker(&nfsd_reply_cache_shrinker);
        if (status)
                return status;

        drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
                                        0, 0, NULL);
        if (!drc_slab)
                goto out_nomem;

        drc_hashtbl = kcalloc(hashsize, sizeof(*drc_hashtbl), GFP_KERNEL);
        if (!drc_hashtbl)
                goto out_nomem;
        for (i = 0; i < hashsize; i++) {
                INIT_LIST_HEAD(&drc_hashtbl[i].lru_head);
                spin_lock_init(&drc_hashtbl[i].cache_lock);
        }
        drc_hashsize = hashsize;

        return 0;
out_nomem:
        printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
        nfsd_reply_cache_shutdown();
        return -ENOMEM;
}

void nfsd_reply_cache_shutdown(void)
{
        struct svc_cacherep	*rp;
        unsigned int i;

        unregister_shrinker(&nfsd_reply_cache_shrinker);
        cancel_delayed_work_sync(&cache_cleaner);

        for (i = 0; i < drc_hashsize; i++) {
                struct list_head *head = &drc_hashtbl[i].lru_head;
                while (!list_empty(head)) {
                        rp = list_first_entry(head, struct svc_cacherep, c_lru);
                        nfsd_reply_cache_free_locked(rp);
                }
        }

        kfree(drc_hashtbl);
        drc_hashtbl = NULL;
        drc_hashsize = 0;

        kmem_cache_destroy(drc_slab);
        drc_slab = NULL;
}

/*
 * Move cache entry to end of LRU list, and queue the cleaner to run if it's
 * not already scheduled.
 */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
        rp->c_timestamp = jiffies;
        list_move_tail(&rp->c_lru, &b->lru_head);
        schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
}

static long
prune_bucket(struct nfsd_drc_bucket *b)
{
        struct svc_cacherep *rp, *tmp;
        long freed = 0;

        list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
                /*
                 * Don't free entries attached to calls that are still
                 * in-progress, but do keep scanning the list.
                 */
                if (rp->c_state == RC_INPROG)
                        continue;
                if (atomic_read(&num_drc_entries) <= max_drc_entries &&
                    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
                        break;
                nfsd_reply_cache_free_locked(rp);
                freed++;
        }
        return freed;
}
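
/*
 * Note the asymmetry above: an RC_INPROG entry is skipped with "continue",
 * but a still-fresh entry ends the walk with "break" -- the list is kept
 * in LRU order, so every entry after it must be at least as recent.
 */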

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static long
prune_cache_entries(void)
{
        unsigned int i;
        long freed = 0;
        bool cancel = true;

        for (i = 0; i < drc_hashsize; i++) {
                struct nfsd_drc_bucket *b = &drc_hashtbl[i];

                if (list_empty(&b->lru_head))
                        continue;
                spin_lock(&b->cache_lock);
                freed += prune_bucket(b);
                if (!list_empty(&b->lru_head))
                        cancel = false;
                spin_unlock(&b->cache_lock);
        }

        /*
         * Conditionally rearm the job to run in RC_EXPIRE since we just
         * ran the pruner.
         */
        if (!cancel)
                mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
        return freed;
}

static void
cache_cleaner_func(struct work_struct *unused)
{
        prune_cache_entries();
}

static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
        return atomic_read(&num_drc_entries);
}

static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        return prune_cache_entries();
}

/*
 * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
        int idx;
        unsigned int base;
        __wsum csum;
        struct xdr_buf *buf = &rqstp->rq_arg;
        const unsigned char *p = buf->head[0].iov_base;
        size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
                                RC_CSUMLEN);
        size_t len = min(buf->head[0].iov_len, csum_len);

        /* rq_arg.head first */
        csum = csum_partial(p, len, 0);
        csum_len -= len;

        /* Continue into page array */
        idx = buf->page_base / PAGE_SIZE;
        base = buf->page_base & ~PAGE_MASK;
        while (csum_len) {
                p = page_address(buf->pages[idx]) + base;
                len = min_t(size_t, PAGE_SIZE - base, csum_len);
                csum = csum_partial(p, len, csum);
                csum_len -= len;
                base = 0;
                ++idx;
        }
        return csum;
}
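
/*
 * Example: for a request with a 100-byte head and 300 bytes of page data,
 * the checksum covers the 100 head bytes plus up to RC_CSUMLEN - 100 bytes
 * of page data; "base" is only non-zero for the first page visited.
 */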

static bool
nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
{
        /* Check RPC XID first */
        if (rqstp->rq_xid != rp->c_xid)
                return false;
        /* compare checksum of NFS data */
        if (csum != rp->c_csum) {
                ++payload_misses;
                return false;
        }

        /* Other discriminators */
        if (rqstp->rq_proc != rp->c_proc ||
            rqstp->rq_prot != rp->c_prot ||
            rqstp->rq_vers != rp->c_vers ||
            rqstp->rq_arg.len != rp->c_len ||
            !rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) ||
            rpc_get_port(svc_addr(rqstp)) != rpc_get_port((struct sockaddr *)&rp->c_addr))
                return false;

        return true;
}
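
/*
 * The discriminators above run cheapest-first: the XID comparison rejects
 * almost every non-match, and only entries with a colliding XID pay for
 * the checksum and address comparisons. A checksum mismatch on a matching
 * XID is exactly what the payload_misses counter records.
 */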

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the found entry or
 * NULL on failure.
 */
static struct svc_cacherep *
nfsd_cache_search(struct nfsd_drc_bucket *b, struct svc_rqst *rqstp,
                __wsum csum)
{
        struct svc_cacherep	*rp, *ret = NULL;
        struct list_head	*rh = &b->lru_head;
        unsigned int		entries = 0;

        list_for_each_entry(rp, rh, c_lru) {
                ++entries;
                if (nfsd_cache_match(rqstp, csum, rp)) {
                        ret = rp;
                        break;
                }
        }

        /* tally hash chain length stats */
        if (entries > longest_chain) {
                longest_chain = entries;
                longest_chain_cachesize = atomic_read(&num_drc_entries);
        } else if (entries == longest_chain) {
                /* prefer to keep the smallest cachesize possible here */
                longest_chain_cachesize = min_t(unsigned int,
                                longest_chain_cachesize,
                                atomic_read(&num_drc_entries));
        }

        return ret;
}

/*
 * Try to find an entry matching the current call in the cache. When none
 * is found, we try to grab the oldest expired entry off the LRU list. If
 * a suitable one isn't there, then drop the cache_lock and allocate a
 * new one, then search again in case one got inserted while this thread
 * didn't hold the lock.
 */
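/*
 * Returns one of the RC_* dispositions: RC_DOIT to execute the call
 * normally, RC_REPLY when a cached reply has been composed into rq_res,
 * or RC_DROPIT when the request should be dropped silently (a duplicate
 * of a call still in progress, or an excessively fast retransmit).
 */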
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
        struct svc_cacherep	*rp, *found;
        __be32			xid = rqstp->rq_xid;
        u32			proto = rqstp->rq_prot,
                                vers = rqstp->rq_vers,
                                proc = rqstp->rq_proc;
        __wsum			csum;
        u32 hash = nfsd_cache_hash(xid);
        struct nfsd_drc_bucket *b = &drc_hashtbl[hash];
        unsigned long		age;
        int type = rqstp->rq_cachetype;
        int rtn = RC_DOIT;

        rqstp->rq_cacherep = NULL;
        if (type == RC_NOCACHE) {
                nfsdstats.rcnocache++;
                return rtn;
        }

        csum = nfsd_cache_csum(rqstp);

        /*
         * Since the common case is a cache miss followed by an insert,
         * preallocate an entry.
         */
        rp = nfsd_reply_cache_alloc();
        spin_lock(&b->cache_lock);
        if (likely(rp)) {
                atomic_inc(&num_drc_entries);
                drc_mem_usage += sizeof(*rp);
        }

        /* go ahead and prune the cache */
        prune_bucket(b);

        found = nfsd_cache_search(b, rqstp, csum);
        if (found) {
                if (likely(rp))
                        nfsd_reply_cache_free_locked(rp);
                rp = found;
                goto found_entry;
        }

        if (!rp) {
                dprintk("nfsd: unable to allocate DRC entry!\n");
                goto out;
        }

        nfsdstats.rcmisses++;
        rqstp->rq_cacherep = rp;
        rp->c_state = RC_INPROG;
        rp->c_xid = xid;
        rp->c_proc = proc;
        rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
        rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
        rp->c_prot = proto;
        rp->c_vers = vers;
        rp->c_len = rqstp->rq_arg.len;
        rp->c_csum = csum;

        lru_put_end(b, rp);

        /* release any buffer */
        if (rp->c_type == RC_REPLBUFF) {
                drc_mem_usage -= rp->c_replvec.iov_len;
                kfree(rp->c_replvec.iov_base);
                rp->c_replvec.iov_base = NULL;
        }
        rp->c_type = RC_NOCACHE;
 out:
        spin_unlock(&b->cache_lock);
        return rtn;

found_entry:
        nfsdstats.rchits++;
        /* We found a matching entry which is either in progress or done. */
        age = jiffies - rp->c_timestamp;
        lru_put_end(b, rp);

        rtn = RC_DROPIT;
        /* Request being processed or excessive rexmits */
        if (rp->c_state == RC_INPROG || age < RC_DELAY)
                goto out;

        /* From the hall of fame of impractical attacks:
         * Is this a user who tries to snoop on the cache? */
        rtn = RC_DOIT;
        if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
                goto out;

        /* Compose RPC reply header */
        switch (rp->c_type) {
        case RC_NOCACHE:
                break;
        case RC_REPLSTAT:
                svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
                rtn = RC_REPLY;
                break;
        case RC_REPLBUFF:
                if (!nfsd_cache_append(rqstp, &rp->c_replvec))
                        goto out;	/* should not happen */
                rtn = RC_REPLY;
                break;
        default:
                printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
                nfsd_reply_cache_free_locked(rp);
        }

        goto out;
}
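
/*
 * Design note: the entry is preallocated before the bucket lock is taken,
 * so the GFP_KERNEL allocation (which may sleep) never happens under the
 * spinlock; if the search then finds that a matching entry was inserted
 * meanwhile, the spare entry is simply freed again.
 */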

/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
        struct svc_cacherep *rp = rqstp->rq_cacherep;
        struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
        u32		hash;
        struct nfsd_drc_bucket *b;
        int		len;
        size_t		bufsize = 0;

        if (!rp)
                return;

        hash = nfsd_cache_hash(rp->c_xid);
        b = &drc_hashtbl[hash];

        len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
        len >>= 2;

        /* Don't cache excessive amounts of data and XDR failures */
        if (!statp || len > (256 >> 2)) {
                nfsd_reply_cache_free(b, rp);
                return;
        }

        switch (cachetype) {
        case RC_REPLSTAT:
                if (len != 1)
                        printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
                rp->c_replstat = *statp;
                break;
        case RC_REPLBUFF:
                cachv = &rp->c_replvec;
                bufsize = len << 2;
                cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
                if (!cachv->iov_base) {
                        nfsd_reply_cache_free(b, rp);
                        return;
                }
                cachv->iov_len = bufsize;
                memcpy(cachv->iov_base, statp, bufsize);
                break;
        case RC_NOCACHE:
                nfsd_reply_cache_free(b, rp);
                return;
        }
        spin_lock(&b->cache_lock);
        drc_mem_usage += bufsize;
        lru_put_end(b, rp);
        rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
        rp->c_type = cachetype;
        rp->c_state = RC_DONE;
        spin_unlock(&b->cache_lock);
}
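
/*
 * Note that "len" above is measured in 32-bit XDR words: the (256 >> 2)
 * cap means replies longer than 64 words (256 bytes) are never cached,
 * which keeps the worst-case RC_REPLBUFF allocation small.
 */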

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
        struct kvec	*vec = &rqstp->rq_res.head[0];

        if (vec->iov_len + data->iov_len > PAGE_SIZE) {
                printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
                                data->iov_len);
                return 0;
        }
        memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
        vec->iov_len += data->iov_len;
        return 1;
}
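
/*
 * In practice the PAGE_SIZE check above is belt-and-braces:
 * nfsd_cache_update() refuses to cache replies longer than 256 bytes,
 * so a cached reply should always fit in the response head.
 */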

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
        seq_printf(m, "max entries:           %u\n", max_drc_entries);
        seq_printf(m, "num entries:           %u\n",
                        atomic_read(&num_drc_entries));
        seq_printf(m, "hash buckets:          %u\n", 1 << maskbits);
        seq_printf(m, "mem usage:             %u\n", drc_mem_usage);
        seq_printf(m, "cache hits:            %u\n", nfsdstats.rchits);
        seq_printf(m, "cache misses:          %u\n", nfsdstats.rcmisses);
        seq_printf(m, "not cached:            %u\n", nfsdstats.rcnocache);
        seq_printf(m, "payload misses:        %u\n", payload_misses);
        seq_printf(m, "longest chain len:     %u\n", longest_chain);
        seq_printf(m, "cachesize at longest:  %u\n", longest_chain_cachesize);
        return 0;
}
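
/*
 * Illustrative output as read from the stats file (values made up):
 *
 *   max entries:           32768
 *   num entries:           412
 *   hash buckets:          512
 *   ...
 */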
int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, nfsd_reply_cache_stats_show, NULL);
}