1 /* $NetBSD: nfs_srvcache.c,v 1.44 2009/03/14 15:36:24 dsl Exp $ */
4 * Copyright (c) 1989, 1993
5 * The Regents of the University of California. All rights reserved.
7 * This code is derived from software contributed to Berkeley by
8 * Rick Macklem at The University of Guelph.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * @(#)nfs_srvcache.c 8.3 (Berkeley) 3/30/95
38 * Reference: Chet Juszczak, "Improving the Performance and Correctness
39 * of an NFS Server", in Proc. Winter 1989 USENIX Conference,
40 * pages 53-63. San Diego, February 1989.
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: nfs_srvcache.c,v 1.44 2009/03/14 15:36:24 dsl Exp $");
46 #include <sys/param.h>
47 #include <sys/vnode.h>
48 #include <sys/condvar.h>
49 #include <sys/mount.h>
50 #include <sys/kernel.h>
51 #include <sys/systm.h>
56 #include <sys/mutex.h>
57 #include <sys/socket.h>
58 #include <sys/socketvar.h>
60 #include <netinet/in.h>
61 #include <nfs/nfsm_subs.h>
62 #include <nfs/rpcv2.h>
63 #include <nfs/nfsproto.h>
65 #include <nfs/nfsrvcache.h>
66 #include <nfs/nfs_var.h>
/*
 * NOTE(review): this region of the file is an extraction-mangled view:
 * statements are split across physical lines, the leading integers are
 * residual line numbers from the original file, and runs of original
 * lines are missing.  Code is preserved byte-for-byte; only comments
 * have been added.
 */
/* Server-side NFS statistics counters, defined elsewhere. */
68 extern struct nfsstats nfsstats
;
/* Per-procedure translation table; used below to index nfsv2_repstat. */
69 extern const int nfsv2_procid
[NFS_NPROCS
];
/* Current entry count and the target size of the reply cache. */
70 long numnfsrvcache
, desirednfsrvcache
= NFSRVCACHESIZ
;
/* Pool backing struct nfsrvcache allocations. */
71 struct pool nfs_reqcache_pool
;
/*
 * Hash an RPC transaction id into a bucket of nfsrvhashtbl.
 * NOTE(review): "nfsrvhash" here is a hash mask whose declaration is
 * not visible in this chunk -- presumably filled in by hashinit().
 */
73 #define NFSRCHASH(xid) \
74 (&nfsrvhashtbl[((xid) + ((xid) >> 24)) & nfsrvhash])
/* Hash-chain heads and global LRU list of cache entries. */
75 LIST_HEAD(nfsrvhash
, nfsrvcache
) *nfsrvhashtbl
;
76 TAILQ_HEAD(nfsrvlru
, nfsrvcache
) nfsrvlruhead
;
/* Lock serializing access to the hash table, LRU and entry flags. */
77 kmutex_t nfsrv_reqcache_lock
;
/* Mbuf-owner tag so cache-held mbufs show up in mbuf tracing builds. */
80 #if defined(MBUFTRACE)
81 static struct mowner nfsd_cache_mowner
= MOWNER_INIT("nfsd", "cache");
82 #endif /* defined(MBUFTRACE) */
/* Address family recorded in an entry: AF_INET iff RC_INETADDR is set. */
84 #define NETFAMILY(rp) \
85 (((rp)->rc_flags & RC_INETADDR) ? AF_INET : -1)
/* Forward declarations of local helpers defined later in the file. */
87 static struct nfsrvcache
*nfsrv_lookupcache(struct nfsrv_descript
*nd
);
88 static void nfsrv_unlockcache(struct nfsrvcache
*rp
);
/*
 * nonidempotent[]: per-procedure flags marking which NFS RPCs are not
 * idempotent; nfsrv_updatecache() saves replies only for these.
 * NOTE(review): most of the NFS_NPROCS initializers (and the closing
 * brace) fall on lines missing from this view of the file.
 */
91 * Static array that defines which nfs rpc's are nonidempotent
93 const int nonidempotent
[NFS_NPROCS
] = {
111 false, /* READDIRPLUS */
114 false, /* PATHCONF */
/*
 * nfsv2_repstat[]: true iff the v2 RPC reply is an NFS status only,
 * so nfsrv_getcache() can rebuild the reply from rc_status alone.
 * NOTE(review): the initializer list is likewise truncated here.
 */
119 /* True iff the rpc reply is an nfs status ONLY! */
120 static const int nfsv2_repstat
[NFS_NPROCS
] = {
126 false, /* READLINK */
128 false, /* Obsolete WRITECACHE */
/*
 * cleanentry(rp): free the resources attached to a reply-cache entry.
 * Releases the cached reply mbuf chain when RC_REPMBUF is set, then
 * clears the reply flags.  NOTE(review): the function's return type,
 * braces and the body of the RC_NAM branch fall on lines missing from
 * this view -- the RC_NAM branch presumably frees rc_nam; confirm
 * against the full source.
 */
142 cleanentry(struct nfsrvcache
*rp
)
/* Free the saved reply mbuf chain, if one was cached. */
145 if ((rp
->rc_flags
& RC_REPMBUF
) != 0) {
146 m_freem(rp
->rc_reply
);
/* Entry recorded the client address as an mbuf (non-INET case). */
148 if ((rp
->rc_flags
& RC_NAM
) != 0) {
/* Entry no longer holds a cached reply of either form. */
151 rp
->rc_flags
&= ~(RC_REPSTATUS
|RC_REPMBUF
);
/*
 * nfsrv_initcache(): set up the server request (reply) cache -- the
 * lock, the xid hash table, the LRU list, the entry pool and the mbuf
 * owner.  NOTE(review): the tail of the hashinit() call (the argument
 * that receives the hash mask) is on a line missing from this view.
 */
155 * Initialize the server request cache list
158 nfsrv_initcache(void)
161 mutex_init(&nfsrv_reqcache_lock
, MUTEX_DEFAULT
, IPL_NONE
);
/* Hash table sized from the desired cache size. */
162 nfsrvhashtbl
= hashinit(desirednfsrvcache
, HASH_LIST
, true,
164 TAILQ_INIT(&nfsrvlruhead
);
/* Entry pool uses the sleepable (nointr) allocator. */
165 pool_init(&nfs_reqcache_pool
, sizeof(struct nfsrvcache
), 0, 0, 0,
166 "nfsreqcachepl", &pool_allocator_nointr
, IPL_NONE
);
167 MOWNER_ATTACH(&nfsd_cache_mowner
);
/*
 * nfsrv_finicache(): tear down the request cache.  The LRU must
 * already be empty (nfsrv_cleancache() drains it); then release the
 * pool, hash table, mbuf owner and lock, in that order.
 */
171 nfsrv_finicache(void)
175 KASSERT(TAILQ_EMPTY(&nfsrvlruhead
));
176 pool_destroy(&nfs_reqcache_pool
);
177 hashdone(nfsrvhashtbl
, HASH_LIST
, nfsrvhash
);
178 MOWNER_DETACH(&nfsd_cache_mowner
);
179 mutex_destroy(&nfsrv_reqcache_lock
);
/*
 * nfsrv_lookupcache(nd): find the cache entry matching the request's
 * transaction id, procedure number and client address, and lock it
 * (RC_G_LOCKED).  Caller must hold nfsrv_reqcache_lock.  While a
 * matching entry is locked by another thread, sleeps on the entry's
 * condvar.  NOTE(review): the loop restart after cv_wait(), the
 * return statements and the closing braces fall on lines missing from
 * this view; presumably returns the locked entry or NULL -- confirm
 * against the full source.
 */
183 * Lookup a cache and lock it
185 static struct nfsrvcache
*
186 nfsrv_lookupcache(struct nfsrv_descript
*nd
)
188 struct nfsrvcache
*rp
;
190 KASSERT(mutex_owned(&nfsrv_reqcache_lock
));
/* Walk the hash chain selected by the request's xid. */
193 LIST_FOREACH(rp
, NFSRCHASH(nd
->nd_retxid
), rc_hash
) {
/* Match on xid, procedure number and originating address. */
194 if (nd
->nd_retxid
== rp
->rc_xid
&&
195 nd
->nd_procnum
== rp
->rc_proc
&&
196 netaddr_match(NETFAMILY(rp
), &rp
->rc_haddr
, nd
->nd_nam
)) {
/* Entry busy elsewhere: sleep until it is unlocked. */
197 if ((rp
->rc_gflags
& RC_G_LOCKED
) != 0) {
198 cv_wait(&rp
->rc_cv
, &nfsrv_reqcache_lock
);
/* Take the per-entry lock before handing the entry back. */
201 rp
->rc_gflags
|= RC_G_LOCKED
;
/*
 * nfsrv_unlockcache(rp): release the per-entry lock taken by
 * nfsrv_lookupcache() and wake every thread sleeping on the entry's
 * condvar.  Caller must hold nfsrv_reqcache_lock, and the entry must
 * currently be locked.
 */
213 nfsrv_unlockcache(struct nfsrvcache
*rp
)
216 KASSERT(mutex_owned(&nfsrv_reqcache_lock
));
218 KASSERT((rp
->rc_gflags
& RC_G_LOCKED
) != 0);
219 rp
->rc_gflags
&= ~RC_G_LOCKED
;
220 cv_broadcast(&rp
->rc_cv
);
/*
 * nfsrv_getcache(nd, slp, repp): consult the reply cache for request
 * 'nd'.  On a hit, bump the matching statistic and, for completed
 * non-idempotent requests, reproduce the cached reply into *repp --
 * rebuilt from the saved status (RC_REPSTATUS) or copied from the
 * saved mbuf chain (RC_REPMBUF).  On a miss, create (or recycle) an
 * entry, record the request's xid/address/procedure, and insert it in
 * the hash table and LRU.  NOTE(review): the return statements (the
 * RC_* action codes named in the comment below), several braces, the
 * switch case labels and the declarations of 'mb'/'bpos' fall on lines
 * missing from this view -- confirm details against the full source.
 */
224 * Look for the request in the cache
226 * return action and optionally reply
228 * insert it in the cache
230 * The rules are as follows:
231 * - if in progress, return DROP request
232 * - if completed within DELAY of the current time, return DROP it
233 * - if completed a longer time ago return REPLY if the reply was cached or
235 * Update/add new request at end of lru list
238 nfsrv_getcache(struct nfsrv_descript
*nd
, struct nfssvc_sock
*slp
, struct mbuf
**repp
)
240 struct nfsrvcache
*rp
, *rpdup
;
242 struct sockaddr_in
*saddr
;
/* Look the request up with the cache lock held. */
246 mutex_enter(&nfsrv_reqcache_lock
);
247 rp
= nfsrv_lookupcache(nd
);
249 mutex_exit(&nfsrv_reqcache_lock
);
251 /* If not at end of LRU chain, move it there */
252 if (TAILQ_NEXT(rp
, rc_lru
)) { /* racy but ok */
253 mutex_enter(&nfsrv_reqcache_lock
);
254 TAILQ_REMOVE(&nfsrvlruhead
, rp
, rc_lru
);
255 TAILQ_INSERT_TAIL(&nfsrvlruhead
, rp
, rc_lru
);
256 mutex_exit(&nfsrv_reqcache_lock
);
/* A found entry must never still be in the UNUSED state. */
258 if (rp
->rc_state
== RC_UNUSED
)
259 panic("nfsrv cache");
/* Request is still being serviced by another nfsd. */
260 if (rp
->rc_state
== RC_INPROG
) {
261 nfsstats
.srvcache_inproghits
++;
/* Completed, status-only reply: rebuild the reply header. */
263 } else if (rp
->rc_flags
& RC_REPSTATUS
) {
264 nfsstats
.srvcache_nonidemdonehits
++;
265 nfs_rephead(0, nd
, slp
, rp
->rc_status
,
266 0, (u_quad_t
*)0, repp
, &mb
, &bpos
);
/* Completed with a full cached reply: hand back a copy. */
268 } else if (rp
->rc_flags
& RC_REPMBUF
) {
269 nfsstats
.srvcache_nonidemdonehits
++;
270 *repp
= m_copym(rp
->rc_reply
, 0, M_COPYALL
,
/* Idempotent request: mark in progress and just redo it. */
274 nfsstats
.srvcache_idemdonehits
++;
275 rp
->rc_state
= RC_INPROG
;
/* Done with the entry; drop its lock under the cache lock. */
278 mutex_enter(&nfsrv_reqcache_lock
);
279 nfsrv_unlockcache(rp
);
280 mutex_exit(&nfsrv_reqcache_lock
);
/* Cache miss: build a new entry for this request. */
283 nfsstats
.srvcache_misses
++;
/* Below the cap: allocate a fresh, already-locked entry. */
284 if (numnfsrvcache
< desirednfsrvcache
) {
286 mutex_exit(&nfsrv_reqcache_lock
);
287 rp
= pool_get(&nfs_reqcache_pool
, PR_WAITOK
);
288 memset(rp
, 0, sizeof *rp
);
289 cv_init(&rp
->rc_cv
, "nfsdrc");
290 rp
->rc_gflags
= RC_G_LOCKED
;
/* At capacity: recycle the LRU head once it is unlocked. */
292 rp
= TAILQ_FIRST(&nfsrvlruhead
);
293 while ((rp
->rc_gflags
& RC_G_LOCKED
) != 0) {
294 cv_wait(&rp
->rc_cv
, &nfsrv_reqcache_lock
);
/* Re-fetch the head: it may have changed while we slept. */
295 rp
= TAILQ_FIRST(&nfsrvlruhead
);
/* Claim the recycled entry and unhook it from hash and LRU. */
297 rp
->rc_gflags
|= RC_G_LOCKED
;
298 LIST_REMOVE(rp
, rc_hash
);
299 TAILQ_REMOVE(&nfsrvlruhead
, rp
, rc_lru
);
300 mutex_exit(&nfsrv_reqcache_lock
);
/* Fill in the new entry from the request. */
304 rp
->rc_state
= RC_INPROG
;
305 rp
->rc_xid
= nd
->nd_retxid
;
306 saddr
= mtod(nd
->nd_nam
, struct sockaddr_in
*);
/* Record the client address: IPv4 inline, otherwise as an mbuf. */
307 switch (saddr
->sin_family
) {
309 rp
->rc_flags
|= RC_INETADDR
;
310 rp
->rc_inetaddr
= saddr
->sin_addr
.s_addr
;
313 rp
->rc_flags
|= RC_NAM
;
314 rp
->rc_nam
= m_copym(nd
->nd_nam
, 0, M_COPYALL
, M_WAIT
);
315 m_claimm(rp
->rc_nam
, &nfsd_cache_mowner
);
318 rp
->rc_proc
= nd
->nd_procnum
;
/* Re-check under the lock: another thread may have raced us. */
319 mutex_enter(&nfsrv_reqcache_lock
);
320 rpdup
= nfsrv_lookupcache(nd
);
323 * other thread made duplicate cache entry.
325 KASSERT(numnfsrvcache
> 0);
327 mutex_exit(&nfsrv_reqcache_lock
);
/* Discard our redundant entry; the winner's stays cached. */
329 cv_destroy(&rp
->rc_cv
);
330 pool_put(&nfs_reqcache_pool
, rp
);
/* No duplicate: publish the entry on the LRU and hash chain. */
334 TAILQ_INSERT_TAIL(&nfsrvlruhead
, rp
, rc_lru
);
335 LIST_INSERT_HEAD(NFSRCHASH(nd
->nd_retxid
), rp
, rc_hash
);
336 nfsrv_unlockcache(rp
);
337 mutex_exit(&nfsrv_reqcache_lock
);
/*
 * nfsrv_updatecache(nd, repvalid, repmbuf): mark the request's cache
 * entry RC_DONE after the RPC has been serviced and, when the reply is
 * valid and the procedure is non-idempotent, save the reply -- as a
 * bare status for status-only v2 replies (RC_REPSTATUS), otherwise as
 * a copy of the reply mbuf chain (RC_REPMBUF).  NOTE(review): braces
 * and the handling of a NULL lookup result fall on lines missing from
 * this view -- confirm against the full source.
 */
342 * Update a request cache entry after the rpc has been done
345 nfsrv_updatecache(struct nfsrv_descript
*nd
, int repvalid
, struct mbuf
*repmbuf
)
347 struct nfsrvcache
*rp
;
/* Find and lock the entry created by nfsrv_getcache(). */
349 mutex_enter(&nfsrv_reqcache_lock
);
350 rp
= nfsrv_lookupcache(nd
);
351 mutex_exit(&nfsrv_reqcache_lock
);
/* The RPC has completed. */
354 rp
->rc_state
= RC_DONE
;
356 * If we have a valid reply update status and save
357 * the reply for non-idempotent rpc's.
359 if (repvalid
&& nonidempotent
[nd
->nd_procnum
]) {
/* v2 status-only reply: remember just the status code. */
360 if ((nd
->nd_flag
& ND_NFSV3
) == 0 &&
361 nfsv2_repstat
[nfsv2_procid
[nd
->nd_procnum
]]) {
362 rp
->rc_status
= nd
->nd_repstat
;
363 rp
->rc_flags
|= RC_REPSTATUS
;
/* Otherwise keep a private copy of the whole reply chain. */
365 rp
->rc_reply
= m_copym(repmbuf
,
366 0, M_COPYALL
, M_WAIT
);
367 m_claimm(rp
->rc_reply
, &nfsd_cache_mowner
);
368 rp
->rc_flags
|= RC_REPMBUF
;
/* Release the entry lock taken by the lookup above. */
371 mutex_enter(&nfsrv_reqcache_lock
);
372 nfsrv_unlockcache(rp
);
373 mutex_exit(&nfsrv_reqcache_lock
);
/*
 * nfsrv_cleancache(): empty the entire reply cache; called when the
 * last nfsd terminates.  Each entry is unhashed and removed from the
 * LRU under the cache lock, then destroyed with the lock dropped
 * (cv_destroy/pool_put).  NOTE(review): the call that frees the
 * entry's cached reply/address (presumably cleanentry()) and the
 * numnfsrvcache decrement fall on lines missing from this view --
 * confirm against the full source.
 */
378 * Clean out the cache. Called when the last nfsd terminates.
381 nfsrv_cleancache(void)
383 struct nfsrvcache
*rp
;
385 mutex_enter(&nfsrv_reqcache_lock
);
/* Drain the LRU; no nfsd is running, so no entry may be locked. */
386 while ((rp
= TAILQ_FIRST(&nfsrvlruhead
)) != NULL
) {
387 KASSERT((rp
->rc_gflags
& RC_G_LOCKED
) == 0);
388 LIST_REMOVE(rp
, rc_hash
);
389 TAILQ_REMOVE(&nfsrvlruhead
, rp
, rc_lru
);
390 KASSERT(numnfsrvcache
> 0);
/* Drop the lock around the (possibly blocking) teardown calls. */
392 mutex_exit(&nfsrv_reqcache_lock
);
394 cv_destroy(&rp
->rc_cv
);
395 pool_put(&nfs_reqcache_pool
, rp
);
396 mutex_enter(&nfsrv_reqcache_lock
);
/* Every entry must have been accounted for and released. */
398 KASSERT(numnfsrvcache
== 0);
399 mutex_exit(&nfsrv_reqcache_lock
);