/*-
 * Copyright (c) 2001 Dag-Erling Coïdan Smørgrav
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_pseudofs.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <fs/pseudofs/pseudofs.h>
#include <fs/pseudofs/pseudofs_internal.h>
static MALLOC_DEFINE(M_PFSVNCACHE, "pfs_vncache", "pseudofs vnode cache");

static struct mtx pfs_vncache_mutex;
static struct pfs_vdata *pfs_vncache;
static eventhandler_tag pfs_exit_tag;
static void pfs_exit(void *arg, struct proc *p);
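
/*
 * The cache is a doubly-linked list of pfs_vdata entries, each of which
 * points at one cached vnode and is keyed on (pfs_node, pid, mount).
 * All list manipulation is serialized by pfs_vncache_mutex.
 */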

SYSCTL_NODE(_vfs_pfs, OID_AUTO, vncache, CTLFLAG_RW, 0,
    "pseudofs vnode cache");

static int pfs_vncache_entries;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, entries, CTLFLAG_RD,
    &pfs_vncache_entries, 0,
    "number of entries in the vnode cache");

static int pfs_vncache_maxentries;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, maxentries, CTLFLAG_RD,
    &pfs_vncache_maxentries, 0,
    "highest number of entries in the vnode cache");

static int pfs_vncache_hits;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, hits, CTLFLAG_RD,
    &pfs_vncache_hits, 0,
    "number of cache hits since initialization");

static int pfs_vncache_misses;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, misses, CTLFLAG_RD,
    &pfs_vncache_misses, 0,
    "number of cache misses since initialization");

extern struct vop_vector pfs_vnodeops;  /* XXX -> .h file */

/*
 * Initialize vnode cache
 */
void
pfs_vncache_load(void)
{

        mtx_assert(&Giant, MA_OWNED);
        mtx_init(&pfs_vncache_mutex, "pfs_vncache", NULL, MTX_DEF);
        pfs_exit_tag = EVENTHANDLER_REGISTER(process_exit, pfs_exit, NULL,
            EVENTHANDLER_PRI_ANY);
}

/*
 * Tear down vnode cache
 */
void
pfs_vncache_unload(void)
{

        mtx_assert(&Giant, MA_OWNED);
        EVENTHANDLER_DEREGISTER(process_exit, pfs_exit_tag);
        KASSERT(pfs_vncache_entries == 0,
            ("%d vncache entries remaining", pfs_vncache_entries));
        mtx_destroy(&pfs_vncache_mutex);
}

/*
 * Allocate a vnode
 */
int
pfs_vncache_alloc(struct mount *mp, struct vnode **vpp,
                  struct pfs_node *pn, pid_t pid)
{
        struct pfs_vdata *pvd;
        struct vnode *vp;
        int error;
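
        /*
         * The vnode is returned locked in *vpp: either an existing cache
         * entry revived with vget(), or a freshly allocated vnode below.
         */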

        /*
         * See if the vnode is in the cache.
         * XXX linear search is not very efficient.
         */
retry:
        mtx_lock(&pfs_vncache_mutex);
        for (pvd = pfs_vncache; pvd; pvd = pvd->pvd_next) {
                if (pvd->pvd_pn == pn && pvd->pvd_pid == pid &&
                    pvd->pvd_vnode->v_mount == mp) {
                        vp = pvd->pvd_vnode;
                        VI_LOCK(vp);
                        mtx_unlock(&pfs_vncache_mutex);
                        if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread) == 0) {
                                ++pfs_vncache_hits;
                                *vpp = vp;
                                /*
                                 * Some callers cache_enter(vp) later, so
                                 * we have to make sure it's not in the
                                 * VFS cache so it doesn't get entered
                                 * twice.  A better solution would be to
                                 * make pfs_vncache_alloc() responsible
                                 * for entering the vnode in the VFS
                                 * cache.
                                 */
                                cache_purge(vp);
                                return (0);
                        }
                        /* the vnode was reclaimed while we slept; retry */
                        goto retry;
                }
        }
        mtx_unlock(&pfs_vncache_mutex);
        ++pfs_vncache_misses;

        /* nope, get a new one */
        MALLOC(pvd, struct pfs_vdata *, sizeof *pvd, M_PFSVNCACHE, M_WAITOK);
        mtx_lock(&pfs_vncache_mutex);
        if (++pfs_vncache_entries > pfs_vncache_maxentries)
                pfs_vncache_maxentries = pfs_vncache_entries;
        mtx_unlock(&pfs_vncache_mutex);
        error = getnewvnode("pseudofs", mp, &pfs_vnodeops, vpp);
        if (error) {
                mtx_lock(&pfs_vncache_mutex);
                --pfs_vncache_entries;
                mtx_unlock(&pfs_vncache_mutex);
                FREE(pvd, M_PFSVNCACHE);
                return (error);
        }
        pvd->pvd_pn = pn;
        pvd->pvd_pid = pid;
        pvd->pvd_dead = 0;      /* MALLOC() does not zero; clear the dead flag */
        (*vpp)->v_data = pvd;
        switch (pn->pn_type) {
        case pfstype_root:
                (*vpp)->v_vflag = VV_ROOT;
#if 0
                printf("root vnode allocated\n");
#endif
                /* fall through */
        case pfstype_dir:
        case pfstype_this:
        case pfstype_parent:
        case pfstype_procdir:
                (*vpp)->v_type = VDIR;
                break;
        case pfstype_file:
                (*vpp)->v_type = VREG;
                break;
        case pfstype_symlink:
                (*vpp)->v_type = VLNK;
                break;
        case pfstype_none:
                KASSERT(0, ("pfs_vncache_alloc called for null node\n"));
        default:
                panic("%s has unexpected type: %d", pn->pn_name, pn->pn_type);
        }
        /*
         * Propagate flag through to vnode so users know it can change
         * if the process changes (i.e. execve)
         */
        if ((pn->pn_flags & PFS_PROCDEP) != 0)
                (*vpp)->v_vflag |= VV_PROCDEP;
        pvd->pvd_vnode = *vpp;
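
        /*
         * The vnode must be locked before insmntque() hooks it onto the
         * mount point; it is handed back to the caller still locked.
         */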
        vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
        error = insmntque(*vpp, mp);
        if (error != 0) {
                /* roll back the entry count and hand the error up */
                mtx_lock(&pfs_vncache_mutex);
                --pfs_vncache_entries;
                mtx_unlock(&pfs_vncache_mutex);
                FREE(pvd, M_PFSVNCACHE);
                *vpp = NULLVP;
                return (error);
        }
        mtx_lock(&pfs_vncache_mutex);
        pvd->pvd_prev = NULL;
        pvd->pvd_next = pfs_vncache;
        if (pvd->pvd_next)
                pvd->pvd_next->pvd_prev = pvd;
        pfs_vncache = pvd;
        mtx_unlock(&pfs_vncache_mutex);
        return (0);
}

/*
 * Free the vnode data associated with a vnode
 */
int
pfs_vncache_free(struct vnode *vp)
{
        struct pfs_vdata *pvd;

        mtx_lock(&pfs_vncache_mutex);
        pvd = (struct pfs_vdata *)vp->v_data;
        KASSERT(pvd != NULL, ("pfs_vncache_free(): no vnode data\n"));
        if (pvd->pvd_next)
                pvd->pvd_next->pvd_prev = pvd->pvd_prev;
        if (pvd->pvd_prev)
                pvd->pvd_prev->pvd_next = pvd->pvd_next;
        else
                pfs_vncache = pvd->pvd_next;
        --pfs_vncache_entries;
        mtx_unlock(&pfs_vncache_mutex);

        FREE(pvd, M_PFSVNCACHE);
        vp->v_data = NULL;
        return (0);
}

/*
 * Purge the cache of dead entries
 *
 * This is extremely inefficient due to the fact that vgone() not only
 * indirectly modifies the vnode cache, but may also sleep.  We can
 * neither hold pfs_vncache_mutex across a vgone() call, nor make any
 * assumptions about the state of the cache after vgone() returns.  In
 * consequence, we must start over after every vgone() call, and keep
 * trying until we manage to traverse the entire cache.
 *
 * The only way to improve this situation is to change the data structure
 * used to implement the cache.
 */
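
/*
 * A NULL pn purges only entries already marked dead; a non-NULL pn also
 * purges every entry that refers to that node.
 */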
void
pfs_purge(struct pfs_node *pn)
{
        struct pfs_vdata *pvd;
        struct vnode *vnp;

        mtx_lock(&pfs_vncache_mutex);
        pvd = pfs_vncache;
        while (pvd != NULL) {
                if (pvd->pvd_dead || (pn != NULL && pvd->pvd_pn == pn)) {
                        vnp = pvd->pvd_vnode;
                        vhold(vnp);
                        mtx_unlock(&pfs_vncache_mutex);
                        VOP_LOCK(vnp, LK_EXCLUSIVE);
                        vgone(vnp);
                        VOP_UNLOCK(vnp, 0);
                        vdrop(vnp);
                        mtx_lock(&pfs_vncache_mutex);
                        /* the cache may have changed; start over */
                        pvd = pfs_vncache;
                } else {
                        pvd = pvd->pvd_next;
                }
        }
        mtx_unlock(&pfs_vncache_mutex);
}

/*
 * Free all vnodes associated with a defunct process
 *
 * XXXRW: It is unfortunate that pfs_exit() always acquires and releases two
 * mutexes (one of which is Giant) for every process exit, even if procfs
 * isn't mounted.
 */
static void
pfs_exit(void *arg, struct proc *p)
{
        struct pfs_vdata *pvd;
        int dead;

        if (pfs_vncache == NULL)
                return;
        mtx_lock(&Giant);
        mtx_lock(&pfs_vncache_mutex);
        for (pvd = pfs_vncache, dead = 0; pvd != NULL; pvd = pvd->pvd_next)
                if (pvd->pvd_pid == p->p_pid)
                        dead = pvd->pvd_dead = 1;
        mtx_unlock(&pfs_vncache_mutex);
        if (dead)
                pfs_purge(NULL);
        mtx_unlock(&Giant);
}