/*	$NetBSD: bufcache.c,v 1.21 2008/01/24 17:32:58 ad Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: bufcache.c,v 1.21 2008/01/24 17:32:58 ad Exp $");

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <uvm/uvm_extern.h>

#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <miscfs/specfs/specdev.h>

#include "systat.h"
#include "extern.h"

#define	VCACHE_SIZE	50
#define	PAGEINFO_ROWS	5

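/*
 * Small cache of vnodes copied in from the kernel, keyed by their
 * kernel address (vc_addr), so that repeated references to the same
 * vnode normally avoid another KREAD.
 */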
struct vcache {
	int vc_age;
	struct vnode *vc_addr;
	struct vnode vc_node;
};

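/*
 * One accounting entry per mount point seen in the buffer scan:
 * how many metadata buffers reference it, how many bytes those
 * buffers occupy (ml_size) and how many of those bytes hold valid
 * data (ml_valid).  ml_mount is a copy of the kernel's struct mount,
 * used to label the row with f_mntonname.
 */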
struct ml_entry {
	u_int ml_count;
	u_long ml_size;
	u_long ml_valid;
	struct mount *ml_addr;
	LIST_ENTRY(ml_entry) ml_entries;
	struct mount ml_mount;
};

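/*
 * Only one kernel symbol is needed: _bufmem, the number of bytes
 * currently consumed by metadata buffers.  It is resolved once with
 * kvm_nlist() and re-read on every update via NREAD(X_BUFMEM, ...).
 */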
static struct nlist namelist[] = {
#define	X_BUFMEM	0
	{ .n_name = "_bufmem" },
	{ .n_name = NULL },
};

static struct vcache vcache[VCACHE_SIZE];
static LIST_HEAD(mount_list, ml_entry) mount_list;

static u_long bufmem;
static u_int nbuf, pgwidth, kbwidth;
static struct uvmexp_sysctl uvmexp;

static void	vc_init(void);
static void	ml_init(void);
static struct vnode	*vc_lookup(struct vnode *);
static struct mount	*ml_lookup(struct mount *, int, int);
static void	fetchuvmexp(void);

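/*
 * The display is drawn in a subwindow of stdscr starting at row 5,
 * below the lines systat uses for its global status display.
 */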
	return (subwin(stdscr, -1, 0, 5, 0));

void
closebufcache(WINDOW *w)
{
	/* ... */
	ml_init();		/* Clear out mount list */
}

	for (i = 0; i <= PAGEINFO_ROWS; i++) {
		wmove(wnd, i, 0);
		wclrtoeol(wnd);
	}
	mvwaddstr(wnd, PAGEINFO_ROWS + 1, 0, "File System Bufs used"
	    " % kB in use % Bufsize kB % Util %");

	int tbuf, i, lastrow;
	double tvalid, tsize;
	struct ml_entry *ml;

	NREAD(X_BUFMEM, &bufmem, sizeof(bufmem));

140 " %*d metadata buffers using %*ld kBytes of "
142 pgwidth
, nbuf
, kbwidth
, bufmem
/ 1024,
143 ((bufmem
* 100.0) + 0.5) / getpagesize() / uvmexp
.npages
);
146 " %*" PRIu64
" pages for cached file data using %*"
147 PRIu64
" kBytes of memory (%2.0f%%).",
148 pgwidth
, uvmexp
.filepages
,
149 kbwidth
, uvmexp
.filepages
* getpagesize() / 1024,
150 (uvmexp
.filepages
* 100 + 0.5) / uvmexp
.npages
);
153 " %*" PRIu64
" pages for executables using %*"
154 PRIu64
" kBytes of memory (%2.0f%%).",
155 pgwidth
, uvmexp
.execpages
,
156 kbwidth
, uvmexp
.execpages
* getpagesize() / 1024,
157 (uvmexp
.execpages
* 100 + 0.5) / uvmexp
.npages
);
160 " %*" PRIu64
" pages for anon (non-file) data %*"
161 PRIu64
" kBytes of memory (%2.0f%%).",
162 pgwidth
, uvmexp
.anonpages
,
163 kbwidth
, uvmexp
.anonpages
* getpagesize() / 1024,
164 (uvmexp
.anonpages
* 100 + 0.5) / uvmexp
.npages
);
167 " %*" PRIu64
" free pages %*"
168 PRIu64
" kBytes of memory (%2.0f%%).",
169 pgwidth
, uvmexp
.free
,
170 kbwidth
, uvmexp
.free
* getpagesize() / 1024,
171 (uvmexp
.free
* 100 + 0.5) / uvmexp
.npages
);
	if (nbuf == 0 || bufmem == 0) {
		wclrtobot(wnd);
		return;
	}

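	/*
	 * The per-filesystem rows below divide by nbuf and bufmem,
	 * so there is nothing useful to show when either is zero.
	 */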
	tbuf = 0;
	tvalid = 0;
	tsize = 0;
	lastrow = PAGEINFO_ROWS + 2;	/* Leave room for header. */
	for (i = lastrow, ml = LIST_FIRST(&mount_list); ml != NULL;
	    i++, ml = LIST_NEXT(ml, ml_entries)) {

		int cnt = ml->ml_count;
		double v = ml->ml_valid;
		double s = ml->ml_size;

		/* Display in window if enough room. */
		if (i < getmaxy(wnd) - 2) {
			mvwprintw(wnd, i, 0, "%-20.20s", ml->ml_addr == NULL ?
			    "NULL" : ml->ml_mount.mnt_stat.f_mntonname);
			wprintw(wnd,
			    " %6d %3d %8ld %3.0f %8ld %3.0f %3.0f",
			    cnt, (100 * cnt) / nbuf,
			    (long)(v/1024), 100 * v / bufmem,
			    (long)(s/1024), 100 * s / bufmem,
			    s != 0 ? ((100 * v) / s) : 0);
			lastrow = i;
		}

		/* Update statistics. */
		tbuf += cnt;
		tvalid += v;
		tsize += s;
	}

	mvwprintw(wnd, lastrow + 2, 0,
	    "%-20s %6d %3d %8ld %3.0f %8ld %3.0f %3.0f",
	    "Total:", tbuf, (100 * tbuf) / nbuf,
	    (long)(tvalid/1024), 100 * tvalid / bufmem,
	    (long)(tsize/1024), 100 * tsize / bufmem,
	    tsize != 0 ? ((100 * tvalid) / tsize) : 0);

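/*
 * pgwidth and kbwidth are printf field widths sized from the total
 * page count so that the page and kByte columns above line up; they
 * are computed at initialisation, below.
 */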
	if (namelist[0].n_type == 0) {
		if (kvm_nlist(kd, namelist)) {
			nlisterr(namelist);
			return(0);
		}
	}

	fetchuvmexp();
	pgwidth = (int)(floor(log10((double)uvmexp.npages)) + 1);
	kbwidth = (int)(floor(log10(uvmexp.npages * getpagesize() / 1024.0)) +
	    1);
	return(1);

	/* Re-read pages used for vnodes & executables */
	size = sizeof(uvmexp);
	mib[0] = CTL_VM;
	mib[1] = VM_UVMEXP2;
	if (sysctl(mib, 2, &uvmexp, &size, NULL, 0) < 0) {
		error("can't get uvmexp: %s\n", strerror(errno));
		memset(&uvmexp, 0, sizeof(uvmexp));
	}

	int count;
	struct buf_sysctl *bp, *buffers;
	struct vnode *vn;
	struct mount *mp;
	struct ml_entry *ml;
	int mib[6];
	size_t size;
	int extraslop = 0;

	/* Re-read pages used for vnodes & executables */
	fetchuvmexp();

	/* Initialise vnode cache and mount list. */
	vc_init();
	ml_init();

	/* Get metadata buffers */
	mib[0] = CTL_KERN;
	mib[1] = KERN_BUF;
	mib[2] = KERN_BUF_ALL;
	mib[3] = KERN_BUF_ALL;
	mib[4] = (int)sizeof(struct buf_sysctl);
	mib[5] = INT_MAX;	/* we want them all */

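	/*
	 * Two-pass sysctl: query the required size first, then
	 * allocate (with some slop for buffers created in between)
	 * and fetch the real thing.
	 */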
	if (sysctl(mib, 6, NULL, &size, NULL, 0) < 0) {
		error("can't get buffers size: %s\n", strerror(errno));
		return;
	}

	size += extraslop * sizeof(struct buf_sysctl);
	buffers = malloc(size);
	if (buffers == NULL) {
		error("can't allocate buffers: %s\n", strerror(errno));
		return;
	}

	if (sysctl(mib, 6, buffers, &size, NULL, 0) < 0) {
		free(buffers);
		if (extraslop == 0) {
			/* ... */
		}
		error("can't get buffers: %s\n", strerror(errno));
		return;
	}

	nbuf = size / sizeof(struct buf_sysctl);
	for (bp = buffers; bp < buffers + nbuf; bp++) {
		if (UINT64TOPTR(bp->b_vp) != NULL) {
			vn = vc_lookup(UINT64TOPTR(bp->b_vp));
			if (vn == NULL)
				break;

			mp = vn->v_mount;
			/*
			 * References to mounted-on vnodes should be
			 * counted towards the mounted filesystem.
			 */
			if (vn->v_type == VBLK && vn->v_specnode != NULL) {
				specnode_t sn;
				specdev_t sd;

				if (!KREAD(vn->v_specnode, &sn, sizeof(sn)))
					continue;
				if (!KREAD(sn.sn_dev, &sd, sizeof(sd)))
					continue;
				if (sd.sd_mountpoint)
					mp = sd.sd_mountpoint;
			}
			ml_lookup(mp, bp->b_bufsize, bp->b_bcount);
		}
	}

	/* simple sort - there's not that many entries */
	do {
		if ((ml = LIST_FIRST(&mount_list)) == NULL ||
		    LIST_NEXT(ml, ml_entries) == NULL)
			break;

		count = 0;
		for (ml = LIST_FIRST(&mount_list); ml != NULL;
		    ml = LIST_NEXT(ml, ml_entries)) {
			if (LIST_NEXT(ml, ml_entries) == NULL)
				break;
			if (ml->ml_count < LIST_NEXT(ml, ml_entries)->ml_count) {
				ml = LIST_NEXT(ml, ml_entries);
				LIST_REMOVE(ml, ml_entries);
				LIST_INSERT_HEAD(&mount_list, ml, ml_entries);
				count++;
			}
		}
	} while (count != 0);

	free(buffers);

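/*
 * The helpers below implement the two caches: vc_init()/vc_lookup()
 * for vnodes and ml_init()/ml_lookup() for the per-mount totals.
 */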
	/* vc_addr == NULL for unused cache entry. */
	for (i = 0; i < VCACHE_SIZE; i++)
		vcache[i].vc_addr = NULL;

	/* Throw out the current mount list and start again. */
	while ((ml = LIST_FIRST(&mount_list)) != NULL) {
		LIST_REMOVE(ml, ml_entries);
		free(ml);
	}

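/*
 * Look up a vnode by its kernel address; on a miss, evict the entry
 * chosen by the age scan and KREAD the vnode in from the kernel.
 */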
static struct vnode *
vc_lookup(struct vnode *vaddr)
{
	struct vnode *ret;
	int i, oldest;

	ret = NULL;
	oldest = 0;
	for (i = 0; i < VCACHE_SIZE; i++) {
		if (vcache[i].vc_addr == NULL)
			break;
		vcache[i].vc_age++;
		if (vcache[i].vc_age < vcache[oldest].vc_age)
			oldest = i;
		if (vcache[i].vc_addr == vaddr) {
			vcache[i].vc_age = 0;
			ret = &vcache[i].vc_node;
		}
	}

	/* Find an entry in the cache? */
	if (ret != NULL)
		return(ret);

	/* Go past the end of the cache? */
	if (i >= VCACHE_SIZE)
		i = oldest;

	/* Read in new vnode and reset age counter. */
	if (KREAD(vaddr, &vcache[i].vc_node, sizeof(struct vnode)) == 0)
		return(NULL);
	vcache[i].vc_addr = vaddr;
	vcache[i].vc_age = 0;

	return(&vcache[i].vc_node);
}

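/*
 * Find (or create) the accounting entry for a mount point and add
 * this buffer's sizes to it.  A NULL mount address collects buffers
 * that are not attached to any filesystem; the display labels that
 * entry "NULL".
 */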
static struct mount *
ml_lookup(struct mount *maddr, int size, int valid)
{
	struct ml_entry *ml;

	for (ml = LIST_FIRST(&mount_list); ml != NULL;
	    ml = LIST_NEXT(ml, ml_entries))
		if (ml->ml_addr == maddr) {
			ml->ml_count++;
			ml->ml_size += size;
			ml->ml_valid += valid;
			if (ml->ml_addr == NULL)
				return(NULL);
			else
				return(&ml->ml_mount);
		}

	if ((ml = malloc(sizeof(struct ml_entry))) == NULL) {
		error("out of memory");
		die(0);
	}
	LIST_INSERT_HEAD(&mount_list, ml, ml_entries);
	ml->ml_count = 1;
	ml->ml_size = size;
	ml->ml_valid = valid;
	ml->ml_addr = maddr;
	if (maddr == NULL)
		return(NULL);

	KREAD(maddr, &ml->ml_mount, sizeof(struct mount));
	return(&ml->ml_mount);
}