/*	$NetBSD: subr_prof.c,v 1.44 2009/12/12 17:48:54 dsl Exp $	*/
/*-
 * Copyright (c) 1982, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)subr_prof.c	8.4 (Berkeley) 2/14/95
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_prof.c,v 1.44 2009/12/12 17:48:54 dsl Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>

#include <sys/cpu.h>
#ifdef GPROF
#include <sys/malloc.h>
#include <sys/gmon.h>

MALLOC_DEFINE(M_GPROF, "gprof", "kernel profiling buffer");
/*
 * Froms is actually a bunch of unsigned shorts indexing tos
 */
struct gmonparam _gmonparam = { .state = GMON_PROF_OFF };
/* Actual start of the kernel text segment. */
extern char kernel_text[];

extern char etext[];
void
kmstartup(void)
{
	char *cp;
	struct gmonparam *p = &_gmonparam;

	/*
	 * Round lowpc and highpc to multiples of the density we're using
	 * so the rest of the scaling (here and in gprof) stays in ints.
	 */
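	/*
	 * Worked example (illustrative values, not taken from this file):
	 * with HISTFRACTION == 2 and a 2-byte HISTCOUNTER, lowpc and highpc
	 * are rounded to 4-byte multiples, so textsize / HISTFRACTION below
	 * comes out as an exact number of 2-byte counters.
	 */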
	p->lowpc = rounddown(((u_long)kernel_text),
	    HISTFRACTION * sizeof(HISTCOUNTER));
	p->highpc = roundup((u_long)etext,
	    HISTFRACTION * sizeof(HISTCOUNTER));
	p->textsize = p->highpc - p->lowpc;
	printf("Profiling kernel, textsize=%ld [%lx..%lx]\n",
	    p->textsize, p->lowpc, p->highpc);
	p->kcountsize = p->textsize / HISTFRACTION;
	p->hashfraction = HASHFRACTION;
	p->fromssize = p->textsize / HASHFRACTION;
	p->tolimit = p->textsize * ARCDENSITY / 100;
	if (p->tolimit < MINARCS)
		p->tolimit = MINARCS;
	else if (p->tolimit > MAXARCS)
		p->tolimit = MAXARCS;
	p->tossize = p->tolimit * sizeof(struct tostruct);
	cp = (char *)malloc(p->kcountsize + p->fromssize + p->tossize,
	    M_GPROF, M_NOWAIT | M_ZERO);
	if (cp == 0) {
		printf("No memory for profiling.\n");
		return;
	}
	p->tos = (struct tostruct *)cp;
	cp += p->tossize;
	p->kcount = (u_short *)cp;
	cp += p->kcountsize;
	p->froms = (u_short *)cp;
}
/*
 * Return kernel profiling information.
 */
/*
 * sysctl helper routine for kern.profiling subtree.  enables/disables
 * kernel profiling and gives out copies of the profiling data.
 */
static int
sysctl_kern_profiling(SYSCTLFN_ARGS)
{
	struct gmonparam *gp = &_gmonparam;
	int error;
	struct sysctlnode node;
	node = *rnode;

	switch (node.sysctl_num) {
	case GPROF_STATE:
		node.sysctl_data = &gp->state;
		break;
	case GPROF_COUNT:
		node.sysctl_data = gp->kcount;
		node.sysctl_size = gp->kcountsize;
		break;
	case GPROF_FROMS:
		node.sysctl_data = gp->froms;
		node.sysctl_size = gp->fromssize;
		break;
	case GPROF_TOS:
		node.sysctl_data = gp->tos;
		node.sysctl_size = gp->tossize;
		break;
	case GPROF_GMONPARAM:
		node.sysctl_data = gp;
		node.sysctl_size = sizeof(*gp);
		break;
	default:
		return (EOPNOTSUPP);
	}

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);
	if (node.sysctl_num == GPROF_STATE) {
		mutex_spin_enter(&proc0.p_stmutex);
		if (gp->state == GMON_PROF_OFF)
			stopprofclock(&proc0);
		else
			startprofclock(&proc0);
		mutex_spin_exit(&proc0.p_stmutex);
	}

	return (0);
}

SYSCTL_SETUP(sysctl_kern_gprof_setup, "sysctl kern.profiling subtree setup")
{
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "kern", NULL,
	    NULL, 0, NULL, 0,
	    CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "profiling",
	    SYSCTL_DESCR("Profiling information (available)"),
	    NULL, 0, NULL, 0,
	    CTL_KERN, KERN_PROF, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "state",
	    SYSCTL_DESCR("Profiling state"),
	    sysctl_kern_profiling, 0, NULL, 0,
	    CTL_KERN, KERN_PROF, GPROF_STATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_STRUCT, "count",
	    SYSCTL_DESCR("Array of statistical program counters"),
	    sysctl_kern_profiling, 0, NULL, 0,
	    CTL_KERN, KERN_PROF, GPROF_COUNT, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_STRUCT, "froms",
	    SYSCTL_DESCR("Array indexed by program counter of "
		"call-from points"),
	    sysctl_kern_profiling, 0, NULL, 0,
	    CTL_KERN, KERN_PROF, GPROF_FROMS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_STRUCT, "tos",
	    SYSCTL_DESCR("Array of structures describing "
		"destination of calls and their counts"),
	    sysctl_kern_profiling, 0, NULL, 0,
	    CTL_KERN, KERN_PROF, GPROF_TOS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRUCT, "gmonparam",
	    SYSCTL_DESCR("Structure giving the sizes of the above "
		"arrays"),
	    sysctl_kern_profiling, 0, NULL, 0,
	    CTL_KERN, KERN_PROF, GPROF_GMONPARAM, CTL_EOL);
}
#endif /* GPROF */
/*
 * Profiling system call.
 *
 * The scale factor is a fixed point number with 16 bits of fraction, so that
 * 1.0 is represented as 0x10000.  A scale factor of 0 turns off profiling.
 */
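/*
 * Example (illustrative, not part of the original source): a scale of
 * 0x10000 (1.0) needs a sample buffer as large as the profiled region,
 * one 16-bit counter for every two bytes of text; a scale of 0x8000
 * (0.5) halves that, one counter per four bytes of text; and
 * profil(NULL, 0, 0, 0) from userland turns profiling off again.
 */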
/* ARGSUSED */
int
sys_profil(struct lwp *l, const struct sys_profil_args *uap, register_t *retval)
{
	/* {
		syscallarg(char *) samples;
		syscallarg(size_t) size;
		syscallarg(u_long) offset;
		syscallarg(u_int) scale;
	} */
	struct proc *p = l->l_proc;
	struct uprof *upp;
	if (SCARG(uap, scale) > (1 << 16))
		return (EINVAL);
	if (SCARG(uap, scale) == 0) {
		mutex_spin_enter(&p->p_stmutex);
		stopprofclock(p);
		mutex_spin_exit(&p->p_stmutex);
		return (0);
	}
	upp = &p->p_stats->p_prof;
	/* Block profile interrupts while changing state. */
	mutex_spin_enter(&p->p_stmutex);
	upp->pr_off = SCARG(uap, offset);
	upp->pr_scale = SCARG(uap, scale);
	upp->pr_base = SCARG(uap, samples);
	upp->pr_size = SCARG(uap, size);
	startprofclock(p);
	mutex_spin_exit(&p->p_stmutex);

	return (0);
}
/*
 * Scale is a fixed-point number with the binary point 16 bits
 * into the value, and is <= 1.0.  pc is at most 32 bits, so the
 * intermediate result is at most 48 bits.
 */
#define	PC_TO_INDEX(pc, prof) \
	((int)(((u_quad_t)((pc) - (prof)->pr_off) * \
	    (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
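/*
 * Worked example (hypothetical numbers, not from the original source):
 * with pr_scale = 0x8000 (0.5) and pc - pr_off = 0x1234, the macro yields
 * (0x1234 * 0x8000) >> 16 = 0x91a; the final "& ~1" keeps the result
 * aligned to the 2-byte counters that addupc_intr()/addupc_task()
 * increment at pr_base + i.
 */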
/*
 * Collect user-level profiling statistics; called on a profiling tick,
 * when a process is running in user-mode.  This routine may be called
 * from an interrupt context.  We try to update the user profiling buffers
 * cheaply with fuswintr() and suswintr().  If that fails, we revert to
 * an AST that will vector us to trap() with a context in which copyin
 * and copyout will work.  Trap will then call addupc_task().
 *
 * Note that we may (rarely) not get around to the AST soon enough, and
 * lose profile ticks when the next tick overwrites this one, but in this
 * case the system is overloaded and the profile is probably already
 * inaccurate.
 */
void
addupc_intr(struct lwp *l, u_long pc)
{
	struct uprof *prof;
	struct proc *p;
	void *addr;
	u_int i;
	int v;
	p = l->l_proc;

	KASSERT(mutex_owned(&p->p_stmutex));

	prof = &p->p_stats->p_prof;
	if (pc < prof->pr_off ||
	    (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size)
		return;			/* out of range; ignore */

	addr = prof->pr_base + i;
	mutex_spin_exit(&p->p_stmutex);
	if ((v = fuswintr(addr)) == -1 || suswintr(addr, v + 1) == -1) {
		/* XXXSMP */
		prof->pr_addr = pc;
		prof->pr_ticks++;
		cpu_need_proftick(l);
	}
	mutex_spin_enter(&p->p_stmutex);
}
/*
 * Much like before, but we can afford to take faults here.  If the
 * update fails, we simply turn off profiling.
 */
void
addupc_task(struct lwp *l, u_long pc, u_int ticks)
{
	struct uprof *prof;
	struct proc *p;
	void *addr;
	int error;
	u_int i;
	u_short v;
	p = l->l_proc;

	if (ticks == 0)
		return;

	mutex_spin_enter(&p->p_stmutex);
	prof = &p->p_stats->p_prof;
	/* Testing P_PROFIL may be unnecessary, but is certainly safe. */
	if ((p->p_stflag & PST_PROFIL) == 0 || pc < prof->pr_off ||
	    (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size) {
		mutex_spin_exit(&p->p_stmutex);
		return;
	}
	addr = prof->pr_base + i;
	mutex_spin_exit(&p->p_stmutex);
	if ((error = copyin(addr, (void *)&v, sizeof(v))) == 0) {
		v += ticks;
		error = copyout((void *)&v, addr, sizeof(v));
	}
	if (error != 0) {
		mutex_spin_enter(&p->p_stmutex);
		stopprofclock(p);
		mutex_spin_exit(&p->p_stmutex);
	}
}