4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
27 /* Copyright (c) 1988 AT&T */
28 /* All Rights Reserved */
31 * University Copyright- Copyright (c) 1982, 1986, 1988
32 * The Regents of the University of California
35 * University Acknowledgment- Portions of this document are derived from
36 * software developed by the University of California, Berkeley, and its
40 #pragma ident "%Z%%M% %I% %E% SMI"
43 * Environment variable PROFDIR added such that:
44 * If PROFDIR doesn't exist, "mon.out" is produced as before.
45 * If PROFDIR = NULL, no profiling output is produced.
46 * If PROFDIR = string, "string/pid.progname" is produced,
47 * where name consists of argv[0] suitably massaged.
51 * (global) monitor init, cleanup for prof(1)iling
52 * (global) _mcount function call counter
53 * (global) _mcount_newent call count entry manager
54 * (static) _mnewblock call count block allocator
57 * Monitor(), coordinating with mcount(), mcount_newent() and mnewblock(),
58 * maintains a series of one or more blocks of prof-profiling
59 * information. These blocks are added in response to calls to
60 * monitor() (explicitly or via mcrt[01]'s _start) and, via mcount()'s
61 * calls to mcount_newent() thence to mnewblock().
62 * The blocks are tracked via a linked list of block anchors,
63 * which each point to a block.
66 * An anchor points forward, backward and 'down' (to a block).
67 * A block has the profiling information, and consists of
68 * three regions: a header, a function call count array region,
69 * and an optional execution histogram region, as illustrated below.
74 * prior<--| |-->next anchor
84 * + fcn call + // data collected by mcount
90 * + execution + // data collected by system call,
91 * + profile + // profil(2) (assumed ALWAYS specified
92 * + histogram + // by monitor()-caller, even if small;
93 * + + // never specified by mnewblock()).
96 * The first time monitor() is called, it sets up the chain
97 * by allocating an anchor and initializing countbase and countlimit
98 * to zero. Everyone assumes that they start out zeroed.
100 * When a user (or _start from mcrt[01]) calls monitor(), they
101 * register a buffer which contains the third region (either with
102 * a meaningful size, or so short that profil-ing is being shut off).
104 * For each fcn, the first time it calls mcount(), mcount calls
105 * mcount_newent(), which parcels out the fcn call count entries
106 * from the current block, until they are exhausted; then it calls
109 * Mnewblock() allocates a block Without a third region, and
110 * links in a new associated anchor, adding a new anchor&block pair
111 * to the linked list. Each new mnewblock() block or user block,
112 * is added to the list as it comes in, FIFO.
114 * When monitor() is called to close up shop, it writes out
115 * a summarizing header, ALL the fcn call counts from ALL
116 * the blocks, and the Last specified execution histogram
117 * (currently there is no neat way to accumulate that info).
118 * This preserves all call count information, even when
119 * new blocks are specified.
121 * NOTE - no block passed to monitor() may be freed, until
122 * it is called to clean up!!!!
126 #pragma weak _monitor = monitor
131 #include <sys/types.h>
142 #define PROFDIR "PROFDIR"
144 static mutex_t mon_lock
= DEFAULTMUTEX
;
146 char **___Argv
= NULL
; /* initialized to argv array by mcrt0 (if loaded) */
149 * countbase and countlimit are used to parcel out
150 * the pc,count cells from the current block one at
151 * a time to each profiled function, the first time
152 * that function is called.
153 * When countbase reaches countlimit, mcount() calls
154 * mnewblock() to link in a new block.
156 * Only monitor/mcount/mcount_newent/mnewblock() should change these!!
157 * Correct that: only these routines are ABLE to change these;
158 * countbase/countlimit are now STATIC!
160 static char *countbase
; /* addr of next pc,count cell to use in block */
161 static char *countlimit
; /* addr lim for cells (addr after last cell) */
163 typedef struct anchor ANCHOR
;
166 ANCHOR
*next
, *prior
; /* forward, backward ptrs for list */
167 struct hdr
*monBuffer
; /* 'down' ptr, to block */
168 short flags
; /* indicators - has histogram designation */
170 int histSize
; /* if has region3, this is size. */
173 #define HAS_HISTOGRAM 0x0001 /* this buffer has a histogram */
175 static ANCHOR
*curAnchor
= NULL
; /* addr of anchor for current block */
176 static ANCHOR firstAnchor
; /* the first anchor to use */
177 /* - hopefully the Only one needed */
178 /* a speedup for most cases. */
179 static char *mon_out
;
181 static int writeBlocks(void);
182 static void _mnewblock(void);
183 struct cnt
*_mcount_newent(void);
186 * int (*alowpc)(), (*ahighpc)(); boundaries of text to be monitored
187 * WORD *buffer; ptr to space for monitor data(WORDs)
188 * size_t bufsize; size of above space(in WORDs)
189 * size_t nfunc; max no. of functions whose calls are counted
190 * (default nfunc is 300 on PDP11, 600 on others)
193 monitor(int (*alowpc
)(void), int (*ahighpc
)(void), WORD
*buffer
,
194 size_t bufsize
, size_t nfunc
)
203 char *lowpc
= (char *)alowpc
;
204 char *highpc
= (char *)ahighpc
;
206 lmutex_lock(&mon_lock
);
208 if (lowpc
== NULL
) { /* true only at the end */
210 if (curAnchor
!= NULL
) { /* if anything was collected!.. */
211 profil(NULL
, 0, 0, 0);
212 if (writeBlocks() == 0)
215 lmutex_unlock(&mon_lock
);
224 * Ok - they want to submit a block for immediate use, for
225 * function call count consumption, and execution profile
226 * histogram computation.
227 * If the block fails sanity tests, just bag it.
228 * Next thing - get name to use. If PROFDIR is NULL, let's
229 * get out now - they want No Profiling done.
232 * Set the block hdr cells.
233 * Get an anchor for the block, and link the anchor+block onto
234 * the end of the chain.
235 * Init the grabba-cell externs (countbase/limit) for this block.
236 * Finally, call profil and return.
239 ssiz
= ((sizeof (struct hdr
) + nfunc
* sizeof (struct cnt
)) /
241 if (ssiz
>= bufsize
|| lowpc
>= highpc
) {
242 lmutex_unlock(&mon_lock
);
246 if ((s
= getenv(PROFDIR
)) == NULL
) { /* PROFDIR not in environment */
247 mon_out
= MON_OUT
; /* use default "mon.out" */
248 } else if (*s
== '\0') { /* value of PROFDIR is NULL */
249 lmutex_unlock(&mon_lock
);
250 return; /* no profiling on this run */
251 } else { /* construct "PROFDIR/pid.progname" */
258 /* 15 is space for /pid.mon.out\0, if necessary */
259 if ((mon_out
= libc_malloc(len
+ strlen(___Argv
[0]) + 15))
261 lmutex_unlock(&mon_lock
);
265 (void) strcpy(mon_out
, s
);
266 name
= mon_out
+ len
;
267 *name
++ = '/'; /* two slashes won't hurt */
269 if ((pid
= getpid()) <= 0) /* extra test just in case */
270 pid
= 1; /* getpid returns something inappropriate */
272 /* suppress leading zeros */
273 for (n
= 10000; n
> pid
; n
/= 10)
276 *name
++ = pid
/n
+ '0';
283 if (___Argv
!= NULL
) { /* mcrt0.s executed */
284 if ((s
= strrchr(___Argv
[0], '/')) != NULL
)
285 (void) strcpy(name
, s
+ 1);
287 (void) strcpy(name
, ___Argv
[0]);
289 (void) strcpy(name
, MON_OUT
);
294 hdrp
= (struct hdr
*)(uintptr_t)buffer
; /* initialize 1st region */
299 /* get an anchor for the block */
300 newanchp
= (curAnchor
== NULL
) ? &firstAnchor
:
301 (ANCHOR
*)libc_malloc(sizeof (ANCHOR
));
303 if (newanchp
== NULL
) {
304 lmutex_unlock(&mon_lock
);
309 /* link anchor+block into chain */
310 newanchp
->monBuffer
= hdrp
; /* new, down. */
311 newanchp
->next
= NULL
; /* new, forward to NULL. */
312 newanchp
->prior
= curAnchor
; /* new, backward. */
313 if (curAnchor
!= NULL
)
314 curAnchor
->next
= newanchp
; /* old, forward to new. */
315 newanchp
->flags
= HAS_HISTOGRAM
; /* note it has a histgm area */
317 /* got it - enable use by mcount() */
318 countbase
= (char *)buffer
+ sizeof (struct hdr
);
319 countlimit
= countbase
+ (nfunc
* sizeof (struct cnt
));
321 /* (set size of region 3) */
322 newanchp
->histSize
= (int)
323 (bufsize
* sizeof (WORD
) - (countlimit
- (char *)buffer
));
326 /* done w/regions 1 + 2: setup 3 to activate profil processing. */
327 buffer
+= ssiz
; /* move ptr past 2'nd region */
328 bufsize
-= ssiz
; /* no. WORDs in third region */
329 /* no. WORDs of text */
330 text
= (highpc
- lowpc
+ sizeof (WORD
) - 1) / sizeof (WORD
);
333 * scale is a 16 bit fixed point fraction with the decimal
336 if (bufsize
< text
) {
337 /* make sure cast is done first! */
338 double temp
= (double)bufsize
;
339 scale
= (uint_t
)((temp
* (long)0200000L) / text
);
341 /* scale must be less than 1 */
344 bufsize
*= sizeof (WORD
); /* bufsize into # bytes */
345 profil(buffer
, bufsize
, (ulong_t
)lowpc
, scale
);
348 curAnchor
= newanchp
; /* make latest addition, the cur anchor */
349 lmutex_unlock(&mon_lock
);
353 * writeBlocks() - write accumulated profiling info, std fmt.
355 * This routine collects the function call counts, and the
356 * last specified profil buffer, and writes out one combined
357 * 'pseudo-block', as expected by current and former versions
365 ANCHOR
*ap
; /* temp anchor ptr */
366 struct hdr sum
; /* summary header (for 'pseudo' block) */
367 ANCHOR
*histp
; /* anchor with histogram to use */
369 if ((fd
= creat(mon_out
, 0666)) < 0)
373 * this loop (1) computes # funct cts total
374 * (2) finds anchor of last block w / hist(histp)
377 for (sum
.nfns
= 0, ap
= &firstAnchor
; ap
!= NULL
; ap
= ap
->next
) {
378 sum
.nfns
+= ap
->monBuffer
->nfns
; /* accum num of cells */
379 if (ap
->flags
& HAS_HISTOGRAM
)
380 histp
= ap
; /* remember lastone with a histgm */
384 /* copy pc range from effective histgm */
385 sum
.lpc
= histp
->monBuffer
->lpc
;
386 sum
.hpc
= histp
->monBuffer
->hpc
;
388 ok
= (write(fd
, (char *)&sum
, sizeof (sum
)) == sizeof (sum
));
390 if (ok
) { /* if the hdr went out ok.. */
394 /* write out the count arrays (region 2's) */
395 for (ap
= &firstAnchor
; ok
&& ap
!= NULL
; ap
= ap
->next
) {
396 amt
= ap
->monBuffer
->nfns
* sizeof (struct cnt
);
397 p
= (char *)ap
->monBuffer
+ sizeof (struct hdr
);
399 ok
= (write(fd
, p
, amt
) == amt
);
402 /* count arrays out; write out histgm area */
404 p
= (char *)histp
->monBuffer
+ sizeof (struct hdr
) +
405 (histp
->monBuffer
->nfns
* sizeof (struct cnt
));
406 amt
= histp
->histSize
;
408 ok
= (write(fd
, p
, amt
) == amt
);
415 return (ok
); /* indicate success */
420 * mnewblock()-allocate and link in a new region1&2 block.
422 * This routine, called by mcount_newent(), allocates a new block
423 * containing only regions 1 & 2 (hdr and fcn call count array),
424 * and an associated anchor (see header comments), inits the
425 * header (region 1) of the block, links the anchor into the
426 * list, and resets the countbase/limit pointers.
428 * This routine cannot be called recursively, since (each) mcount
429 * has a local lock which prevents recursive calls to mcount_newent.
430 * See mcount_newent for more details.
434 #define THISMANYFCNS (MPROGS0*2)
437 * call libc_malloc() to get an anchor & a regn1&2 block, together
439 #define GETTHISMUCH (sizeof (ANCHOR) + /* get an ANCHOR */ \
440 (sizeof (struct hdr) + /* get Region 1 */ \
441 THISMANYFCNS * sizeof (struct cnt))) /* Region 2 */ \
442 /* but No region 3 */
452 /* get anchor And block, together */
453 p
= libc_malloc(GETTHISMUCH
);
455 perror("mcount(mnewblock)");
460 hdrp
= (struct hdr
*)(p
+ 1);
462 /* initialize 1st region to dflts */
465 hdrp
->nfns
= THISMANYFCNS
;
467 /* link anchor+block into chain */
468 newanchp
->monBuffer
= hdrp
; /* new, down. */
469 newanchp
->next
= NULL
; /* new, forward to NULL. */
470 newanchp
->prior
= curAnchor
; /* new, backward. */
471 if (curAnchor
!= NULL
)
472 curAnchor
->next
= newanchp
; /* old, forward to new. */
473 newanchp
->flags
= 0; /* note that it has NO histgm area */
475 /* got it - enable use by mcount() */
476 countbase
= (char *)hdrp
+ sizeof (struct hdr
);
477 countlimit
= countbase
+ (THISMANYFCNS
* sizeof (struct cnt
));
479 newanchp
->histSize
= 0; /* (set size of region 3.. to 0) */
482 curAnchor
= newanchp
; /* make latest addition, cur anchor */
486 * mcount_newent() -- call to get a new mcount call count entry.
488 * this function is called by _mcount to get a new call count entry
489 * (struct cnt, in the region allocated by monitor()), or to return
490 * zero if profiling is off.
492 * This function acts as a funnel, an access function to make sure
493 * that all instances of mcount (the one in the a.out, and any in
494 * any shared objects) all get entries from the same array, and
495 * all know when profiling is off.
497 * NOTE: when mcount calls this function, it sets a private flag
498 * so that it does not call again until this function returns,
499 * thus preventing recursion.
501 * At Worst, the mcount in either a shared object or the a.out
502 * could call once, and then the mcount living in the shared object
503 * with monitor could call a second time (i.e. libc.so.1, although
504 * presently it does not have mcount in it). This worst case
505 * would involve Two active calls to mcount_newent, which it can
506 * handle, since the second one would find an already-set value
509 * The only unfortunate result is that No new call counts
510 * will be handed out until this function returns.
511 * Thus if libc_malloc or other routines called inductively by
512 * this routine have not yet been provided with a call count entry,
513 * they will not get one until this function call is completed.
514 * Thus a few calls to library routines during the course of
515 * profiling setup, may not be counted.
517 * NOTE: countbase points at the next available entry, and
518 * countlimit points past the last valid entry, in the current
519 * function call counts array.
522 * if profiling is off // countbase==0
526 * if need more entries // because countbase points last valid entry
527 * link in a new block, resetting countbase and countlimit
529 * if Got more entries
530 * return pointer to the next available entry, and
531 * update pointer-to-next-slot before you return.
533 * else // failed to get more entries
546 if (countbase
>= countlimit
)
547 _mnewblock(); /* get a new block; set countbase */
549 if (countbase
!= 0) {
550 struct cnt
*cur_countbase
= (struct cnt
*)(uintptr_t)countbase
;
552 countbase
+= sizeof (struct cnt
);
553 return (cur_countbase
);