/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim:cindent:ts=8:et:sw=4:
 *
 * ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is nsTraceMalloc.c/bloatblame.c code, released
 * April 19, 2000.
 *
 * The Initial Developer of the Original Code is
 * Netscape Communications Corporation.
 * Portions created by the Initial Developer are Copyright (C) 2000
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *   Brendan Eich, 14-April-2000
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either the GNU General Public License Version 2 or later (the "GPL"), or
 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */

#ifdef NS_TRACE_MALLOC
/*
 * TODO:
 * - FIXME https://bugzilla.mozilla.org/show_bug.cgi?id=392008
 * - extend logfile so 'F' record tells free stack
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#ifdef XP_UNIX
#include <unistd.h>
#include <sys/stat.h>
#include <sys/time.h>
#endif
#include "plhash.h"
#include "pratom.h"
#include "prlog.h"
#include "prlock.h"
#include "prmon.h"
#include "prprf.h"
#include "prenv.h"
#include "prnetdb.h"
#include "nsTraceMalloc.h"
#include "nscore.h"
#include "prinit.h"
#include "prthread.h"
#include "nsStackWalk.h"
#include "nsTraceMallocCallbacks.h"

#if defined(XP_MACOSX)

#include <malloc/malloc.h>

#define WRITE_FLAGS "w"

#define __libc_malloc(x)        malloc(x)
#define __libc_realloc(x, y)    realloc(x, y)
#define __libc_free(x)          free(x)

#elif defined(XP_UNIX)

#include <malloc.h>

#define WRITE_FLAGS "w"

#ifdef WRAP_SYSTEM_INCLUDES
#pragma GCC visibility push(default)
#endif
extern __ptr_t __libc_malloc(size_t);
extern __ptr_t __libc_calloc(size_t, size_t);
extern __ptr_t __libc_realloc(__ptr_t, size_t);
extern void    __libc_free(__ptr_t);
extern __ptr_t __libc_memalign(size_t, size_t);
extern __ptr_t __libc_valloc(size_t);
#ifdef WRAP_SYSTEM_INCLUDES
#pragma GCC visibility pop
#endif

#elif defined(XP_WIN32)

#include <sys/timeb.h>          /* for timeb */
#include <sys/stat.h>           /* for fstat */

#include <io.h>                 /* for write */

#define WRITE_FLAGS "w"

#define __libc_malloc(x)        dhw_orig_malloc(x)
#define __libc_realloc(x, y)    dhw_orig_realloc(x, y)
#define __libc_free(x)          dhw_orig_free(x)

#else /* not XP_MACOSX, XP_UNIX, or XP_WIN32 */

# error "Unknown build configuration!"

#endif

typedef struct logfile logfile;

#define STARTUP_TMBUFSIZE (64 * 1024)
#define LOGFILE_TMBUFSIZE (16 * 1024)

struct logfile {
    int         fd;
    int         lfd;            /* logical fd, dense among all logfiles */
    char        *buf;
    int         bufsize;
    int         pos;
    uint32      size;
    uint32      simsize;
    logfile     *next;
    logfile     **prevp;
};

static char     default_buf[STARTUP_TMBUFSIZE];
static logfile  default_logfile =
                  {-1, 0, default_buf, STARTUP_TMBUFSIZE, 0, 0, 0, NULL, NULL};
static logfile  *logfile_list = NULL;
static logfile  **logfile_tail = &logfile_list;
static logfile  *logfp = &default_logfile;
static PRLock   *tmlock = NULL;

#ifndef PATH_MAX
#define PATH_MAX 4096
#endif
static char     sdlogname[PATH_MAX] = ""; /* filename for shutdown leak log */

/*
 * This enables/disables trace-malloc logging.
 *
 * It is separate from suppress_tracing so that we do not have to pay
 * the performance cost of repeated TM_GET_TLS_DATA calls when
 * trace-malloc is disabled (which is not as bad as the locking we used
 * to have).
 *
 * It must default to zero, since it can be tested by the Linux malloc
 * hooks before NS_TraceMallocStartup sets it.
 */
static uint32 tracing_enabled = 0;

/*
 * This lock must be held while manipulating the calltree, the
 * allocations table, the log, or the tmstats.
 *
 * Callers should not *enter* the lock without checking suppress_tracing
 * first; otherwise they risk trying to re-enter on the same thread.
 */
#define TM_ENTER_LOCK(t)                                                      \
    PR_BEGIN_MACRO                                                            \
        PR_ASSERT(t->suppress_tracing != 0);                                  \
        if (tmlock)                                                           \
            PR_Lock(tmlock);                                                  \
    PR_END_MACRO

#define TM_EXIT_LOCK(t)                                                       \
    PR_BEGIN_MACRO                                                            \
        PR_ASSERT(t->suppress_tracing != 0);                                  \
        if (tmlock)                                                           \
            PR_Unlock(tmlock);                                                \
    PR_END_MACRO

#define TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t)                                 \
    PR_BEGIN_MACRO                                                            \
        t->suppress_tracing++;                                                \
        TM_ENTER_LOCK(t);                                                     \
    PR_END_MACRO

#define TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t)                                \
    PR_BEGIN_MACRO                                                            \
        TM_EXIT_LOCK(t);                                                      \
    t->suppress_tracing--;                                                    \
    PR_END_MACRO

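/*
 * Illustrative sketch only (not part of the original file): the canonical
 * way the macros above are paired throughout this file.  A caller bumps its
 * per-thread suppress_tracing count before taking tmlock, so that any
 * malloc performed while the lock is held is not traced re-entrantly.
 */
#if 0
static void example_locked_work(tm_thread *t)
{
    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    /* ... manipulate the calltree, allocations table, log, or tmstats ... */
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
}
#endif
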
/*
 * Thread-local storage.
 *
 * We can't use NSPR thread-local storage for this because it mallocs
 * within PR_GetThreadPrivate (the first time) and PR_SetThreadPrivate
 * (which can be worked around by protecting all uses of those functions
 * with a monitor, ugh) and because it calls malloc/free when the
 * thread-local storage is in an inconsistent state within
 * PR_SetThreadPrivate (when expanding the thread-local storage array)
 * and _PRI_DetachThread (when and after deleting the thread-local
 * storage array).
 */

#ifdef XP_WIN32

#include <windows.h>

#define TM_TLS_INDEX_TYPE               DWORD
#define TM_CREATE_TLS_INDEX(i_)         PR_BEGIN_MACRO                        \
                                          (i_) = TlsAlloc();                  \
                                        PR_END_MACRO
#define TM_DESTROY_TLS_INDEX(i_)        TlsFree((i_))
#define TM_GET_TLS_DATA(i_)             TlsGetValue((i_))
#define TM_SET_TLS_DATA(i_, v_)         TlsSetValue((i_), (v_))

#else

#include <pthread.h>

#define TM_TLS_INDEX_TYPE               pthread_key_t
#define TM_CREATE_TLS_INDEX(i_)         pthread_key_create(&(i_), NULL)
#define TM_DESTROY_TLS_INDEX(i_)        pthread_key_delete((i_))
#define TM_GET_TLS_DATA(i_)             pthread_getspecific((i_))
#define TM_SET_TLS_DATA(i_, v_)         pthread_setspecific((i_), (v_))

#endif

static TM_TLS_INDEX_TYPE tls_index;
static tm_thread main_thread; /* 0-initialization is correct */

/* FIXME (maybe): This is currently unused; we leak the thread-local data. */
#if 0
static void
free_tm_thread(void *priv)
{
    tm_thread *t = (tm_thread*) priv;

    PR_ASSERT(t->suppress_tracing == 0);

    if (t->in_heap) {
        t->suppress_tracing = 1;
        if (t->backtrace_buf.buffer)
            __libc_free(t->backtrace_buf.buffer);

        __libc_free(t);
    }
}
#endif

tm_thread *
tm_get_thread(void)
{
    tm_thread *t;
    tm_thread stack_tm_thread;

    if (!tmlock) {
        return &main_thread;
    }

    t = TM_GET_TLS_DATA(tls_index);

    if (!t) {
        /*
         * First, store a tm_thread on the stack to suppress for the
         * malloc below.
         */
        stack_tm_thread.suppress_tracing = 1;
        stack_tm_thread.backtrace_buf.buffer = NULL;
        stack_tm_thread.backtrace_buf.size = 0;
        stack_tm_thread.backtrace_buf.entries = 0;
        TM_SET_TLS_DATA(tls_index, &stack_tm_thread);

        t = (tm_thread*) __libc_malloc(sizeof(tm_thread));
        t->suppress_tracing = 0;
        t->backtrace_buf = stack_tm_thread.backtrace_buf;
        TM_SET_TLS_DATA(tls_index, t);

        PR_ASSERT(stack_tm_thread.suppress_tracing == 1); /* balanced */
    }

    return t;
}

/* We don't want more than 32 logfiles open at once, ok? */
typedef uint32          lfd_set;

#define LFD_SET_STATIC_INITIALIZER 0
#define LFD_SET_SIZE    32

#define LFD_ZERO(s)     (*(s) = 0)
#define LFD_BIT(i)      ((uint32)1 << (i))
#define LFD_TEST(i,s)   (LFD_BIT(i) & *(s))
#define LFD_SET(i,s)    (*(s) |= LFD_BIT(i))
#define LFD_CLR(i,s)    (*(s) &= ~LFD_BIT(i))

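/*
 * Illustrative sketch only (not part of the original file): each logged
 * entity (library, filename, method, callsite) carries an lfd_set recording
 * which logfiles it has already been described to, so its descriptive
 * record is emitted at most once per logfile.
 */
#if 0
    lfd_set seen = LFD_SET_STATIC_INITIALIZER;
    if (!LFD_TEST(fp->lfd, &seen)) {
        /* ... write the entity's descriptive record to fp ... */
        LFD_SET(fp->lfd, &seen);
    }
#endif
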
static logfile *get_logfile(int fd)
{
    logfile *fp;
    int lfd;

    for (fp = logfile_list; fp; fp = fp->next) {
        if (fp->fd == fd)
            return fp;
    }
    lfd = 0;
  retry:
    for (fp = logfile_list; fp; fp = fp->next) {
        if (fp->lfd == lfd) {
            if (++lfd >= LFD_SET_SIZE)
                return NULL;
            goto retry;
        }
    }
    fp = __libc_malloc(sizeof(logfile) + LOGFILE_TMBUFSIZE);
    if (!fp)
        return NULL;
    fp->fd = fd;
    fp->lfd = lfd;
    fp->buf = (char*) (fp + 1);
    fp->bufsize = LOGFILE_TMBUFSIZE;
    fp->pos = 0;
    fp->size = fp->simsize = 0;
    fp->next = NULL;
    fp->prevp = logfile_tail;
    *logfile_tail = fp;
    logfile_tail = &fp->next;
    return fp;
}

static void flush_logfile(logfile *fp)
{
    int len, cnt, fd;
    char *bp;

    len = fp->pos;
    if (len == 0)
        return;
    fp->pos = 0;
    fd = fp->fd;
    if (fd >= 0) {
        fp->size += len;
        bp = fp->buf;
        do {
            cnt = write(fd, bp, len);
            if (cnt <= 0) {
                printf("### nsTraceMalloc: write failed or wrote 0 bytes!\n");
                return;
            }
            bp += cnt;
            len -= cnt;
        } while (len > 0);
    }
    fp->simsize += len;
}

static void log_byte(logfile *fp, char byte)
{
    if (fp->pos == fp->bufsize)
        flush_logfile(fp);
    fp->buf[fp->pos++] = byte;
}

static void log_string(logfile *fp, const char *str)
{
    int len, rem, cnt;

    len = strlen(str) + 1;      /* include null terminator */
    while ((rem = fp->pos + len - fp->bufsize) > 0) {
        cnt = len - rem;
        memcpy(&fp->buf[fp->pos], str, cnt);
        str += cnt;
        fp->pos += cnt;
        flush_logfile(fp);
        len = rem;
    }
    memcpy(&fp->buf[fp->pos], str, len);
    fp->pos += len;
}

static void log_filename(logfile* fp, const char* filename)
{
    if (strlen(filename) < 512) {
        char *bp, *cp, buf[512];

        bp = strstr(strcpy(buf, filename), "mozilla");
        if (!bp)
            bp = buf;

        for (cp = bp; *cp; cp++) {
            if (*cp == '\\')
                *cp = '/';
        }
        filename = bp;
    }
    log_string(fp, filename);
}

static void log_uint32(logfile *fp, uint32 ival)
{
    if (ival < 0x80) {
        /* 0xxx xxxx */
        log_byte(fp, (char) ival);
    } else if (ival < 0x4000) {
        /* 10xx xxxx xxxx xxxx */
        log_byte(fp, (char) ((ival >> 8) | 0x80));
        log_byte(fp, (char) (ival & 0xff));
    } else if (ival < 0x200000) {
        /* 110x xxxx xxxx xxxx xxxx xxxx */
        log_byte(fp, (char) ((ival >> 16) | 0xc0));
        log_byte(fp, (char) ((ival >> 8) & 0xff));
        log_byte(fp, (char) (ival & 0xff));
    } else if (ival < 0x10000000) {
        /* 1110 xxxx xxxx xxxx xxxx xxxx xxxx xxxx */
        log_byte(fp, (char) ((ival >> 24) | 0xe0));
        log_byte(fp, (char) ((ival >> 16) & 0xff));
        log_byte(fp, (char) ((ival >> 8) & 0xff));
        log_byte(fp, (char) (ival & 0xff));
    } else {
        /* 1111 0000 xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx */
        log_byte(fp, (char) 0xf0);
        log_byte(fp, (char) ((ival >> 24) & 0xff));
        log_byte(fp, (char) ((ival >> 16) & 0xff));
        log_byte(fp, (char) ((ival >> 8) & 0xff));
        log_byte(fp, (char) (ival & 0xff));
    }
}

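/*
 * Illustrative sketch only (not part of the original file): a minimal
 * decoder for the variable-length encoding emitted by log_uint32 above,
 * assuming the complete value is available at buf.  The high bits of the
 * first byte select the width: values below 0x80 take one byte, below
 * 0x4000 two bytes, and so on up to the five-byte 0xf0 form.
 */
#if 0
static uint32 example_decode_uint32(const unsigned char *buf, int *consumed)
{
    unsigned char first = buf[0];
    if (first < 0x80) {                 /* 0xxx xxxx */
        *consumed = 1;
        return first;
    }
    if ((first & 0xc0) == 0x80) {       /* 10xx xxxx + 1 byte */
        *consumed = 2;
        return ((uint32)(first & 0x3f) << 8) | buf[1];
    }
    if ((first & 0xe0) == 0xc0) {       /* 110x xxxx + 2 bytes */
        *consumed = 3;
        return ((uint32)(first & 0x1f) << 16) | ((uint32)buf[1] << 8) | buf[2];
    }
    if ((first & 0xf0) == 0xe0) {       /* 1110 xxxx + 3 bytes */
        *consumed = 4;
        return ((uint32)(first & 0x0f) << 24) | ((uint32)buf[1] << 16) |
               ((uint32)buf[2] << 8) | buf[3];
    }
    *consumed = 5;                      /* 1111 0000 + 4 bytes */
    return ((uint32)buf[1] << 24) | ((uint32)buf[2] << 16) |
           ((uint32)buf[3] << 8) | buf[4];
}
#endif
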
static void log_event1(logfile *fp, char event, uint32 serial)
{
    log_byte(fp, event);
    log_uint32(fp, (uint32) serial);
}

static void log_event2(logfile *fp, char event, uint32 serial, size_t size)
{
    log_event1(fp, event, serial);
    log_uint32(fp, (uint32) size);
}

static void log_event3(logfile *fp, char event, uint32 serial, size_t oldsize,
                       size_t size)
{
    log_event2(fp, event, serial, oldsize);
    log_uint32(fp, (uint32) size);
}

static void log_event4(logfile *fp, char event, uint32 serial, uint32 ui2,
                       uint32 ui3, uint32 ui4)
{
    log_event3(fp, event, serial, ui2, ui3);
    log_uint32(fp, ui4);
}

static void log_event5(logfile *fp, char event, uint32 serial, uint32 ui2,
                       uint32 ui3, uint32 ui4, uint32 ui5)
{
    log_event4(fp, event, serial, ui2, ui3, ui4);
    log_uint32(fp, ui5);
}

static void log_event6(logfile *fp, char event, uint32 serial, uint32 ui2,
                       uint32 ui3, uint32 ui4, uint32 ui5, uint32 ui6)
{
    log_event5(fp, event, serial, ui2, ui3, ui4, ui5);
    log_uint32(fp, ui6);
}

static void log_event7(logfile *fp, char event, uint32 serial, uint32 ui2,
                       uint32 ui3, uint32 ui4, uint32 ui5, uint32 ui6,
                       uint32 ui7)
{
    log_event6(fp, event, serial, ui2, ui3, ui4, ui5, ui6);
    log_uint32(fp, ui7);
}

static void log_event8(logfile *fp, char event, uint32 serial, uint32 ui2,
                       uint32 ui3, uint32 ui4, uint32 ui5, uint32 ui6,
                       uint32 ui7, uint32 ui8)
{
    log_event7(fp, event, serial, ui2, ui3, ui4, ui5, ui6, ui7);
    log_uint32(fp, ui8);
}

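/*
 * Illustrative sketch only (not part of the original file): every record in
 * the log is one event byte followed by variable-length uint32 fields.  For
 * example, MallocCallback below logs a malloc as TM_EVENT_MALLOC plus five
 * fields.
 */
#if 0
    log_event5(logfp, TM_EVENT_MALLOC,
               site->serial,                    /* allocating callsite */
               start, end - start,              /* interval start, duration */
               (uint32)NS_PTR_TO_INT32(ptr),    /* heap address */
               size);                           /* requested size */
#endif
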
typedef struct callsite callsite;

struct callsite {
    void*       pc;
    uint32      serial;
    lfd_set     lfdset;
    const char  *name;      /* pointer to string owned by methods table */
    const char  *library;   /* pointer to string owned by libraries table */
    int         offset;
    callsite    *parent;
    callsite    *siblings;
    callsite    *kids;
};

/* NB: these counters are incremented and decremented only within tmlock. */
static uint32 library_serial_generator = 0;
static uint32 method_serial_generator = 0;
static uint32 callsite_serial_generator = 0;
static uint32 tmstats_serial_generator = 0;
static uint32 filename_serial_generator = 0;

/* Root of the tree of callsites, the sum of all (cycle-compressed) stacks. */
static callsite calltree_root =
  {0, 0, LFD_SET_STATIC_INITIALIZER, NULL, NULL, 0, NULL, NULL, NULL};

/* Basic instrumentation. */
static nsTMStats tmstats = NS_TMSTATS_STATIC_INITIALIZER;

/* Parent with the most kids (tmstats.calltree_maxkids). */
static callsite *calltree_maxkids_parent;

/* Calltree leaf for path with deepest stack backtrace. */
static callsite *calltree_maxstack_top;

/* Last site (i.e., calling pc) that recurred during a backtrace. */
static callsite *last_callsite_recurrence;

static void log_tmstats(logfile *fp)
{
    log_event1(fp, TM_EVENT_STATS, ++tmstats_serial_generator);
    log_uint32(fp, tmstats.calltree_maxstack);
    log_uint32(fp, tmstats.calltree_maxdepth);
    log_uint32(fp, tmstats.calltree_parents);
    log_uint32(fp, tmstats.calltree_maxkids);
    log_uint32(fp, tmstats.calltree_kidhits);
    log_uint32(fp, tmstats.calltree_kidmisses);
    log_uint32(fp, tmstats.calltree_kidsteps);
    log_uint32(fp, tmstats.callsite_recurrences);
    log_uint32(fp, tmstats.backtrace_calls);
    log_uint32(fp, tmstats.backtrace_failures);
    log_uint32(fp, tmstats.btmalloc_failures);
    log_uint32(fp, tmstats.dladdr_failures);
    log_uint32(fp, tmstats.malloc_calls);
    log_uint32(fp, tmstats.malloc_failures);
    log_uint32(fp, tmstats.calloc_calls);
    log_uint32(fp, tmstats.calloc_failures);
    log_uint32(fp, tmstats.realloc_calls);
    log_uint32(fp, tmstats.realloc_failures);
    log_uint32(fp, tmstats.free_calls);
    log_uint32(fp, tmstats.null_free_calls);
    log_uint32(fp, calltree_maxkids_parent ? calltree_maxkids_parent->serial
                                           : 0);
    log_uint32(fp, calltree_maxstack_top ? calltree_maxstack_top->serial : 0);
}

static void *generic_alloctable(void *pool, PRSize size)
{
    return __libc_malloc(size);
}

static void generic_freetable(void *pool, void *item)
{
    __libc_free(item);
}

typedef struct lfdset_entry {
    PLHashEntry base;
    lfd_set     lfdset;
} lfdset_entry;

static PLHashEntry *lfdset_allocentry(void *pool, const void *key)
{
    lfdset_entry *le = __libc_malloc(sizeof *le);
    if (le)
        LFD_ZERO(&le->lfdset);
    return &le->base;
}

static void lfdset_freeentry(void *pool, PLHashEntry *he, PRUintn flag)
{
    lfdset_entry *le;

    if (flag != HT_FREE_ENTRY)
        return;
    le = (lfdset_entry*) he;
    __libc_free((void*) le);
}

static PLHashAllocOps lfdset_hashallocops = {
    generic_alloctable, generic_freetable,
    lfdset_allocentry,  lfdset_freeentry
};

/* Table of library pathnames mapped to logged 'L' record serial numbers. */
static PLHashTable *libraries = NULL;

/* Table of filename pathnames mapped to logged 'G' record serial numbers. */
static PLHashTable *filenames = NULL;

/* Table mapping method names to logged 'N' record serial numbers. */
static PLHashTable *methods = NULL;

static callsite *
calltree(void **stack, size_t num_stack_entries, tm_thread *t)
{
    logfile *fp = logfp;
    void *pc;
    uint32 nkids;
    callsite *parent, *site, **csp, *tmp;
    int maxstack;
    uint32 library_serial, method_serial, filename_serial;
    const char *library, *method, *filename;
    char *slash;
    PLHashNumber hash;
    PLHashEntry **hep, *he;
    lfdset_entry *le;
    size_t stack_index;
    nsCodeAddressDetails details;
    nsresult rv;

    /*
     * FIXME bug 391749: We should really lock only the minimum amount
     * that we need to in this function, because it makes some calls
     * that could lock in the system's shared library loader.
     */
    TM_ENTER_LOCK(t);

    maxstack = (num_stack_entries > tmstats.calltree_maxstack);
    if (maxstack) {
        /* these two are the same, although that used to be less clear */
        tmstats.calltree_maxstack = num_stack_entries;
        tmstats.calltree_maxdepth = num_stack_entries;
    }

    /* Reverse the stack again, finding and building a path in the tree. */
    parent = &calltree_root;
    stack_index = num_stack_entries;
    do {
        --stack_index;
        pc = stack[stack_index];

        csp = &parent->kids;
        while ((site = *csp) != NULL) {
            if (site->pc == pc) {
                tmstats.calltree_kidhits++;

                /* Put the most recently used site at the front of siblings. */
                *csp = site->siblings;
                site->siblings = parent->kids;
                parent->kids = site;

                /* Check whether we've logged for this site and logfile yet. */
                if (!LFD_TEST(fp->lfd, &site->lfdset)) {
                    /*
                     * Some other logfile put this site in the calltree.  We
                     * must log an event for site, and possibly first for its
                     * method and/or library.  Note the code after the while
                     * loop that tests if (!site).
                     */
                    break;
                }

                /* Site already built and logged to fp -- go up the stack. */
                goto upward;
            }
            tmstats.calltree_kidsteps++;
            csp = &site->siblings;
        }

        if (!site) {
            tmstats.calltree_kidmisses++;

            /* Check for recursion: see if pc is on our ancestor line. */
            for (site = parent; site; site = site->parent) {
                if (site->pc == pc) {
                    tmstats.callsite_recurrences++;
                    last_callsite_recurrence = site;
                    goto upward;
                }
            }
        }

        /*
         * Not in tree at all, or not logged to fp: let's find our symbolic
         * callsite info.
         */

        /*
         * NS_DescribeCodeAddress can (on Linux) acquire a lock inside
         * the shared library loader.  Another thread might call malloc
         * while holding that lock (when loading a shared library).  So
         * we have to exit tmlock around this call.  For details, see
         * https://bugzilla.mozilla.org/show_bug.cgi?id=363334#c3
         *
         * We could be more efficient by building the nodes in the
         * calltree, exiting the monitor once to describe all of them,
         * and then filling in the descriptions for any that hadn't been
         * described already.  But this is easier for now.
         */
        TM_EXIT_LOCK(t);
        rv = NS_DescribeCodeAddress(pc, &details);
        TM_ENTER_LOCK(t);
        if (NS_FAILED(rv)) {
            tmstats.dladdr_failures++;
            goto fail;
        }

        /* Check whether we need to emit a library trace record. */
        library_serial = 0;
        library = NULL;
        if (details.library[0]) {
            if (!libraries) {
                libraries = PL_NewHashTable(100, PL_HashString,
                                            PL_CompareStrings, PL_CompareValues,
                                            &lfdset_hashallocops, NULL);
                if (!libraries) {
                    tmstats.btmalloc_failures++;
                    goto fail;
                }
            }
            hash = PL_HashString(details.library);
            hep = PL_HashTableRawLookup(libraries, hash, details.library);
            he = *hep;
            if (he) {
                library = (char*) he->key;
                library_serial = (uint32) NS_PTR_TO_INT32(he->value);
                le = (lfdset_entry *) he;
                if (LFD_TEST(fp->lfd, &le->lfdset)) {
                    /* We already logged an event on fp for this library. */
                    le = NULL;
                }
            } else {
                library = strdup(details.library);
                if (library) {
                    library_serial = ++library_serial_generator;
                    he = PL_HashTableRawAdd(libraries, hep, hash, library,
                                            (void*) library_serial);
                }
                if (!he) {
                    tmstats.btmalloc_failures++;
                    goto fail;
                }
                le = (lfdset_entry *) he;
            }
            if (le) {
                /* Need to log an event to fp for this lib. */
                slash = strrchr(library, '/');
                log_event1(fp, TM_EVENT_LIBRARY, library_serial);
                log_string(fp, slash ? slash + 1 : library);
                LFD_SET(fp->lfd, &le->lfdset);
            }
        }

        /* For compatibility with current log format, always emit a
         * filename trace record, using "noname" / 0 when no file name
         * is available. */
        filename_serial = 0;
        filename = details.filename[0] ? details.filename : "noname";
        if (!filenames) {
            filenames = PL_NewHashTable(100, PL_HashString,
                                        PL_CompareStrings, PL_CompareValues,
                                        &lfdset_hashallocops, NULL);
            if (!filenames) {
                tmstats.btmalloc_failures++;
                goto fail;
            }
        }
        hash = PL_HashString(filename);
        hep = PL_HashTableRawLookup(filenames, hash, filename);
        he = *hep;
        if (he) {
            filename = (char*) he->key;
            filename_serial = (uint32) NS_PTR_TO_INT32(he->value);
            le = (lfdset_entry *) he;
            if (LFD_TEST(fp->lfd, &le->lfdset)) {
                /* We already logged an event on fp for this filename. */
                le = NULL;
            }
        } else {
            filename = strdup(filename);
            if (filename) {
                filename_serial = ++filename_serial_generator;
                he = PL_HashTableRawAdd(filenames, hep, hash, filename,
                                        (void*) filename_serial);
            }
            if (!he) {
                tmstats.btmalloc_failures++;
                goto fail;
            }
            le = (lfdset_entry *) he;
        }
        if (le) {
            /* Need to log an event to fp for this filename. */
            log_event1(fp, TM_EVENT_FILENAME, filename_serial);
            log_filename(fp, filename);
            LFD_SET(fp->lfd, &le->lfdset);
        }

        if (!details.function[0]) {
            PR_snprintf(details.function, sizeof(details.function),
                        "%s+%X", library ? library : "main", details.loffset);
        }

        /* Emit an 'N' (for New method, 'M' is for malloc!) event if needed. */
        method_serial = 0;
        if (!methods) {
            methods = PL_NewHashTable(10000, PL_HashString,
                                      PL_CompareStrings, PL_CompareValues,
                                      &lfdset_hashallocops, NULL);
            if (!methods) {
                tmstats.btmalloc_failures++;
                goto fail;
            }
        }
        hash = PL_HashString(details.function);
        hep = PL_HashTableRawLookup(methods, hash, details.function);
        he = *hep;
        if (he) {
            method = (char*) he->key;
            method_serial = (uint32) NS_PTR_TO_INT32(he->value);
            le = (lfdset_entry *) he;
            if (LFD_TEST(fp->lfd, &le->lfdset)) {
                /* We already logged an event on fp for this method. */
                le = NULL;
            }
        } else {
            method = strdup(details.function);
            if (method) {
                method_serial = ++method_serial_generator;
                he = PL_HashTableRawAdd(methods, hep, hash, method,
                                        (void*) method_serial);
            }
            if (!he) {
                tmstats.btmalloc_failures++;
                goto fail;
            }
            le = (lfdset_entry *) he;
        }
        if (le) {
            log_event4(fp, TM_EVENT_METHOD, method_serial, library_serial,
                       filename_serial, details.lineno);
            log_string(fp, method);
            LFD_SET(fp->lfd, &le->lfdset);
        }

        /* Create a new callsite record. */
        if (!site) {
            site = __libc_malloc(sizeof(callsite));
            if (!site) {
                tmstats.btmalloc_failures++;
                goto fail;
            }

            /* Update parent and max-kids-per-parent stats. */
            if (!parent->kids)
                tmstats.calltree_parents++;
            nkids = 1;
            for (tmp = parent->kids; tmp; tmp = tmp->siblings)
                nkids++;
            if (nkids > tmstats.calltree_maxkids) {
                tmstats.calltree_maxkids = nkids;
                calltree_maxkids_parent = parent;
            }

            /* Insert the new site into the tree. */
            site->pc = pc;
            site->serial = ++callsite_serial_generator;
            LFD_ZERO(&site->lfdset);
            site->name = method;
            site->library = library;
            site->offset = details.loffset;
            site->parent = parent;
            site->siblings = parent->kids;
            parent->kids = site;
            site->kids = NULL;
        }

        /* Log the site with its parent, method, and offset. */
        log_event4(fp, TM_EVENT_CALLSITE, site->serial, parent->serial,
                   method_serial, details.foffset);
        LFD_SET(fp->lfd, &site->lfdset);

      upward:
        parent = site;
    } while (stack_index > 0);

    if (maxstack)
        calltree_maxstack_top = site;

    TM_EXIT_LOCK(t);
    return site;

  fail:
    TM_EXIT_LOCK(t);
    return NULL;
}

/*
 * Buffer the stack from top at low index to bottom at high, so that we can
 * reverse it in calltree.
 */
static void
stack_callback(void *pc, void *closure)
{
    stack_buffer_info *info = (stack_buffer_info*) closure;

    /*
     * If we run out of buffer, keep incrementing entries so that
     * backtrace can call us again with a bigger buffer.
     */
    if (info->entries < info->size)
        info->buffer[info->entries] = pc;
    ++info->entries;
}

/*
 * The caller MUST NOT be holding tmlock when calling backtrace.
 */
callsite *
backtrace(tm_thread *t, int skip)
{
    callsite *site;
    stack_buffer_info *info = &t->backtrace_buf;
    void ** new_stack_buffer;
    size_t new_stack_buffer_size;

    t->suppress_tracing++;

    /*
     * NS_StackWalk can (on Windows) acquire a lock in the shared library
     * loader.  Another thread might call malloc while holding that lock
     * (when loading a shared library).  So we can't be in tmlock during
     * this call.  For details, see
     * https://bugzilla.mozilla.org/show_bug.cgi?id=374829#c8
     */

    /* skip == 0 means |backtrace| should show up, so don't use skip + 1 */
    /* NB: this call is repeated below if the buffer is too small */
    info->entries = 0;
    NS_StackWalk(stack_callback, skip, info);

    /*
     * To avoid allocating in stack_callback (which, on Windows, is
     * called on a different thread from the one we're running on here),
     * reallocate here if it didn't have a big enough buffer (which
     * includes the first call on any thread), and call it again.
     */
    if (info->entries > info->size) {
        new_stack_buffer_size = 2 * info->entries;
        new_stack_buffer = __libc_realloc(info->buffer,
                               new_stack_buffer_size * sizeof(void*));
        if (!new_stack_buffer) {
            t->suppress_tracing--;
            return NULL;
        }
        info->buffer = new_stack_buffer;
        info->size = new_stack_buffer_size;

        /* and call NS_StackWalk again */
        info->entries = 0;
        NS_StackWalk(stack_callback, skip, info);

        PR_ASSERT(info->entries * 2 == new_stack_buffer_size); /* same stack */
    }

    if (info->entries == 0) {
        t->suppress_tracing--;
        return NULL;
    }

    site = calltree(info->buffer, info->entries, t);

    TM_ENTER_LOCK(t);
    tmstats.backtrace_calls++;
    if (!site) {
        tmstats.backtrace_failures++;
        PR_ASSERT(tmstats.backtrace_failures < 100);
    }
    TM_EXIT_LOCK(t);

    t->suppress_tracing--;
    return site;
}

typedef struct allocation {
    PLHashEntry entry;
    size_t      size;
    FILE        *trackfp;       /* for allocation tracking */
} allocation;

#define ALLOC_HEAP_SIZE 150000

static allocation alloc_heap[ALLOC_HEAP_SIZE];
static allocation *alloc_freelist = NULL;
static int alloc_heap_initialized = 0;

static PLHashEntry *alloc_allocentry(void *pool, const void *key)
{
    allocation **listp, *alloc;
    int n;

    if (!alloc_heap_initialized) {
        n = ALLOC_HEAP_SIZE;
        listp = &alloc_freelist;
        for (alloc = alloc_heap; --n >= 0; alloc++) {
            *listp = alloc;
            listp = (allocation**) &alloc->entry.next;
        }
        *listp = NULL;
        alloc_heap_initialized = 1;
    }

    listp = &alloc_freelist;
    alloc = *listp;
    if (!alloc)
        return __libc_malloc(sizeof(allocation));
    *listp = (allocation*) alloc->entry.next;
    return &alloc->entry;
}

static void alloc_freeentry(void *pool, PLHashEntry *he, PRUintn flag)
{
    allocation *alloc;

    if (flag != HT_FREE_ENTRY)
        return;
    alloc = (allocation*) he;
    if ((PRUptrdiff)(alloc - alloc_heap) < (PRUptrdiff)ALLOC_HEAP_SIZE) {
        alloc->entry.next = &alloc_freelist->entry;
        alloc_freelist = alloc;
    } else {
        __libc_free((void*) alloc);
    }
}

static PLHashAllocOps alloc_hashallocops = {
    generic_alloctable, generic_freetable,
    alloc_allocentry,   alloc_freeentry
};

static PLHashNumber hash_pointer(const void *key)
{
    return (PLHashNumber) key;
}

static PLHashTable *allocations = NULL;

static PLHashTable *new_allocations(void)
{
    allocations = PL_NewHashTable(200000, hash_pointer,
                                  PL_CompareValues, PL_CompareValues,
                                  &alloc_hashallocops, NULL);
    return allocations;
}

#define get_allocations() (allocations ? allocations : new_allocations())

#if defined(XP_MACOSX)

/* from malloc.c in Libc */
typedef void
malloc_logger_t(unsigned type, unsigned arg1, unsigned arg2, unsigned arg3,
                unsigned result, unsigned num_hot_frames_to_skip);

extern malloc_logger_t *malloc_logger;

#define MALLOC_LOG_TYPE_ALLOCATE        2
#define MALLOC_LOG_TYPE_DEALLOCATE      4
#define MALLOC_LOG_TYPE_HAS_ZONE        8
#define MALLOC_LOG_TYPE_CLEARED         64

static void
my_malloc_logger(unsigned type, unsigned arg1, unsigned arg2, unsigned arg3,
                 unsigned result, unsigned num_hot_frames_to_skip)
{
    unsigned all_args[3] = { arg1, arg2, arg3 };
    unsigned *args = all_args + ((type & MALLOC_LOG_TYPE_HAS_ZONE) ? 1 : 0);

    unsigned alloc_type =
        type & (MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_DEALLOCATE);
    tm_thread *t = tm_get_thread();

    if (alloc_type == (MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_DEALLOCATE)) {
        ReallocCallback((void*)args[0], (void*)result, args[1], 0, 0, t);
    } else if (alloc_type == MALLOC_LOG_TYPE_ALLOCATE) {
        /*
         * We don't get size/count information for calloc, so just use
         * MallocCallback.
         */
        MallocCallback((void*)result, args[0], 0, 0, t);
    } else if (alloc_type == MALLOC_LOG_TYPE_DEALLOCATE) {
        FreeCallback((void*)args[0], 0, 0, t);
    }
}

static void
StartupHooker(void)
{
    PR_ASSERT(!malloc_logger);
    malloc_logger = my_malloc_logger;
}

static void
ShutdownHooker(void)
{
    PR_ASSERT(malloc_logger == my_malloc_logger);
    malloc_logger = NULL;
}

#elif defined(XP_UNIX)

/*
 * We can't use glibc's malloc hooks because they can't be used in a
 * threadsafe manner.  They require unsetting the hooks to call into the
 * original malloc implementation, and then resetting them when the
 * original implementation returns.  If another thread calls the same
 * allocation function while the hooks are unset, we have no chance to
 * intercept the call.
 */

NS_EXTERNAL_VIS_(__ptr_t)
malloc(size_t size)
{
    PRUint32 start, end;
    __ptr_t ptr;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        return __libc_malloc(size);
    }

    t->suppress_tracing++;
    start = PR_IntervalNow();
    ptr = __libc_malloc(size);
    end = PR_IntervalNow();
    t->suppress_tracing--;

    MallocCallback(ptr, size, start, end, t);

    return ptr;
}

NS_EXTERNAL_VIS_(__ptr_t)
calloc(size_t count, size_t size)
{
    PRUint32 start, end;
    __ptr_t ptr;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        return __libc_calloc(count, size);
    }

    t->suppress_tracing++;
    start = PR_IntervalNow();
    ptr = __libc_calloc(count, size);
    end = PR_IntervalNow();
    t->suppress_tracing--;

    CallocCallback(ptr, count, size, start, end, t);

    return ptr;
}

NS_EXTERNAL_VIS_(__ptr_t)
realloc(__ptr_t oldptr, size_t size)
{
    PRUint32 start, end;
    __ptr_t ptr;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        return __libc_realloc(oldptr, size);
    }

    t->suppress_tracing++;
    start = PR_IntervalNow();
    ptr = __libc_realloc(oldptr, size);
    end = PR_IntervalNow();
    t->suppress_tracing--;

    /* FIXME bug 392008: We could race with reallocation of oldptr. */
    ReallocCallback(oldptr, ptr, size, start, end, t);

    return ptr;
}

NS_EXTERNAL_VIS_(void*)
valloc(size_t size)
{
    PRUint32 start, end;
    __ptr_t ptr;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        return __libc_valloc(size);
    }

    t->suppress_tracing++;
    start = PR_IntervalNow();
    ptr = __libc_valloc(size);
    end = PR_IntervalNow();
    t->suppress_tracing--;

    MallocCallback(ptr, size, start, end, t);

    return ptr;
}

NS_EXTERNAL_VIS_(void*)
memalign(size_t boundary, size_t size)
{
    PRUint32 start, end;
    __ptr_t ptr;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        return __libc_memalign(boundary, size);
    }

    t->suppress_tracing++;
    start = PR_IntervalNow();
    ptr = __libc_memalign(boundary, size);
    end = PR_IntervalNow();
    t->suppress_tracing--;

    MallocCallback(ptr, size, start, end, t);

    return ptr;
}

NS_EXTERNAL_VIS_(int)
posix_memalign(void **memptr, size_t alignment, size_t size)
{
    __ptr_t ptr = memalign(alignment, size);
    if (!ptr)
        return ENOMEM;
    *memptr = ptr;
    return 0;
}

NS_EXTERNAL_VIS_(void)
free(__ptr_t ptr)
{
    PRUint32 start, end;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        __libc_free(ptr);
        return;
    }

    t->suppress_tracing++;
    start = PR_IntervalNow();
    __libc_free(ptr);
    end = PR_IntervalNow();
    t->suppress_tracing--;

    /* FIXME bug 392008: We could race with reallocation of ptr. */

    FreeCallback(ptr, start, end, t);
}

NS_EXTERNAL_VIS_(void)
cfree(void *ptr)
{
    free(ptr);
}

#define StartupHooker()   PR_BEGIN_MACRO PR_END_MACRO
#define ShutdownHooker()  PR_BEGIN_MACRO PR_END_MACRO

#elif defined(XP_WIN32)

/* See nsWinTraceMalloc.cpp. */

#endif

static const char magic[] = NS_TRACE_MALLOC_MAGIC;

static void
log_header(int logfd)
{
    uint32 ticksPerSec = PR_htonl(PR_TicksPerSecond());
    (void) write(logfd, magic, NS_TRACE_MALLOC_MAGIC_SIZE);
    (void) write(logfd, &ticksPerSec, sizeof ticksPerSec);
}

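/*
 * Illustrative sketch only (not part of the original file): a reader-side
 * check of the header written by log_header above, assuming the log is
 * re-read with stdio.  The tick rate is stored in network byte order, so
 * the reader converts it back with PR_ntohl.
 */
#if 0
static int example_check_header(FILE *in)
{
    char buf[NS_TRACE_MALLOC_MAGIC_SIZE];
    uint32 ticksPerSec;

    if (fread(buf, 1, sizeof buf, in) != sizeof buf ||
        memcmp(buf, magic, sizeof buf) != 0)
        return -1;                          /* not a trace-malloc log */
    if (fread(&ticksPerSec, 1, sizeof ticksPerSec, in) != sizeof ticksPerSec)
        return -1;
    return (int) PR_ntohl(ticksPerSec);     /* PR_IntervalNow ticks/second */
}
#endif
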
PR_IMPLEMENT(void)
NS_TraceMallocStartup(int logfd)
{
    /* We must be running on the primordial thread. */
    PR_ASSERT(tracing_enabled == 0);
    PR_ASSERT(logfp == &default_logfile);
    tracing_enabled = (logfd >= 0);

    if (tracing_enabled) {
        PR_ASSERT(logfp->simsize == 0); /* didn't overflow startup buffer */

        /* Log everything in logfp (aka default_logfile)'s buffer to logfd. */
        logfp->fd = logfd;
        logfile_list = &default_logfile;
        logfp->prevp = &logfile_list;
        logfile_tail = &logfp->next;
        log_header(logfd);
    }

    atexit(NS_TraceMallocShutdown);

    /*
     * We only allow one thread until NS_TraceMallocStartup is called.
     * When it is, we have to initialize tls_index before allocating tmlock
     * since tm_get_thread uses a NULL tmlock to detect that tls_index is
     * uninitialized.
     */
    main_thread.suppress_tracing++;
    TM_CREATE_TLS_INDEX(tls_index);
    TM_SET_TLS_DATA(tls_index, &main_thread);
    tmlock = PR_NewLock();
    main_thread.suppress_tracing--;

    if (tracing_enabled)
        StartupHooker();
}

/*
 * Options for log files, with the log file name either as the next option
 * or separated by '=' (e.g. "./mozilla --trace-malloc malloc.log" or
 * "./mozilla --trace-malloc=malloc.log").
 */
static const char TMLOG_OPTION[] = "--trace-malloc";
static const char SDLOG_OPTION[] = "--shutdown-leaks";

#define SHOULD_PARSE_ARG(name_, log_, arg_) \
    (0 == strncmp(arg_, name_, sizeof(name_) - 1))

#define PARSE_ARG(name_, log_, argv_, i_, consumed_)                          \
    PR_BEGIN_MACRO                                                            \
        char _nextchar = argv_[i_][sizeof(name_) - 1];                        \
        if (_nextchar == '=') {                                               \
            log_ = argv_[i_] + sizeof(name_);                                 \
            consumed_ = 1;                                                    \
        } else if (_nextchar == '\0') {                                       \
            log_ = argv_[i_+1];                                               \
            consumed_ = 2;                                                    \
        }                                                                     \
    PR_END_MACRO

PR_IMPLEMENT(int)
NS_TraceMallocStartupArgs(int argc, char **argv)
{
    int i, logfd = -1, consumed, logflags;
    char *tmlogname = NULL, *sdlogname_local = NULL;

    /*
     * Look for the --trace-malloc <logfile> option early, to avoid missing
     * early mallocs (we miss static constructors whose output overflows the
     * log file's static 16K output buffer).
     */
    for (i = 1; i < argc; i += consumed) {
        consumed = 0;
        if (SHOULD_PARSE_ARG(TMLOG_OPTION, tmlogname, argv[i]))
            PARSE_ARG(TMLOG_OPTION, tmlogname, argv, i, consumed);
        else if (SHOULD_PARSE_ARG(SDLOG_OPTION, sdlogname_local, argv[i]))
            PARSE_ARG(SDLOG_OPTION, sdlogname_local, argv, i, consumed);

        if (consumed) {
#ifndef XP_WIN32 /* If we don't comment this out, it will crash Windows. */
            int j;
            /* Now remove --trace-malloc and its argument from argv. */
            argc -= consumed;
            for (j = i; j < argc; ++j)
                argv[j] = argv[j+consumed];
            argv[argc] = NULL;
            consumed = 0; /* don't advance next iteration */
#endif
        } else {
            consumed = 1;
        }
    }

    if (tmlogname) {
#ifdef XP_UNIX
        int pipefds[2];
#endif

        switch (*tmlogname) {
#ifdef XP_UNIX
          case '|':
            if (pipe(pipefds) == 0) {
                pid_t pid = fork();
                if (pid == 0) {
                    /* In child: set up stdin, parse args, and exec. */
                    int maxargc, nargc;
                    char **nargv, *token;

                    if (pipefds[0] != 0) {
                        dup2(pipefds[0], 0);
                        close(pipefds[0]);
                    }
                    close(pipefds[1]);

                    tmlogname = strtok(tmlogname + 1, " \t");
                    maxargc = 3;
                    nargv = (char **) malloc((maxargc+1) * sizeof(char *));
                    if (!nargv) exit(1);
                    nargc = 0;
                    nargv[nargc++] = tmlogname;
                    while ((token = strtok(NULL, " \t")) != NULL) {
                        if (nargc == maxargc) {
                            maxargc *= 2;
                            nargv = (char**)
                                realloc(nargv, (maxargc+1) * sizeof(char*));
                            if (!nargv) exit(1);
                        }
                        nargv[nargc++] = token;
                    }
                    nargv[nargc] = NULL;

                    (void) setsid();
                    execvp(tmlogname, nargv);
                    exit(127);
                }

                if (pid > 0) {
                    /* In parent: set logfd to the pipe's write side. */
                    close(pipefds[0]);
                    logfd = pipefds[1];
                }
            }
            if (logfd < 0) {
                fprintf(stderr,
                        "%s: can't pipe to trace-malloc child process %s: %s\n",
                        argv[0], tmlogname, strerror(errno));
                exit(1);
            }
            break;
#endif /*XP_UNIX*/
          case '-':
            /* Don't log from startup, but do prepare to log later. */
            /* XXX traditional meaning of '-' as option argument is "stdin" or "stdout" */
            if (tmlogname[1] == '\0')
                break;
            /* FALL THROUGH */

          default:
            logflags = O_CREAT | O_WRONLY | O_TRUNC;
#if defined(XP_WIN32)
            /*
             * Avoid translations on WIN32.
             */
            logflags |= O_BINARY;
#endif
            logfd = open(tmlogname, logflags, 0644);
            if (logfd < 0) {
                fprintf(stderr,
                        "%s: can't create trace-malloc log named %s: %s\n",
                        argv[0], tmlogname, strerror(errno));
                exit(1);
            }
            break;
        }
    }

    if (sdlogname_local) {
        strncpy(sdlogname, sdlogname_local, sizeof(sdlogname));
        sdlogname[sizeof(sdlogname) - 1] = '\0';
    }

    NS_TraceMallocStartup(logfd);
    return argc;
}

PR_IMPLEMENT(void)
NS_TraceMallocShutdown(void)
{
    logfile *fp;

    if (sdlogname[0])
        NS_TraceMallocDumpAllocations(sdlogname);

    if (tmstats.backtrace_failures) {
        fprintf(stderr,
                "TraceMalloc backtrace failures: %lu (malloc %lu dladdr %lu)\n",
                (unsigned long) tmstats.backtrace_failures,
                (unsigned long) tmstats.btmalloc_failures,
                (unsigned long) tmstats.dladdr_failures);
    }
    while ((fp = logfile_list) != NULL) {
        logfile_list = fp->next;
        log_tmstats(fp);
        flush_logfile(fp);
        if (fp->fd >= 0) {
            close(fp->fd);
            fp->fd = -1;
        }
        if (fp != &default_logfile) {
            if (fp == logfp)
                logfp = &default_logfile;
            free((void*) fp);
        }
    }
    if (tmlock) {
        PRLock *lock = tmlock;
        tmlock = NULL;
        PR_DestroyLock(lock);
    }
    if (tracing_enabled) {
        tracing_enabled = 0;
        ShutdownHooker();
    }
}

PR_IMPLEMENT(void)
NS_TraceMallocDisable(void)
{
    tm_thread *t = tm_get_thread();
    logfile *fp;
    uint32 sample;

    /* Robustify in case of duplicate call. */
    PR_ASSERT(tracing_enabled);
    if (tracing_enabled == 0)
        return;

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    for (fp = logfile_list; fp; fp = fp->next)
        flush_logfile(fp);
    sample = --tracing_enabled;
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
    if (sample == 0)
        ShutdownHooker();
}

PR_IMPLEMENT(void)
NS_TraceMallocEnable(void)
{
    tm_thread *t = tm_get_thread();
    uint32 sample;

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    sample = ++tracing_enabled;
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
    if (sample == 1)
        StartupHooker();
}

PR_IMPLEMENT(int)
NS_TraceMallocChangeLogFD(int fd)
{
    logfile *oldfp, *fp;
    struct stat sb;
    tm_thread *t = tm_get_thread();

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    oldfp = logfp;
    if (oldfp->fd != fd) {
        flush_logfile(oldfp);
        fp = get_logfile(fd);
        if (!fp) {
            TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
            return -2;
        }
        if (fd >= 0 && fstat(fd, &sb) == 0 && sb.st_size == 0)
            log_header(fd);
        logfp = fp;
    }
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
    return oldfp->fd;
}

static PRIntn
lfd_clr_enumerator(PLHashEntry *he, PRIntn i, void *arg)
{
    lfdset_entry *le = (lfdset_entry*) he;
    logfile *fp = (logfile*) arg;

    LFD_CLR(fp->lfd, &le->lfdset);
    return HT_ENUMERATE_NEXT;
}

static void
lfd_clr_walk(callsite *site, logfile *fp)
{
    callsite *kid;

    LFD_CLR(fp->lfd, &site->lfdset);
    for (kid = site->kids; kid; kid = kid->siblings)
        lfd_clr_walk(kid, fp);
}

PR_IMPLEMENT(void)
NS_TraceMallocCloseLogFD(int fd)
{
    logfile *fp;
    tm_thread *t = tm_get_thread();

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);

    fp = get_logfile(fd);
    if (fp) {
        flush_logfile(fp);
        if (fp == &default_logfile) {
            /* Leave default_logfile in logfile_list with an fd of -1. */
            fp->fd = -1;

            /* NB: we can never free lfd 0, it belongs to default_logfile. */
            PR_ASSERT(fp->lfd == 0);
        } else {
            /* Clear fp->lfd in all possible lfdsets. */
            PL_HashTableEnumerateEntries(libraries, lfd_clr_enumerator, fp);
            PL_HashTableEnumerateEntries(methods, lfd_clr_enumerator, fp);
            lfd_clr_walk(&calltree_root, fp);

            /* Unlink fp from logfile_list, freeing lfd for reallocation. */
            *fp->prevp = fp->next;
            if (!fp->next) {
                PR_ASSERT(logfile_tail == &fp->next);
                logfile_tail = fp->prevp;
            }

            /* Reset logfp if we must, then free fp. */
            if (fp == logfp)
                logfp = &default_logfile;
            free((void*) fp);
        }
    }

    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
    close(fd);
}

PR_IMPLEMENT(void)
NS_TraceMallocLogTimestamp(const char *caption)
{
    logfile *fp;
#ifdef XP_UNIX
    struct timeval tv;
#endif
#ifdef XP_WIN32
    struct _timeb tb;
#endif
    tm_thread *t = tm_get_thread();

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);

    fp = logfp;
    log_byte(fp, TM_EVENT_TIMESTAMP);

#ifdef XP_UNIX
    gettimeofday(&tv, NULL);
    log_uint32(fp, (uint32) tv.tv_sec);
    log_uint32(fp, (uint32) tv.tv_usec);
#endif
#ifdef XP_WIN32
    _ftime(&tb);
    log_uint32(fp, (uint32) tb.time);
    log_uint32(fp, (uint32) tb.millitm);
#endif
    log_string(fp, caption);

    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
}

static void
print_stack(FILE *ofp, callsite *site)
{
    while (site) {
        if (site->name || site->parent) {
            fprintf(ofp, "%s[%s +0x%X]\n",
                    site->name, site->library, site->offset);
        }
        site = site->parent;
    }
}

static PRIntn
allocation_enumerator(PLHashEntry *he, PRIntn i, void *arg)
{
    allocation *alloc = (allocation*) he;
    FILE *ofp = (FILE*) arg;
    callsite *site = (callsite*) he->value;

    extern const char* nsGetTypeName(const void* ptr);
    unsigned long *p, *end;

    fprintf(ofp, "%p <%s> (%lu)\n",
            he->key,
            nsGetTypeName(he->key),
            (unsigned long) alloc->size);

    for (p   = (unsigned long*) he->key,
         end = (unsigned long*) ((char*)he->key + alloc->size);
         p < end; ++p) {
        fprintf(ofp, "\t0x%08lX\n", *p);
    }

    print_stack(ofp, site);
    fputc('\n', ofp);
    return HT_ENUMERATE_NEXT;
}

PR_IMPLEMENT(void)
NS_TraceStack(int skip, FILE *ofp)
{
    callsite *site;
    tm_thread *t = tm_get_thread();

    site = backtrace(t, skip + 1);
    while (site) {
        if (site->name || site->parent) {
            fprintf(ofp, "%s[%s +0x%X]\n",
                    site->name, site->library, site->offset);
        }
        site = site->parent;
    }
}

PR_IMPLEMENT(int)
NS_TraceMallocDumpAllocations(const char *pathname)
{
    FILE *ofp;
    int rv;

    tm_thread *t = tm_get_thread();

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);

    ofp = fopen(pathname, WRITE_FLAGS);
    if (ofp) {
        if (allocations) {
            PL_HashTableEnumerateEntries(allocations, allocation_enumerator,
                                         ofp);
        }
        rv = ferror(ofp) ? -1 : 0;
        fclose(ofp);
    } else {
        rv = -1;
    }

    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);

    return rv;
}

PR_IMPLEMENT(void)
NS_TraceMallocFlushLogfiles(void)
{
    logfile *fp;
    tm_thread *t = tm_get_thread();

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);

    for (fp = logfile_list; fp; fp = fp->next)
        flush_logfile(fp);

    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
}

PR_IMPLEMENT(void)
NS_TrackAllocation(void* ptr, FILE *ofp)
{
    allocation *alloc;
    tm_thread *t = tm_get_thread();

    fprintf(ofp, "Trying to track %p\n", (void*) ptr);
    setlinebuf(ofp);

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    if (get_allocations()) {
        alloc = (allocation*)
                *PL_HashTableRawLookup(allocations, hash_pointer(ptr), ptr);
        if (alloc) {
            fprintf(ofp, "Tracking %p\n", (void*) ptr);
            alloc->trackfp = ofp;
        } else {
            fprintf(ofp, "Not tracking %p\n", (void*) ptr);
        }
    }
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
}

PR_IMPLEMENT(void)
MallocCallback(void *ptr, size_t size, PRUint32 start, PRUint32 end, tm_thread *t)
{
    callsite *site;
    PLHashEntry *he;
    allocation *alloc;

    if (!tracing_enabled || t->suppress_tracing != 0)
        return;

    site = backtrace(t, 2);

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    tmstats.malloc_calls++;
    if (!ptr) {
        tmstats.malloc_failures++;
    } else {
        if (site) {
            log_event5(logfp, TM_EVENT_MALLOC,
                       site->serial, start, end - start,
                       (uint32)NS_PTR_TO_INT32(ptr), size);
        }
        if (get_allocations()) {
            he = PL_HashTableAdd(allocations, ptr, site);
            if (he) {
                alloc = (allocation*) he;
                alloc->size = size;
                alloc->trackfp = NULL;
            }
        }
    }
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
}

PR_IMPLEMENT(void)
CallocCallback(void *ptr, size_t count, size_t size, PRUint32 start, PRUint32 end, tm_thread *t)
{
    callsite *site;
    PLHashEntry *he;
    allocation *alloc;

    if (!tracing_enabled || t->suppress_tracing != 0)
        return;

    site = backtrace(t, 2);

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    tmstats.calloc_calls++;
    if (!ptr) {
        tmstats.calloc_failures++;
    } else {
        size *= count;
        if (site) {
            log_event5(logfp, TM_EVENT_CALLOC,
                       site->serial, start, end - start,
                       (uint32)NS_PTR_TO_INT32(ptr), size);
        }
        if (get_allocations()) {
            he = PL_HashTableAdd(allocations, ptr, site);
            if (he) {
                alloc = (allocation*) he;
                alloc->size = size;
                alloc->trackfp = NULL;
            }
        }
    }
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
}

PR_IMPLEMENT(void)
ReallocCallback(void * oldptr, void *ptr, size_t size,
                PRUint32 start, PRUint32 end, tm_thread *t)
{
    callsite *oldsite, *site;
    size_t oldsize;
    PLHashNumber hash;
    PLHashEntry **hep, *he;
    allocation *alloc;
    FILE *trackfp = NULL;

    if (!tracing_enabled || t->suppress_tracing != 0)
        return;

    site = backtrace(t, 2);

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    tmstats.realloc_calls++;
    oldsite = NULL;
    oldsize = 0;
    hep = NULL;
    he = NULL;
    if (oldptr && get_allocations()) {
        hash = hash_pointer(oldptr);
        hep = PL_HashTableRawLookup(allocations, hash, oldptr);
        he = *hep;
        if (he) {
            oldsite = (callsite*) he->value;
            alloc = (allocation*) he;
            oldsize = alloc->size;
            trackfp = alloc->trackfp;
            if (trackfp) {
                fprintf(alloc->trackfp,
                        "\nrealloc(%p, %lu), oldsize %lu, alloc site %p\n",
                        (void*) ptr, (unsigned long) size,
                        (unsigned long) oldsize, (void*) oldsite);
                NS_TraceStack(1, trackfp);
            }
        }
    }
    if (!ptr && size) {
        /*
         * When realloc() fails, the original block is not freed or moved, so
         * we'll leave the allocation entry untouched.
         */
        tmstats.realloc_failures++;
    } else {
        if (site) {
            log_event8(logfp, TM_EVENT_REALLOC,
                       site->serial, start, end - start,
                       (uint32)NS_PTR_TO_INT32(ptr), size,
                       oldsite ? oldsite->serial : 0,
                       (uint32)NS_PTR_TO_INT32(oldptr), oldsize);
        }
        if (ptr && allocations) {
            if (ptr != oldptr) {
                /*
                 * If we're reallocating (not allocating new space by passing
                 * null to realloc) and realloc moved the block, free oldptr.
                 */
                if (he)
                    PL_HashTableRawRemove(allocations, hep, he);

                /* Record the new allocation now, setting he. */
                he = PL_HashTableAdd(allocations, ptr, site);
            } else {
                /*
                 * If we haven't yet recorded an allocation (possibly due to a
                 * temporary memory shortage), do it now.
                 */
                if (!he)
                    he = PL_HashTableAdd(allocations, ptr, site);
            }
            if (he) {
                alloc = (allocation*) he;
                alloc->size = size;
                alloc->trackfp = trackfp;
            }
        }
    }
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
}

PR_IMPLEMENT(void)
FreeCallback(void * ptr, PRUint32 start, PRUint32 end, tm_thread *t)
{
    PLHashEntry **hep, *he;
    callsite *site;
    allocation *alloc;

    if (!tracing_enabled || t->suppress_tracing != 0)
        return;

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    tmstats.free_calls++;
    if (!ptr) {
        tmstats.null_free_calls++;
    } else {
        if (get_allocations()) {
            hep = PL_HashTableRawLookup(allocations, hash_pointer(ptr), ptr);
            he = *hep;
            if (he) {
                site = (callsite*) he->value;
                if (site) {
                    alloc = (allocation*) he;
                    if (alloc->trackfp) {
                        fprintf(alloc->trackfp, "\nfree(%p), alloc site %p\n",
                                (void*) ptr, (void*) site);
                        NS_TraceStack(1, alloc->trackfp);
                    }
                    log_event5(logfp, TM_EVENT_FREE,
                               site->serial, start, end - start,
                               (uint32)NS_PTR_TO_INT32(ptr), alloc->size);
                }
                PL_HashTableRawRemove(allocations, hep, he);
            }
        }
    }
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
}

PR_IMPLEMENT(nsTMStackTraceID)
NS_TraceMallocGetStackTrace(void)
{
    callsite *site;
    tm_thread *t = tm_get_thread();

    site = backtrace(t, 2);
    return (nsTMStackTraceID) site;
}

PR_IMPLEMENT(void)
NS_TraceMallocPrintStackTrace(FILE *ofp, nsTMStackTraceID id)
{
    print_stack(ofp, (callsite *)id);
}

#endif /* NS_TRACE_MALLOC */