/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim:cindent:ts=8:et:sw=4:
 *
 * ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is nsTraceMalloc.c/bloatblame.c code, released
 * April 19, 2000.
 *
 * The Initial Developer of the Original Code is
 * Netscape Communications Corporation.
 * Portions created by the Initial Developer are Copyright (C) 2000
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *   Brendan Eich, 14-April-2000
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either the GNU General Public License Version 2 or later (the "GPL"), or
 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */
#ifdef NS_TRACE_MALLOC
/*
 * TODO:
 * - FIXME https://bugzilla.mozilla.org/show_bug.cgi?id=392008
 * - extend logfile so 'F' record tells free stack
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "plhash.h"
#include "prinit.h"    /* for PR_Initialized */
#include "prinrval.h"  /* for PR_IntervalNow */
#include "prlock.h"
#include "prlog.h"
#include "prnetdb.h"   /* for PR_htonl */
#include "prprf.h"

#include "nsTraceMalloc.h"
#include "nsStackWalk.h"
#include "nsTraceMallocCallbacks.h"
#if defined(XP_MACOSX)

#include <malloc/malloc.h>
#include <unistd.h>

#define WRITE_FLAGS "w"

#define __libc_malloc(x)     malloc(x)
#define __libc_realloc(x, y) realloc(x, y)
#define __libc_free(x)       free(x)
#elif defined(XP_UNIX)

#include <malloc.h>
#include <unistd.h>

#define WRITE_FLAGS "w"

#ifdef WRAP_SYSTEM_INCLUDES
#pragma GCC visibility push(default)
#endif
extern __ptr_t __libc_malloc(size_t);
extern __ptr_t __libc_calloc(size_t, size_t);
extern __ptr_t __libc_realloc(__ptr_t, size_t);
extern void    __libc_free(__ptr_t);
extern __ptr_t __libc_memalign(size_t, size_t);
extern __ptr_t __libc_valloc(size_t);
#ifdef WRAP_SYSTEM_INCLUDES
#pragma GCC visibility pop
#endif
#elif defined(XP_WIN32)

#include <sys/timeb.h> /* for timeb */
#include <sys/stat.h>  /* for fstat */

#include <io.h> /* for write */

#define WRITE_FLAGS "w"

#define __libc_malloc(x)     dhw_orig_malloc(x)
#define __libc_realloc(x, y) dhw_orig_realloc(x, y)
#define __libc_free(x)       dhw_orig_free(x)

#else /* not XP_MACOSX, XP_UNIX, or XP_WIN32 */

# error "Unknown build configuration!"

#endif
typedef struct logfile logfile;

#define STARTUP_TMBUFSIZE (64 * 1024)
#define LOGFILE_TMBUFSIZE (16 * 1024)

struct logfile {
    int         fd;
    int         lfd;            /* logical fd, dense among all logfiles */
    char        *buf;
    int         bufsize, pos;
    uint32      size, simsize;
    logfile     *next;
    logfile     **prevp;
};
static char      default_buf[STARTUP_TMBUFSIZE];
static logfile   default_logfile =
                   {-1, 0, default_buf, STARTUP_TMBUFSIZE, 0, 0, 0, NULL, NULL};
static logfile   *logfile_list = NULL;
static logfile   **logfile_tail = &logfile_list;
static logfile   *logfp = &default_logfile;
static PRLock    *tmlock = NULL;

#ifndef PATH_MAX
#define PATH_MAX 4096
#endif
static char      sdlogname[PATH_MAX] = ""; /* filename for shutdown leak log */
/*
 * This enables/disables trace-malloc logging.
 *
 * It is separate from suppress_tracing so that we do not have to pay
 * the performance cost of repeated TM_GET_TLS_DATA calls when
 * trace-malloc is disabled (which is not as bad as the locking we used
 * to have, but is still worth avoiding).
 *
 * It must default to zero, since it can be tested by the Linux malloc
 * hooks before NS_TraceMallocStartup sets it.
 */
static uint32 tracing_enabled = 0;
/*
 * This lock must be held while manipulating the calltree, the
 * allocations table, the log, or the tmstats.
 *
 * Callers should not *enter* the lock without checking suppress_tracing
 * first; otherwise they risk trying to re-enter on the same thread.
 */
#define TM_ENTER_LOCK(t)                                                      \
    PR_BEGIN_MACRO                                                            \
        PR_ASSERT(t->suppress_tracing != 0);                                  \
        if (tmlock)                                                           \
            PR_Lock(tmlock);                                                  \
    PR_END_MACRO

#define TM_EXIT_LOCK(t)                                                       \
    PR_BEGIN_MACRO                                                            \
        PR_ASSERT(t->suppress_tracing != 0);                                  \
        if (tmlock)                                                           \
            PR_Unlock(tmlock);                                                \
    PR_END_MACRO

#define TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t)                                 \
    PR_BEGIN_MACRO                                                            \
        t->suppress_tracing++;                                                \
        TM_ENTER_LOCK(t);                                                     \
    PR_END_MACRO

#define TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t)                                \
    PR_BEGIN_MACRO                                                            \
        TM_EXIT_LOCK(t);                                                      \
        t->suppress_tracing--;                                                \
    PR_END_MACRO
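/*
 * Usage sketch (added for clarity; this is the pattern the callbacks later
 * in this file follow):
 *
 *     tm_thread *t = tm_get_thread();
 *     TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
 *     ... touch tmstats, the calltree, the allocations table, the log ...
 *     TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
 *
 * Bumping suppress_tracing before taking tmlock is what keeps any malloc
 * performed while acquiring or holding the lock from being traced and
 * re-entering on the same thread.
 */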
/*
 * Thread-local storage.
 *
 * We can't use NSPR thread-local storage for this because it mallocs
 * within PR_GetThreadPrivate (the first time) and PR_SetThreadPrivate
 * (which can be worked around by protecting all uses of those functions
 * with a monitor, ugh) and because it calls malloc/free when the
 * thread-local storage is in an inconsistent state within
 * PR_SetThreadPrivate (when expanding the thread-local storage array)
 * and _PRI_DetachThread (when and after deleting the thread-local
 * storage array).
 */

#ifdef XP_WIN32

#include <windows.h>

#define TM_TLS_INDEX_TYPE        DWORD
#define TM_CREATE_TLS_INDEX(i_)  PR_BEGIN_MACRO                               \
                                   (i_) = TlsAlloc();                         \
                                 PR_END_MACRO
#define TM_DESTROY_TLS_INDEX(i_) TlsFree((i_))
#define TM_GET_TLS_DATA(i_)      TlsGetValue((i_))
#define TM_SET_TLS_DATA(i_, v_)  TlsSetValue((i_), (v_))

#else

#include <pthread.h>

#define TM_TLS_INDEX_TYPE        pthread_key_t
#define TM_CREATE_TLS_INDEX(i_)  pthread_key_create(&(i_), NULL)
#define TM_DESTROY_TLS_INDEX(i_) pthread_key_delete((i_))
#define TM_GET_TLS_DATA(i_)      pthread_getspecific((i_))
#define TM_SET_TLS_DATA(i_, v_)  pthread_setspecific((i_), (v_))

#endif
static TM_TLS_INDEX_TYPE tls_index;
static tm_thread main_thread; /* 0-initialization is correct */
/* FIXME (maybe): This is currently unused; we leak the thread-local data. */
#if 0
static void
free_tm_thread(void *priv)
{
    tm_thread *t = (tm_thread*) priv;

    PR_ASSERT(t->suppress_tracing == 0);

    t->suppress_tracing = 1;
    if (t->backtrace_buf.buffer)
        __libc_free(t->backtrace_buf.buffer);
    __libc_free(t);
}
#endif
static tm_thread *
tm_get_thread(void)
{
    tm_thread *t;
    tm_thread stack_tm_thread;

    if (!tmlock) {
        /* Too early; only the main thread can be running. */
        return &main_thread;
    }

    t = TM_GET_TLS_DATA(tls_index);

    if (!t) {
        /*
         * First, store a tm_thread on the stack to suppress for the
         * malloc below.
         */
        stack_tm_thread.suppress_tracing = 1;
        stack_tm_thread.backtrace_buf.buffer = NULL;
        stack_tm_thread.backtrace_buf.size = 0;
        stack_tm_thread.backtrace_buf.entries = 0;
        TM_SET_TLS_DATA(tls_index, &stack_tm_thread);

        t = (tm_thread*) __libc_malloc(sizeof(tm_thread));
        t->suppress_tracing = 0;
        t->backtrace_buf = stack_tm_thread.backtrace_buf;
        TM_SET_TLS_DATA(tls_index, t);

        PR_ASSERT(stack_tm_thread.suppress_tracing == 1); /* balanced */
    }

    return t;
}
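/*
 * Note: the stack_tm_thread dance above is what makes thread bootstrap
 * re-entrancy safe.  While the real tm_thread is being allocated with
 * __libc_malloc, TLS already points at stack_tm_thread with
 * suppress_tracing == 1, so if that allocation re-enters the tracer
 * (e.g. via the OS X malloc_logger hook), the callbacks see suppression
 * and bail out immediately.
 */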
/* We don't want more than 32 logfiles open at once, ok? */
typedef uint32          lfd_set;

#define LFD_SET_STATIC_INITIALIZER 0
#define LFD_SET_SIZE    32

#define LFD_ZERO(s)     (*(s) = 0)
#define LFD_BIT(i)      ((uint32)1 << (i))
#define LFD_TEST(i,s)   (LFD_BIT(i) & *(s))
#define LFD_SET(i,s)    (*(s) |= LFD_BIT(i))
#define LFD_CLR(i,s)    (*(s) &= ~LFD_BIT(i))
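/*
 * Worked example: a logfile with lfd == 3 owns bit LFD_BIT(3) == 0x8, so
 * LFD_SET(3, &s) turns that bit on, LFD_TEST(3, &s) then yields nonzero,
 * and LFD_CLR(3, &s) masks it back out.  The calltree code below keeps one
 * such set per callsite/library/method entry to remember which logfiles
 * have already been sent that entry's trace record.
 */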
static logfile *get_logfile(int fd)
{
    logfile *fp;
    int lfd;

    for (fp = logfile_list; fp; fp = fp->next) {
        if (fp->fd == fd)
            return fp;
    }
    lfd = 0;
retry:
    for (fp = logfile_list; fp; fp = fp->next) {
        if (fp->lfd == lfd) {
            if (++lfd >= LFD_SET_SIZE)
                return NULL;
            goto retry;
        }
    }
    fp = __libc_malloc(sizeof(logfile) + LOGFILE_TMBUFSIZE);
    if (!fp)
        return NULL;
    fp->fd = fd;
    fp->lfd = lfd;
    fp->buf = (char*) (fp + 1);
    fp->bufsize = LOGFILE_TMBUFSIZE;
    fp->pos = 0;
    fp->size = fp->simsize = 0;
    fp->next = NULL;
    fp->prevp = logfile_tail;
    *logfile_tail = fp;
    logfile_tail = &fp->next;
    return fp;
}
static void flush_logfile(logfile *fp)
{
    int len, cnt, fd;
    char *bp;

    len = fp->pos;
    if (len == 0)
        return;
    fp->pos = 0;
    fd = fp->fd;
    if (fd < 0) {
        /* No file yet: account for what would have been written. */
        fp->simsize += len;
        return;
    }

    fp->size += len;
    bp = fp->buf;
    do {
        cnt = write(fd, bp, len);
        if (cnt <= 0) {
            printf("### nsTraceMalloc: write failed or wrote 0 bytes!\n");
            return;
        }
        bp += cnt;
        len -= cnt;
    } while (len > 0);
}
static void log_byte(logfile *fp, char byte)
{
    if (fp->pos == fp->bufsize)
        flush_logfile(fp);
    fp->buf[fp->pos++] = byte;
}
static void log_string(logfile *fp, const char *str)
{
    int len, rem, cnt;

    len = strlen(str) + 1; /* include null terminator */
    while ((rem = fp->pos + len - fp->bufsize) > 0) {
        cnt = len - rem; /* as much as the buffer will hold */
        memcpy(&fp->buf[fp->pos], str, cnt);
        str += cnt;
        fp->pos += cnt;
        flush_logfile(fp);
        len = rem;
    }
    memcpy(&fp->buf[fp->pos], str, len);
    fp->pos += len;
}
static void log_filename(logfile *fp, const char *filename)
{
    if (strlen(filename) < 512) {
        char *bp, *cp, buf[512];

        bp = strstr(strcpy(buf, filename), "mozilla");
        if (!bp)
            bp = buf;

        /* Normalize Windows path separators. */
        for (cp = bp; *cp; cp++) {
            if (*cp == '\\')
                *cp = '/';
        }
        filename = bp;
    }
    log_string(fp, filename);
}
static void log_uint32(logfile *fp, uint32 ival)
{
    if (ival < 0x80) {
        /* 0xxx xxxx */
        log_byte(fp, (char) ival);
    } else if (ival < 0x4000) {
        /* 10xx xxxx xxxx xxxx */
        log_byte(fp, (char) ((ival >> 8) | 0x80));
        log_byte(fp, (char) (ival & 0xff));
    } else if (ival < 0x200000) {
        /* 110x xxxx xxxx xxxx xxxx xxxx */
        log_byte(fp, (char) ((ival >> 16) | 0xc0));
        log_byte(fp, (char) ((ival >> 8) & 0xff));
        log_byte(fp, (char) (ival & 0xff));
    } else if (ival < 0x10000000) {
        /* 1110 xxxx xxxx xxxx xxxx xxxx xxxx xxxx */
        log_byte(fp, (char) ((ival >> 24) | 0xe0));
        log_byte(fp, (char) ((ival >> 16) & 0xff));
        log_byte(fp, (char) ((ival >> 8) & 0xff));
        log_byte(fp, (char) (ival & 0xff));
    } else {
        /* 1111 0000 xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx */
        log_byte(fp, (char) 0xf0);
        log_byte(fp, (char) ((ival >> 24) & 0xff));
        log_byte(fp, (char) ((ival >> 16) & 0xff));
        log_byte(fp, (char) ((ival >> 8) & 0xff));
        log_byte(fp, (char) (ival & 0xff));
    }
}
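/*
 * Worked examples of the variable-length encoding above, derived from the
 * branches: 0x42 < 0x80 is the single byte 0x42; 0x234 < 0x4000 becomes
 * 0x82 0x34 (high byte ORed with 0x80); 0x12345 < 0x200000 becomes
 * 0xC1 0x23 0x45; and 0x12345678 >= 0x10000000 takes the five-byte form
 * 0xF0 0x12 0x34 0x56 0x78.  Small serial numbers and sizes, by far the
 * common case, therefore cost only one or two bytes in the log.
 */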
static void log_event1(logfile *fp, char event, uint32 serial)
{
    log_byte(fp, event);
    log_uint32(fp, (uint32) serial);
}

static void log_event2(logfile *fp, char event, uint32 serial, size_t size)
{
    log_event1(fp, event, serial);
    log_uint32(fp, (uint32) size);
}

static void log_event3(logfile *fp, char event, uint32 serial, size_t oldsize,
                       size_t size)
{
    log_event2(fp, event, serial, oldsize);
    log_uint32(fp, (uint32) size);
}

static void log_event4(logfile *fp, char event, uint32 serial, uint32 ui2,
                       uint32 ui3, uint32 ui4)
{
    log_event3(fp, event, serial, ui2, ui3);
    log_uint32(fp, (uint32) ui4);
}

static void log_event5(logfile *fp, char event, uint32 serial, uint32 ui2,
                       uint32 ui3, uint32 ui4, uint32 ui5)
{
    log_event4(fp, event, serial, ui2, ui3, ui4);
    log_uint32(fp, (uint32) ui5);
}

static void log_event6(logfile *fp, char event, uint32 serial, uint32 ui2,
                       uint32 ui3, uint32 ui4, uint32 ui5, uint32 ui6)
{
    log_event5(fp, event, serial, ui2, ui3, ui4, ui5);
    log_uint32(fp, (uint32) ui6);
}

static void log_event7(logfile *fp, char event, uint32 serial, uint32 ui2,
                       uint32 ui3, uint32 ui4, uint32 ui5, uint32 ui6,
                       uint32 ui7)
{
    log_event6(fp, event, serial, ui2, ui3, ui4, ui5, ui6);
    log_uint32(fp, (uint32) ui7);
}

static void log_event8(logfile *fp, char event, uint32 serial, uint32 ui2,
                       uint32 ui3, uint32 ui4, uint32 ui5, uint32 ui6,
                       uint32 ui7, uint32 ui8)
{
    log_event7(fp, event, serial, ui2, ui3, ui4, ui5, ui6, ui7);
    log_uint32(fp, (uint32) ui8);
}
typedef struct callsite callsite;

struct callsite {
    void*       pc;
    uint32      serial;
    lfd_set     lfdset;
    const char  *name;      /* pointer to string owned by methods table */
    const char  *library;   /* pointer to string owned by libraries table */
    int         offset;
    callsite    *parent;
    callsite    *siblings;
    callsite    *kids;
};

/* NB: these counters are incremented and decremented only within tmlock. */
static uint32 library_serial_generator = 0;
static uint32 method_serial_generator = 0;
static uint32 callsite_serial_generator = 0;
static uint32 tmstats_serial_generator = 0;
static uint32 filename_serial_generator = 0;

/* Root of the tree of callsites, the sum of all (cycle-compressed) stacks. */
static callsite calltree_root =
  {0, 0, LFD_SET_STATIC_INITIALIZER, NULL, NULL, 0, NULL, NULL, NULL};

/* Basic instrumentation. */
static nsTMStats tmstats = NS_TMSTATS_STATIC_INITIALIZER;

/* Parent with the most kids (tmstats.calltree_maxkids). */
static callsite *calltree_maxkids_parent;

/* Calltree leaf for path with deepest stack backtrace. */
static callsite *calltree_maxstack_top;

/* Last site (i.e., calling pc) that recurred during a backtrace. */
static callsite *last_callsite_recurrence;
static void log_tmstats(logfile *fp)
{
    log_event1(fp, TM_EVENT_STATS, ++tmstats_serial_generator);
    log_uint32(fp, tmstats.calltree_maxstack);
    log_uint32(fp, tmstats.calltree_maxdepth);
    log_uint32(fp, tmstats.calltree_parents);
    log_uint32(fp, tmstats.calltree_maxkids);
    log_uint32(fp, tmstats.calltree_kidhits);
    log_uint32(fp, tmstats.calltree_kidmisses);
    log_uint32(fp, tmstats.calltree_kidsteps);
    log_uint32(fp, tmstats.callsite_recurrences);
    log_uint32(fp, tmstats.backtrace_calls);
    log_uint32(fp, tmstats.backtrace_failures);
    log_uint32(fp, tmstats.btmalloc_failures);
    log_uint32(fp, tmstats.dladdr_failures);
    log_uint32(fp, tmstats.malloc_calls);
    log_uint32(fp, tmstats.malloc_failures);
    log_uint32(fp, tmstats.calloc_calls);
    log_uint32(fp, tmstats.calloc_failures);
    log_uint32(fp, tmstats.realloc_calls);
    log_uint32(fp, tmstats.realloc_failures);
    log_uint32(fp, tmstats.free_calls);
    log_uint32(fp, tmstats.null_free_calls);
    log_uint32(fp, calltree_maxkids_parent ? calltree_maxkids_parent->serial
                                           : 0);
    log_uint32(fp, calltree_maxstack_top ? calltree_maxstack_top->serial : 0);
}
static void *generic_alloctable(void *pool, PRSize size)
{
    return __libc_malloc(size);
}

static void generic_freetable(void *pool, void *item)
{
    __libc_free(item);
}

typedef struct lfdset_entry {
    PLHashEntry base;
    lfd_set     lfdset;
} lfdset_entry;

static PLHashEntry *lfdset_allocentry(void *pool, const void *key)
{
    lfdset_entry *le = __libc_malloc(sizeof *le);
    if (le)
        LFD_ZERO(&le->lfdset);
    return &le->base;
}

static void lfdset_freeentry(void *pool, PLHashEntry *he, PRUintn flag)
{
    lfdset_entry *le;

    if (flag != HT_FREE_ENTRY)
        return;
    le = (lfdset_entry*) he;
    __libc_free((void*) le);
}

static PLHashAllocOps lfdset_hashallocops = {
    generic_alloctable, generic_freetable,
    lfdset_allocentry,  lfdset_freeentry
};

/* Table of library pathnames mapped to logged 'L' record serial numbers. */
static PLHashTable *libraries = NULL;

/* Table of filename pathnames mapped to logged 'G' record serial numbers. */
static PLHashTable *filenames = NULL;

/* Table mapping method names to logged 'N' record serial numbers. */
static PLHashTable *methods = NULL;
static callsite *
calltree(void **stack, size_t num_stack_entries, tm_thread *t)
{
    logfile *fp = logfp;
    void *pc;
    uint32 nkids;
    callsite *parent, *site, **csp, *tmp;
    int maxstack;
    uint32 library_serial, method_serial, filename_serial;
    const char *library, *method, *filename;
    char *slash;
    PLHashNumber hash;
    PLHashEntry **hep, *he;
    lfdset_entry *le;
    size_t stack_index;
    nsCodeAddressDetails details;
    nsresult rv;

    /*
     * FIXME bug 391749: We should really lock only the minimum amount
     * that we need to in this function, because it makes some calls
     * that could lock in the system's shared library loader.
     */
    TM_ENTER_LOCK(t);

    maxstack = (num_stack_entries > tmstats.calltree_maxstack);
    if (maxstack) {
        /* these two are the same, although that used to be less clear */
        tmstats.calltree_maxstack = num_stack_entries;
        tmstats.calltree_maxdepth = num_stack_entries;
    }

    /* Reverse the stack again, finding and building a path in the tree. */
    parent = &calltree_root;
    stack_index = num_stack_entries;
    do {
        --stack_index;
        pc = stack[stack_index];

        csp = &parent->kids;
        while ((site = *csp) != NULL) {
            if (site->pc == pc) {
                tmstats.calltree_kidhits++;

                /* Put the most recently used site at the front of siblings. */
                *csp = site->siblings;
                site->siblings = parent->kids;
                parent->kids = site;

                /* Check whether we've logged for this site and logfile yet. */
                if (!LFD_TEST(fp->lfd, &site->lfdset)) {
                    /*
                     * Some other logfile put this site in the calltree. We
                     * must log an event for site, and possibly first for its
                     * method and/or library. Note the code after the while
                     * loop that tests if (!site).
                     */
                    break;
                }

                /* Site already built and logged to fp -- go up the stack. */
                goto upward;
            }
            tmstats.calltree_kidsteps++;
            csp = &site->siblings;
        }

        if (!site) {
            tmstats.calltree_kidmisses++;

            /* Check for recursion: see if pc is on our ancestor line. */
            for (site = parent; site; site = site->parent) {
                if (site->pc == pc) {
                    tmstats.callsite_recurrences++;
                    last_callsite_recurrence = site;
                    goto upward;
                }
            }
        }

        /*
         * Not in tree at all, or not logged to fp: let's find our symbolic
         * callsite info.
         */

        /*
         * NS_DescribeCodeAddress can (on Linux) acquire a lock inside
         * the shared library loader. Another thread might call malloc
         * while holding that lock (when loading a shared library). So
         * we have to exit tmlock around this call. For details, see
         * https://bugzilla.mozilla.org/show_bug.cgi?id=363334#c3
         *
         * We could be more efficient by building the nodes in the
         * calltree, exiting the monitor once to describe all of them,
         * and then filling in the descriptions for any that hadn't been
         * described already. But this is easier for now.
         */
        TM_EXIT_LOCK(t);
        rv = NS_DescribeCodeAddress(pc, &details);
        TM_ENTER_LOCK(t);
        if (NS_FAILED(rv)) {
            tmstats.dladdr_failures++;
            goto fail;
        }

        /* Check whether we need to emit a library trace record. */
        library_serial = 0;
        library = NULL;
        if (details.library[0]) {
            if (!libraries) {
                libraries = PL_NewHashTable(100, PL_HashString,
                                            PL_CompareStrings, PL_CompareValues,
                                            &lfdset_hashallocops, NULL);
                if (!libraries) {
                    tmstats.btmalloc_failures++;
                    goto fail;
                }
            }
            hash = PL_HashString(details.library);
            hep = PL_HashTableRawLookup(libraries, hash, details.library);
            he = *hep;
            if (he) {
                library = (char*) he->key;
                library_serial = (uint32) NS_PTR_TO_INT32(he->value);
                le = (lfdset_entry *) he;
                if (LFD_TEST(fp->lfd, &le->lfdset)) {
                    /* We already logged an event on fp for this library. */
                    le = NULL;
                }
            } else {
                library = strdup(details.library);
                if (library) {
                    library_serial = ++library_serial_generator;
                    he = PL_HashTableRawAdd(libraries, hep, hash, library,
                                            (void*) library_serial);
                }
                if (!he) {
                    tmstats.btmalloc_failures++;
                    goto fail;
                }
                le = (lfdset_entry *) he;
            }
            if (le) {
                /* Need to log an event to fp for this lib. */
                slash = strrchr(library, '/');
                log_event1(fp, TM_EVENT_LIBRARY, library_serial);
                log_string(fp, slash ? slash + 1 : library);
                LFD_SET(fp->lfd, &le->lfdset);
            }
        }

        /* For compatibility with current log format, always emit a
         * filename trace record, using "noname" / 0 when no file name
         * is available. */
        filename_serial = 0;
        filename = details.filename[0] ? details.filename : "noname";
        if (!filenames) {
            filenames = PL_NewHashTable(100, PL_HashString,
                                        PL_CompareStrings, PL_CompareValues,
                                        &lfdset_hashallocops, NULL);
            if (!filenames) {
                tmstats.btmalloc_failures++;
                goto fail;
            }
        }
        hash = PL_HashString(filename);
        hep = PL_HashTableRawLookup(filenames, hash, filename);
        he = *hep;
        if (he) {
            filename = (char*) he->key;
            filename_serial = (uint32) NS_PTR_TO_INT32(he->value);
            le = (lfdset_entry *) he;
            if (LFD_TEST(fp->lfd, &le->lfdset)) {
                /* We already logged an event on fp for this filename. */
                le = NULL;
            }
        } else {
            filename = strdup(filename);
            if (filename) {
                filename_serial = ++filename_serial_generator;
                he = PL_HashTableRawAdd(filenames, hep, hash, filename,
                                        (void*) filename_serial);
            }
            if (!he) {
                tmstats.btmalloc_failures++;
                goto fail;
            }
            le = (lfdset_entry *) he;
        }
        if (le) {
            /* Need to log an event to fp for this filename. */
            log_event1(fp, TM_EVENT_FILENAME, filename_serial);
            log_filename(fp, filename);
            LFD_SET(fp->lfd, &le->lfdset);
        }

        if (!details.function[0]) {
            PR_snprintf(details.function, sizeof(details.function),
                        "%s+%X", library ? library : "main", details.loffset);
        }

        /* Emit an 'N' (for New method, 'M' is for malloc!) event if needed. */
        method_serial = 0;
        if (!methods) {
            methods = PL_NewHashTable(10000, PL_HashString,
                                      PL_CompareStrings, PL_CompareValues,
                                      &lfdset_hashallocops, NULL);
            if (!methods) {
                tmstats.btmalloc_failures++;
                goto fail;
            }
        }
        hash = PL_HashString(details.function);
        hep = PL_HashTableRawLookup(methods, hash, details.function);
        he = *hep;
        if (he) {
            method = (char*) he->key;
            method_serial = (uint32) NS_PTR_TO_INT32(he->value);
            le = (lfdset_entry *) he;
            if (LFD_TEST(fp->lfd, &le->lfdset)) {
                /* We already logged an event on fp for this method. */
                le = NULL;
            }
        } else {
            method = strdup(details.function);
            if (method) {
                method_serial = ++method_serial_generator;
                he = PL_HashTableRawAdd(methods, hep, hash, method,
                                        (void*) method_serial);
            }
            if (!he) {
                tmstats.btmalloc_failures++;
                goto fail;
            }
            le = (lfdset_entry *) he;
        }
        if (le) {
            log_event4(fp, TM_EVENT_METHOD, method_serial, library_serial,
                       filename_serial, details.lineno);
            log_string(fp, method);
            LFD_SET(fp->lfd, &le->lfdset);
        }

        /* Create a new callsite record. */
        if (!site) {
            site = __libc_malloc(sizeof(callsite));
            if (!site) {
                tmstats.btmalloc_failures++;
                goto fail;
            }

            /* Update parent and max-kids-per-parent stats. */
            if (!parent->kids)
                tmstats.calltree_parents++;
            nkids = 1;
            for (tmp = parent->kids; tmp; tmp = tmp->siblings)
                nkids++;
            if (nkids > tmstats.calltree_maxkids) {
                tmstats.calltree_maxkids = nkids;
                calltree_maxkids_parent = parent;
            }

            /* Insert the new site into the tree. */
            site->pc = pc;
            site->serial = ++callsite_serial_generator;
            LFD_ZERO(&site->lfdset);
            site->name = method;
            site->library = library;
            site->offset = details.loffset;
            site->parent = parent;
            site->siblings = parent->kids;
            parent->kids = site;
            site->kids = NULL;
        }

        /* Log the site with its parent, method, and offset. */
        log_event4(fp, TM_EVENT_CALLSITE, site->serial, parent->serial,
                   method_serial, details.foffset);
        LFD_SET(fp->lfd, &site->lfdset);

      upward:
        parent = site;
    } while (stack_index > 0);

    if (maxstack)
        calltree_maxstack_top = site;

    TM_EXIT_LOCK(t);
    return site;

  fail:
    TM_EXIT_LOCK(t);
    return NULL;
}
/*
 * Buffer the stack from top at low index to bottom at high, so that we can
 * reverse it in calltree.
 */
static void
stack_callback(void *pc, void *closure)
{
    stack_buffer_info *info = (stack_buffer_info*) closure;

    /*
     * If we run out of buffer, keep incrementing entries so that
     * backtrace can call us again with a bigger buffer.
     */
    if (info->entries < info->size)
        info->buffer[info->entries] = pc;
    ++info->entries;
}
/*
 * The caller MUST NOT be holding tmlock when calling backtrace.
 */
static callsite *
backtrace(tm_thread *t, int skip)
{
    callsite *site;
    stack_buffer_info *info = &t->backtrace_buf;
    void ** new_stack_buffer;
    size_t new_stack_buffer_size;

    t->suppress_tracing++;

    /*
     * NS_StackWalk can (on Windows) acquire a lock in the shared library
     * loader. Another thread might call malloc while holding that lock
     * (when loading a shared library). So we can't be in tmlock during
     * this call. For details, see
     * https://bugzilla.mozilla.org/show_bug.cgi?id=374829#c8
     */

    /* skip == 0 means |backtrace| should show up, so don't use skip + 1 */
    /* NB: this call is repeated below if the buffer is too small */
    info->entries = 0;
    NS_StackWalk(stack_callback, skip, info);

    /*
     * To avoid allocating in stack_callback (which, on Windows, is
     * called on a different thread from the one we're running on here),
     * reallocate here if it didn't have a big enough buffer (which
     * includes the first call on any thread), and call it again.
     */
    if (info->entries > info->size) {
        new_stack_buffer_size = 2 * info->entries;
        new_stack_buffer = __libc_realloc(info->buffer,
                               new_stack_buffer_size * sizeof(void*));
        if (!new_stack_buffer) {
            t->suppress_tracing--;
            return NULL;
        }
        info->buffer = new_stack_buffer;
        info->size = new_stack_buffer_size;

        /* and call NS_StackWalk again */
        info->entries = 0;
        NS_StackWalk(stack_callback, skip, info);

        PR_ASSERT(info->entries * 2 == new_stack_buffer_size); /* same stack */
    }

    if (info->entries == 0) {
        t->suppress_tracing--;
        return NULL;
    }

    site = calltree(info->buffer, info->entries, t);

    TM_ENTER_LOCK(t);
    tmstats.backtrace_calls++;
    if (!site) {
        tmstats.backtrace_failures++;
        PR_ASSERT(tmstats.backtrace_failures < 100);
    }
    TM_EXIT_LOCK(t);

    t->suppress_tracing--;
    return site;
}
{
984 FILE *trackfp
; /* for allocation tracking */
987 #define ALLOC_HEAP_SIZE 150000
989 static allocation alloc_heap
[ALLOC_HEAP_SIZE
];
990 static allocation
*alloc_freelist
= NULL
;
991 static int alloc_heap_initialized
= 0;
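/*
 * Note: alloc_allocentry below threads a free list through the entry.next
 * fields of the static alloc_heap array on first use, so the first
 * ALLOC_HEAP_SIZE allocation records cost no heap calls at all.  Only
 * overflow entries fall back to __libc_malloc, and alloc_freeentry tells
 * the two cases apart by address range.
 */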
static PLHashEntry *alloc_allocentry(void *pool, const void *key)
{
    allocation **listp, *alloc;
    int n;

    if (!alloc_heap_initialized) {
        n = ALLOC_HEAP_SIZE;
        listp = &alloc_freelist;
        for (alloc = alloc_heap; --n >= 0; alloc++) {
            *listp = alloc;
            listp = (allocation**) &alloc->entry.next;
        }
        *listp = NULL;
        alloc_heap_initialized = 1;
    }

    listp = &alloc_freelist;
    alloc = *listp;
    if (!alloc)
        return __libc_malloc(sizeof(allocation));
    *listp = (allocation*) alloc->entry.next;
    return &alloc->entry;
}
static void alloc_freeentry(void *pool, PLHashEntry *he, PRUintn flag)
{
    allocation *alloc;

    if (flag != HT_FREE_ENTRY)
        return;
    alloc = (allocation*) he;
    if ((PRUptrdiff)(alloc - alloc_heap) < (PRUptrdiff)ALLOC_HEAP_SIZE) {
        /* Came from the static heap: push it back on the free list. */
        alloc->entry.next = &alloc_freelist->entry;
        alloc_freelist = alloc;
    } else {
        __libc_free((void*) alloc);
    }
}
static PLHashAllocOps alloc_hashallocops = {
    generic_alloctable, generic_freetable,
    alloc_allocentry,   alloc_freeentry
};

static PLHashNumber hash_pointer(const void *key)
{
    return (PLHashNumber) key;
}

static PLHashTable *allocations = NULL;

static PLHashTable *new_allocations(void)
{
    allocations = PL_NewHashTable(200000, hash_pointer,
                                  PL_CompareValues, PL_CompareValues,
                                  &alloc_hashallocops, NULL);
    return allocations;
}

#define get_allocations() (allocations ? allocations : new_allocations())
#if defined(XP_MACOSX)

/* from malloc.c in Libc */
typedef void
malloc_logger_t(unsigned type, unsigned arg1, unsigned arg2, unsigned arg3,
                unsigned result, unsigned num_hot_frames_to_skip);

extern malloc_logger_t *malloc_logger;

#define MALLOC_LOG_TYPE_ALLOCATE        2
#define MALLOC_LOG_TYPE_DEALLOCATE      4
#define MALLOC_LOG_TYPE_HAS_ZONE        8
#define MALLOC_LOG_TYPE_CLEARED         64
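/*
 * Illustrative decoding, based on the handler below: a zone realloc arrives
 * as type == ALLOCATE | DEALLOCATE | HAS_ZONE (2 | 4 | 8).  HAS_ZONE shifts
 * the argument window by one, so args[0] is the old pointer, args[1] the
 * requested size, and result the new pointer.  A plain allocation reports
 * only a size in args[0], and a deallocation only the pointer being freed
 * in args[0].
 */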
static void
my_malloc_logger(unsigned type, unsigned arg1, unsigned arg2, unsigned arg3,
                 unsigned result, unsigned num_hot_frames_to_skip)
{
    unsigned all_args[3] = { arg1, arg2, arg3 };
    unsigned *args = all_args + ((type & MALLOC_LOG_TYPE_HAS_ZONE) ? 1 : 0);

    unsigned alloc_type =
        type & (MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_DEALLOCATE);
    tm_thread *t = tm_get_thread();

    if (alloc_type == (MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_DEALLOCATE)) {
        ReallocCallback((void*)args[0], (void*)result, args[1], 0, 0, t);
    } else if (alloc_type == MALLOC_LOG_TYPE_ALLOCATE) {
        /*
         * We don't get size/count information for calloc, so just use
         * MallocCallback.
         */
        MallocCallback((void*)result, args[0], 0, 0, t);
    } else if (alloc_type == MALLOC_LOG_TYPE_DEALLOCATE) {
        FreeCallback((void*)args[0], 0, 0, t);
    }
}

static void
StartupHooker(void)
{
    PR_ASSERT(!malloc_logger);
    malloc_logger = my_malloc_logger;
}
static void
ShutdownHooker(void)
{
    PR_ASSERT(malloc_logger == my_malloc_logger);
    malloc_logger = NULL;
}
#elif defined(XP_UNIX)

/*
 * We can't use glibc's malloc hooks because they can't be used in a
 * threadsafe manner. They require unsetting the hooks to call into the
 * original malloc implementation, and then resetting them when the
 * original implementation returns. If another thread calls the same
 * allocation function while the hooks are unset, we have no chance to
 * intercept the call.
 */
)
1120 PRUint32 start
, end
;
1124 if (!tracing_enabled
|| !PR_Initialized() ||
1125 (t
= tm_get_thread())->suppress_tracing
!= 0) {
1126 return __libc_malloc(size
);
1129 t
->suppress_tracing
++;
1130 start
= PR_IntervalNow();
1131 ptr
= __libc_malloc(size
);
1132 end
= PR_IntervalNow();
1133 t
->suppress_tracing
--;
1135 MallocCallback(ptr
, size
, start
, end
, t
);
1140 NS_EXTERNAL_VIS_(__ptr_t
)
1141 calloc(size_t count
, size_t size
)
1143 PRUint32 start
, end
;
1147 if (!tracing_enabled
|| !PR_Initialized() ||
1148 (t
= tm_get_thread())->suppress_tracing
!= 0) {
1149 return __libc_calloc(count
, size
);
1152 t
->suppress_tracing
++;
1153 start
= PR_IntervalNow();
1154 ptr
= __libc_calloc(count
, size
);
1155 end
= PR_IntervalNow();
1156 t
->suppress_tracing
--;
1158 CallocCallback(ptr
, count
, size
, start
, end
, t
);
NS_EXTERNAL_VIS_(__ptr_t)
realloc(__ptr_t oldptr, size_t size)
{
    PRUint32 start, end;
    __ptr_t ptr;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        return __libc_realloc(oldptr, size);
    }

    t->suppress_tracing++;
    start = PR_IntervalNow();
    ptr = __libc_realloc(oldptr, size);
    end = PR_IntervalNow();
    t->suppress_tracing--;

    /* FIXME bug 392008: We could race with reallocation of oldptr. */
    ReallocCallback(oldptr, ptr, size, start, end, t);

    return ptr;
}
NS_EXTERNAL_VIS_(void*)
valloc(size_t size)
{
    PRUint32 start, end;
    __ptr_t ptr;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        return __libc_valloc(size);
    }

    t->suppress_tracing++;
    start = PR_IntervalNow();
    ptr = __libc_valloc(size);
    end = PR_IntervalNow();
    t->suppress_tracing--;

    MallocCallback(ptr, size, start, end, t);

    return ptr;
}
NS_EXTERNAL_VIS_(void*)
memalign(size_t boundary, size_t size)
{
    PRUint32 start, end;
    __ptr_t ptr;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        return __libc_memalign(boundary, size);
    }

    t->suppress_tracing++;
    start = PR_IntervalNow();
    ptr = __libc_memalign(boundary, size);
    end = PR_IntervalNow();
    t->suppress_tracing--;

    MallocCallback(ptr, size, start, end, t);

    return ptr;
}
NS_EXTERNAL_VIS_(int)
posix_memalign(void **memptr, size_t alignment, size_t size)
{
    __ptr_t ptr = memalign(alignment, size);
    if (!ptr)
        return ENOMEM;
    *memptr = ptr;
    return 0;
}
NS_EXTERNAL_VIS_(void)
free(__ptr_t ptr)
{
    PRUint32 start, end;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        __libc_free(ptr);
        return;
    }

    t->suppress_tracing++;
    start = PR_IntervalNow();
    __libc_free(ptr);
    end = PR_IntervalNow();
    t->suppress_tracing--;

    /* FIXME bug 392008: We could race with reallocation of ptr. */

    FreeCallback(ptr, start, end, t);
}
NS_EXTERNAL_VIS_(void)
cfree(void *ptr)
{
    free(ptr);
}

#define StartupHooker()  PR_BEGIN_MACRO PR_END_MACRO
#define ShutdownHooker() PR_BEGIN_MACRO PR_END_MACRO
#elif defined(XP_WIN32)

/* See nsWinTraceMalloc.cpp. */

#endif
static const char magic[] = NS_TRACE_MALLOC_MAGIC;

static void
log_header(int logfd)
{
    uint32 ticksPerSec = PR_htonl(PR_TicksPerSecond());
    (void) write(logfd, magic, NS_TRACE_MALLOC_MAGIC_SIZE);
    (void) write(logfd, &ticksPerSec, sizeof ticksPerSec);
}
PR_IMPLEMENT(void)
NS_TraceMallocStartup(int logfd)
{
    /* We must be running on the primordial thread. */
    PR_ASSERT(tracing_enabled == 0);
    PR_ASSERT(logfp == &default_logfile);
    tracing_enabled = (logfd >= 0);

    if (tracing_enabled) {
        PR_ASSERT(logfp->simsize == 0); /* didn't overflow startup buffer */

        /* Log everything in logfp (aka default_logfile)'s buffer to logfd. */
        logfp->fd = logfd;
        logfile_list = &default_logfile;
        logfp->prevp = &logfile_list;
        logfile_tail = &logfp->next;
        log_header(logfd);
    }

    atexit(NS_TraceMallocShutdown);

    /*
     * We only allow one thread until NS_TraceMallocStartup is called.
     * When it is, we have to initialize tls_index before allocating tmlock
     * since get_tm_index uses NULL-tmlock to detect tls_index being
     * uninitialized.
     */
    main_thread.suppress_tracing++;
    TM_CREATE_TLS_INDEX(tls_index);
    TM_SET_TLS_DATA(tls_index, &main_thread);
    tmlock = PR_NewLock();
    main_thread.suppress_tracing--;

    if (tracing_enabled)
        StartupHooker();
}
/*
 * Options for log files, with the log file name either as the next option
 * or separated by '=' (e.g. "./mozilla --trace-malloc malloc.log" or
 * "./mozilla --trace-malloc=malloc.log").
 */
static const char TMLOG_OPTION[] = "--trace-malloc";
static const char SDLOG_OPTION[] = "--shutdown-leaks";

#define SHOULD_PARSE_ARG(name_, log_, arg_) \
    (0 == strncmp(arg_, name_, sizeof(name_) - 1))

#define PARSE_ARG(name_, log_, argv_, i_, consumed_)                          \
    PR_BEGIN_MACRO                                                            \
        char _nextchar = argv_[i_][sizeof(name_) - 1];                        \
        if (_nextchar == '=') {                                               \
            log_ = argv_[i_] + sizeof(name_);                                 \
            consumed_ = 1;                                                    \
        } else if (_nextchar == '\0') {                                       \
            log_ = argv_[i_+1];                                               \
            consumed_ = 2;                                                    \
        }                                                                     \
    PR_END_MACRO
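/*
 * Example: given argv == { "app", "--trace-malloc=tm.log" }, the '=' branch
 * sets the log name to "tm.log" and consumed to 1; given
 * { "app", "--trace-malloc", "tm.log" }, the '\0' branch takes the next
 * argv entry and sets consumed to 2 so the scan below skips both words.
 */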
PR_IMPLEMENT(int)
NS_TraceMallocStartupArgs(int argc, char **argv)
{
    int i, logfd = -1, consumed, logflags;
    char *tmlogname = NULL, *sdlogname_local = NULL;

    /*
     * Look for the --trace-malloc <logfile> option early, to avoid missing
     * early mallocs (we miss static constructors whose output overflows the
     * log file's static 16K output buffer).
     */
    for (i = 1; i < argc; i += consumed) {
        consumed = 0;
        if (SHOULD_PARSE_ARG(TMLOG_OPTION, tmlogname, argv[i]))
            PARSE_ARG(TMLOG_OPTION, tmlogname, argv, i, consumed);
        else if (SHOULD_PARSE_ARG(SDLOG_OPTION, sdlogname_local, argv[i]))
            PARSE_ARG(SDLOG_OPTION, sdlogname_local, argv, i, consumed);

        if (consumed) {
#ifndef XP_WIN32 /* If we don't comment this out, it will crash Windows. */
            int j;

            /* Now remove --trace-malloc and its argument from argv. */
            argc -= consumed;
            for (j = i; j < argc; ++j)
                argv[j] = argv[j+consumed];
            argv[argc] = NULL;

            consumed = 0; /* don't advance next iteration */
#endif
        } else {
            consumed = 1;
        }
    }

    if (tmlogname) {
#ifdef XP_UNIX
        int pipefds[2];
#endif

        switch (*tmlogname) {
#ifdef XP_UNIX
          case '|':
            if (pipe(pipefds) == 0) {
                pid_t pid = fork();
                if (pid == 0) {
                    /* In child: set up stdin, parse args, and exec. */
                    int maxargc, nargc;
                    char **nargv, *token;

                    if (pipefds[0] != 0) {
                        dup2(pipefds[0], 0);
                        close(pipefds[0]);
                    }
                    close(pipefds[1]);

                    tmlogname = strtok(tmlogname + 1, " \t");
                    maxargc = 3;
                    nargc = 0;
                    nargv = (char **) malloc((maxargc+1) * sizeof(char *));
                    if (!nargv) exit(1);
                    nargv[nargc++] = tmlogname;
                    while ((token = strtok(NULL, " \t")) != NULL) {
                        if (nargc == maxargc) {
                            maxargc *= 2;
                            nargv = (char **)
                                realloc(nargv, (maxargc+1) * sizeof(char*));
                            if (!nargv) exit(1);
                        }
                        nargv[nargc++] = token;
                    }
                    nargv[nargc] = NULL;

                    (void) setsid();
                    execvp(tmlogname, nargv);
                    exit(127);
                }

                if (pid > 0) {
                    /* In parent: set logfd to the pipe's write side. */
                    close(pipefds[0]);
                    logfd = pipefds[1];
                }
            }
            if (logfd < 0) {
                fprintf(stderr,
                        "%s: can't pipe to trace-malloc child process %s: %s\n",
                        argv[0], tmlogname, strerror(errno));
                exit(1);
            }
            break;

          case '-':
            /* Don't log from startup, but do prepare to log later. */
            /* XXX traditional meaning of '-' as option argument is "stdin" or "stdout" */
            if (tmlogname[1] == '\0')
                break;
            /* FALL THROUGH */
#endif /*XP_UNIX*/

          default:
            logflags = O_CREAT | O_WRONLY | O_TRUNC;
#if defined(XP_WIN32)
            /*
             * Avoid translations on WIN32.
             */
            logflags |= O_BINARY;
#endif
            logfd = open(tmlogname, logflags, 0644);
            if (logfd < 0) {
                fprintf(stderr,
                        "%s: can't create trace-malloc log named %s: %s\n",
                        argv[0], tmlogname, strerror(errno));
                exit(1);
            }
            break;
        }
    }

    if (sdlogname_local) {
        strncpy(sdlogname, sdlogname_local, sizeof(sdlogname));
        sdlogname[sizeof(sdlogname) - 1] = '\0';
    }

    NS_TraceMallocStartup(logfd);
    return argc;
}
PR_IMPLEMENT(void)
NS_TraceMallocShutdown(void)
{
    logfile *fp;

    if (sdlogname[0])
        NS_TraceMallocDumpAllocations(sdlogname);

    if (tmstats.backtrace_failures) {
        fprintf(stderr,
                "TraceMalloc backtrace failures: %lu (malloc %lu dladdr %lu)\n",
                (unsigned long) tmstats.backtrace_failures,
                (unsigned long) tmstats.btmalloc_failures,
                (unsigned long) tmstats.dladdr_failures);
    }
    while ((fp = logfile_list) != NULL) {
        logfile_list = fp->next;
        log_tmstats(fp);
        flush_logfile(fp);
        if (fp->fd >= 0) {
            close(fp->fd);
            fp->fd = -1;
        }
        if (fp != &default_logfile) {
            if (fp == logfp)
                logfp = &default_logfile;
            __libc_free((void*) fp);
        }
    }
    if (tmlock) {
        PRLock *lock = tmlock;
        tmlock = NULL;
        PR_DestroyLock(lock);
    }
    if (tracing_enabled) {
        tracing_enabled = 0;
        ShutdownHooker();
    }
}
PR_IMPLEMENT(void)
NS_TraceMallocDisable(void)
{
    tm_thread *t = tm_get_thread();
    logfile *fp;
    uint32 sample;

    /* Robustify in case of duplicate call. */
    PR_ASSERT(tracing_enabled);
    if (tracing_enabled == 0)
        return;

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    for (fp = logfile_list; fp; fp = fp->next)
        flush_logfile(fp);
    sample = --tracing_enabled;
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
    if (sample == 0)
        ShutdownHooker();
}
PR_IMPLEMENT(void)
NS_TraceMallocEnable(void)
{
    tm_thread *t = tm_get_thread();
    uint32 sample;

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    sample = ++tracing_enabled;
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
    if (sample == 1)
        StartupHooker();
}
PR_IMPLEMENT(int)
NS_TraceMallocChangeLogFD(int fd)
{
    logfile *oldfp, *fp;
    struct stat sb;
    tm_thread *t = tm_get_thread();

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    oldfp = logfp;
    if (oldfp->fd != fd) {
        flush_logfile(oldfp);
        fp = get_logfile(fd);
        if (!fp) {
            TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
            return -2;
        }
        if (fd >= 0 && fstat(fd, &sb) == 0 && sb.st_size == 0)
            log_header(fd);
        logfp = fp;
    }
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
    return oldfp->fd;
}
static PRIntn
lfd_clr_enumerator(PLHashEntry *he, PRIntn i, void *arg)
{
    lfdset_entry *le = (lfdset_entry*) he;
    logfile *fp = (logfile*) arg;

    LFD_CLR(fp->lfd, &le->lfdset);
    return HT_ENUMERATE_NEXT;
}

static void
lfd_clr_walk(callsite *site, logfile *fp)
{
    callsite *kid;

    LFD_CLR(fp->lfd, &site->lfdset);
    for (kid = site->kids; kid; kid = kid->siblings)
        lfd_clr_walk(kid, fp);
}
PR_IMPLEMENT(void)
NS_TraceMallocCloseLogFD(int fd)
{
    logfile *fp;
    tm_thread *t = tm_get_thread();

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);

    fp = get_logfile(fd);
    if (fp) {
        flush_logfile(fp);
        if (fp == &default_logfile) {
            /* Leave default_logfile in logfile_list with an fd of -1. */
            fp->fd = -1;

            /* NB: we can never free lfd 0, it belongs to default_logfile. */
            PR_ASSERT(fp->lfd == 0);
        } else {
            /* Clear fp->lfd in all possible lfdsets. */
            PL_HashTableEnumerateEntries(libraries, lfd_clr_enumerator, fp);
            PL_HashTableEnumerateEntries(methods, lfd_clr_enumerator, fp);
            lfd_clr_walk(&calltree_root, fp);

            /* Unlink fp from logfile_list, freeing lfd for reallocation. */
            *fp->prevp = fp->next;
            if (!fp->next) {
                PR_ASSERT(logfile_tail == &fp->next);
                logfile_tail = fp->prevp;
            }

            /* Reset logfp if we must, then free fp. */
            if (fp == logfp)
                logfp = &default_logfile;
            __libc_free((void*) fp);
        }
    }

    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
    close(fd);
}
PR_IMPLEMENT(void)
NS_TraceMallocLogTimestamp(const char *caption)
{
    logfile *fp;
#ifdef XP_UNIX
    struct timeval tv;
#endif
#ifdef XP_WIN32
    struct _timeb tb;
#endif
    tm_thread *t = tm_get_thread();

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);

    fp = logfp;
    log_byte(fp, TM_EVENT_TIMESTAMP);

#ifdef XP_UNIX
    gettimeofday(&tv, NULL);
    log_uint32(fp, (uint32) tv.tv_sec);
    log_uint32(fp, (uint32) tv.tv_usec);
#endif
#ifdef XP_WIN32
    _ftime(&tb);
    log_uint32(fp, (uint32) tb.time);
    log_uint32(fp, (uint32) tb.millitm);
#endif
    log_string(fp, caption);

    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
}
static void
print_stack(FILE *ofp, callsite *site)
{
    while (site) {
        if (site->name || site->parent) {
            fprintf(ofp, "%s[%s +0x%X]\n",
                    site->name, site->library, site->offset);
        }
        site = site->parent;
    }
}
static PRIntn
allocation_enumerator(PLHashEntry *he, PRIntn i, void *arg)
{
    allocation *alloc = (allocation*) he;
    FILE *ofp = (FILE*) arg;
    callsite *site = (callsite*) he->value;

    extern const char* nsGetTypeName(const void* ptr);
    unsigned long *p, *end;

    fprintf(ofp, "%p <%s> (%lu)\n",
            he->key,
            nsGetTypeName(he->key),
            (unsigned long) alloc->size);

    for (p   = (unsigned long*) he->key,
         end = (unsigned long*) ((char*)he->key + alloc->size);
         p < end; ++p) {
        fprintf(ofp, "\t0x%08lX\n", *p);
    }

    print_stack(ofp, site);
    return HT_ENUMERATE_NEXT;
}
PR_IMPLEMENT(void)
NS_TraceStack(int skip, FILE *ofp)
{
    callsite *site;
    tm_thread *t = tm_get_thread();

    site = backtrace(t, skip + 1);
    while (site) {
        if (site->name || site->parent) {
            fprintf(ofp, "%s[%s +0x%X]\n",
                    site->name, site->library, site->offset);
        }
        site = site->parent;
    }
}
PR_IMPLEMENT(int)
NS_TraceMallocDumpAllocations(const char *pathname)
{
    FILE *ofp;
    int rv;

    tm_thread *t = tm_get_thread();

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);

    ofp = fopen(pathname, WRITE_FLAGS);
    if (ofp) {
        if (allocations) {
            PL_HashTableEnumerateEntries(allocations, allocation_enumerator,
                                         ofp);
        }
        rv = ferror(ofp) ? -1 : 0;
        fclose(ofp);
    } else {
        rv = -1;
    }

    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);

    return rv;
}
PR_IMPLEMENT(void)
NS_TraceMallocFlushLogfiles(void)
{
    logfile *fp;
    tm_thread *t = tm_get_thread();

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);

    for (fp = logfile_list; fp; fp = fp->next)
        flush_logfile(fp);

    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
}
PR_IMPLEMENT(void)
NS_TrackAllocation(void* ptr, FILE *ofp)
{
    allocation *alloc;
    tm_thread *t = tm_get_thread();

    fprintf(ofp, "Trying to track %p\n", (void*) ptr);

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    if (get_allocations()) {
        alloc = (allocation*)
                *PL_HashTableRawLookup(allocations, hash_pointer(ptr), ptr);
        if (alloc) {
            fprintf(ofp, "Tracking %p\n", (void*) ptr);
            alloc->trackfp = ofp;
        } else {
            fprintf(ofp, "Not tracking %p\n", (void*) ptr);
        }
    }
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
}
PR_IMPLEMENT(void)
MallocCallback(void *ptr, size_t size, PRUint32 start, PRUint32 end, tm_thread *t)
{
    callsite *site;
    PLHashEntry *he;
    allocation *alloc;

    if (!tracing_enabled || t->suppress_tracing != 0)
        return;

    site = backtrace(t, 2);

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    tmstats.malloc_calls++;
    if (!ptr) {
        tmstats.malloc_failures++;
    } else {
        if (site) {
            log_event5(logfp, TM_EVENT_MALLOC,
                       site->serial, start, end - start,
                       (uint32)NS_PTR_TO_INT32(ptr), size);
        }
        if (get_allocations()) {
            he = PL_HashTableAdd(allocations, ptr, site);
            if (he) {
                alloc = (allocation*) he;
                alloc->size = size;
                alloc->trackfp = NULL;
            }
        }
    }
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
}
PR_IMPLEMENT(void)
CallocCallback(void *ptr, size_t count, size_t size, PRUint32 start, PRUint32 end, tm_thread *t)
{
    callsite *site;
    PLHashEntry *he;
    allocation *alloc;

    if (!tracing_enabled || t->suppress_tracing != 0)
        return;

    site = backtrace(t, 2);

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    tmstats.calloc_calls++;
    if (!ptr) {
        tmstats.calloc_failures++;
    } else {
        size *= count;
        if (site) {
            log_event5(logfp, TM_EVENT_CALLOC,
                       site->serial, start, end - start,
                       (uint32)NS_PTR_TO_INT32(ptr), size);
        }
        if (get_allocations()) {
            he = PL_HashTableAdd(allocations, ptr, site);
            if (he) {
                alloc = (allocation*) he;
                alloc->size = size;
                alloc->trackfp = NULL;
            }
        }
    }
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
}
PR_IMPLEMENT(void)
ReallocCallback(void *oldptr, void *ptr, size_t size,
                PRUint32 start, PRUint32 end, tm_thread *t)
{
    callsite *oldsite, *site;
    size_t oldsize;
    PLHashNumber hash;
    PLHashEntry **hep, *he;
    allocation *alloc;
    FILE *trackfp = NULL;

    if (!tracing_enabled || t->suppress_tracing != 0)
        return;

    site = backtrace(t, 2);

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    tmstats.realloc_calls++;
    oldsite = NULL;
    oldsize = 0;
    hep = NULL;
    he = NULL;
    if (oldptr && get_allocations()) {
        hash = hash_pointer(oldptr);
        hep = PL_HashTableRawLookup(allocations, hash, oldptr);
        he = *hep;
        if (he) {
            oldsite = (callsite*) he->value;
            alloc = (allocation*) he;
            oldsize = alloc->size;
            trackfp = alloc->trackfp;
            if (trackfp) {
                fprintf(alloc->trackfp,
                        "\nrealloc(%p, %lu), oldsize %lu, alloc site %p\n",
                        (void*) ptr, (unsigned long) size,
                        (unsigned long) oldsize, (void*) oldsite);
                NS_TraceStack(1, trackfp);
            }
        }
    }
    if (!ptr && size) {
        /*
         * When realloc() fails, the original block is not freed or moved, so
         * we'll leave the allocation entry untouched.
         */
        tmstats.realloc_failures++;
    } else {
        if (site) {
            log_event8(logfp, TM_EVENT_REALLOC,
                       site->serial, start, end - start,
                       (uint32)NS_PTR_TO_INT32(ptr), size,
                       oldsite ? oldsite->serial : 0,
                       (uint32)NS_PTR_TO_INT32(oldptr), oldsize);
        }
        if (ptr && allocations) {
            if (ptr != oldptr) {
                /*
                 * If we're reallocating (not allocating new space by passing
                 * null to realloc) and realloc moved the block, free oldptr.
                 */
                if (he)
                    PL_HashTableRawRemove(allocations, hep, he);

                /* Record the new allocation now, setting he. */
                he = PL_HashTableAdd(allocations, ptr, site);
            } else {
                /*
                 * If we haven't yet recorded an allocation (possibly due to a
                 * temporary memory shortage), do it now.
                 */
                if (!he)
                    he = PL_HashTableAdd(allocations, ptr, site);
            }
            if (he) {
                alloc = (allocation*) he;
                alloc->size = size;
                alloc->trackfp = trackfp;
            }
        }
    }
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
}
PR_IMPLEMENT(void)
FreeCallback(void *ptr, PRUint32 start, PRUint32 end, tm_thread *t)
{
    PLHashEntry **hep, *he;
    callsite *site;
    allocation *alloc;

    if (!tracing_enabled || t->suppress_tracing != 0)
        return;

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    tmstats.free_calls++;
    if (!ptr) {
        tmstats.null_free_calls++;
    } else {
        if (get_allocations()) {
            hep = PL_HashTableRawLookup(allocations, hash_pointer(ptr), ptr);
            he = *hep;
            if (he) {
                site = (callsite*) he->value;
                if (site) {
                    alloc = (allocation*) he;
                    if (alloc->trackfp) {
                        fprintf(alloc->trackfp, "\nfree(%p), alloc site %p\n",
                                (void*) ptr, (void*) site);
                        NS_TraceStack(1, alloc->trackfp);
                    }
                    log_event5(logfp, TM_EVENT_FREE,
                               site->serial, start, end - start,
                               (uint32)NS_PTR_TO_INT32(ptr), alloc->size);
                }
                PL_HashTableRawRemove(allocations, hep, he);
            }
        }
    }
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
}
PR_IMPLEMENT(nsTMStackTraceID)
NS_TraceMallocGetStackTrace(void)
{
    callsite *site;
    tm_thread *t = tm_get_thread();

    site = backtrace(t, 2);
    return (nsTMStackTraceID) site;
}

PR_IMPLEMENT(void)
NS_TraceMallocPrintStackTrace(FILE *ofp, nsTMStackTraceID id)
{
    print_stack(ofp, (callsite*) id);
}