// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Author: Maxim Lifantsev
//
//   ./heap-checker_unittest
//
// If the unittest crashes because it can't find pprof, try:
//   PPROF_PATH=/usr/local/someplace/bin/pprof ./heap-checker_unittest
//
// To test that the whole-program heap checker will actually cause a leak, try:
//   HEAPCHECK_TEST_LEAK= ./heap-checker_unittest
//   HEAPCHECK_TEST_LOOP_LEAK= ./heap-checker_unittest
//
// Note: Both of the above commands *should* abort with an error message.
//
// CAVEAT: Do not use vector<> or string on-heap objects in this test,
// otherwise the test can sometimes fail for tricky leak checks
// when we want some allocated object not to be found live by the heap checker.
// This can happen with memory allocators like tcmalloc that can allocate
// heap objects back to back without any book-keeping data in between.
// What happens is that the end-of-storage pointer of a live vector
// (or of a string, depending on the STL implementation used)
// can happen to point at another heap-allocated object
// that is not otherwise reachable and that we do not want to be reachable.
//
// The implication of this for real leak checking
// is just one more chance for the liveness flood to be inexact
// (see the comment in our .h file).
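//
// For illustration only (a hypothetical sketch, not compiled here), the
// failure mode described above looks roughly like this:
//   vector<int>* v = new vector<int>;  // live object
//   v->reserve(4);                     // v's end-of-storage pointer now points
//                                      // just past its heap buffer
//   int* x = new int[4];               // may be placed right after v's buffer,
//                                      // so v's end pointer happens to equal x
//                                      // and the liveness flood keeps x "live"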

#include "config_for_unittests.h"

#if defined HAVE_STDINT_H
#include <stdint.h>             // to get uint16_t (ISO naming madness)
#elif defined HAVE_INTTYPES_H
#include <inttypes.h>           // another place uint16_t might be defined

#include <sys/types.h>
#include <errno.h>              // errno
#include <unistd.h>             // for sleep(), geteuid()
#include <fcntl.h>              // for open(), close()
#ifdef HAVE_EXECINFO_H
#include <execinfo.h>           // backtrace
#include <grp.h>                // getgrent, getgrnam

#include <iostream>             // for cout
#include <iomanip>              // for hex

#include "base/commandlineflags.h"
#include "base/googleinit.h"
#include "base/logging.h"
#include "base/thread_lister.h"
#include <gperftools/heap-checker.h>
#include "memory_region_map.h"
#include <gperftools/malloc_extension.h>
#include <gperftools/stacktrace.h>

// On systems (like freebsd) that don't define MAP_ANONYMOUS, use the old
// form of the name instead.
#ifndef MAP_ANONYMOUS
# define MAP_ANONYMOUS MAP_ANON

// ========================================================================= //

// TODO(maxim): write a shell script to test that these indeed crash us
//              (i.e. we do detect leaks)
//              Maybe add more such crash tests.

DEFINE_bool(test_leak,
            EnvToBool("HEAP_CHECKER_TEST_TEST_LEAK", false),
            "If should cause a leak crash");
DEFINE_bool(test_loop_leak,
            EnvToBool("HEAP_CHECKER_TEST_TEST_LOOP_LEAK", false),
            "If should cause a looped leak crash");
DEFINE_bool(test_register_leak,
            EnvToBool("HEAP_CHECKER_TEST_TEST_REGISTER_LEAK", false),
            "If should cause a leak crash by hiding a pointer "
            "that is only in a register");
DEFINE_bool(test_cancel_global_check,
            EnvToBool("HEAP_CHECKER_TEST_TEST_CANCEL_GLOBAL_CHECK", false),
            "If should test HeapLeakChecker::CancelGlobalCheck "
            "when --test_leak or --test_loop_leak are given; "
            "the test should not fail then");
DEFINE_bool(maybe_stripped,
            EnvToBool("HEAP_CHECKER_TEST_MAYBE_STRIPPED", true),
            "If we think we can be a stripped binary");
DEFINE_bool(interfering_threads,
            EnvToBool("HEAP_CHECKER_TEST_INTERFERING_THREADS", true),
            "If we should use threads trying "
            "to interfere with leak checking");
DEFINE_bool(hoarding_threads,
            EnvToBool("HEAP_CHECKER_TEST_HOARDING_THREADS", true),
            "If threads (usually the manager thread) are known "
            "to retain some old state in their global buffers, "
            "so that it's hard to force leaks when threads are around");
// TODO(maxim): Change the default to false
// when the standard environment uses NPTL threads:
// they do not seem to have this problem.
DEFINE_bool(no_threads,
            EnvToBool("HEAP_CHECKER_TEST_NO_THREADS", false),
            "If we should not use any threads");
// This is used so we can make can_create_leaks_reliably true
// for any pthread implementation and test with that.

DECLARE_int64(heap_check_max_pointer_offset);   // in heap-checker.cc
DECLARE_string(heap_check);                     // in heap-checker.cc

#define WARN_IF(cond, msg)   LOG_IF(WARNING, cond, msg)

// This is an evil macro!  Be very careful using it...
#undef VLOG          // and we start by evilly overriding logging.h VLOG
#define VLOG(lvl)    if (FLAGS_verbose >= (lvl))  cout << "\n"
// This is, likewise, evil
#define LOGF         VLOG(INFO)
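
// For illustration only (hypothetical): since the overriding VLOG expands to
// a bare 'if' with no 'else', the usual if-macro hazard applies, e.g.
//   if (cond) VLOG(1) << "hi"; else DoOther();  // 'else' binds to VLOG's 'if'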

static void RunHeapBusyThreads();  // below

  virtual ~Closure() { }
  virtual void Run() = 0;

class Callback0 : public Closure {
  typedef void (*FunctionSignature)();
  inline Callback0(FunctionSignature f) : f_(f) {}
  virtual void Run() { (*f_)(); delete this; }
  FunctionSignature f_;

template <class P1> class Callback1 : public Closure {
  typedef void (*FunctionSignature)(P1);
  inline Callback1<P1>(FunctionSignature f, P1 p1) : f_(f), p1_(p1) {}
  virtual void Run() { (*f_)(p1_); delete this; }
  FunctionSignature f_;

template <class P1, class P2> class Callback2 : public Closure {
  typedef void (*FunctionSignature)(P1,P2);
  inline Callback2<P1,P2>(FunctionSignature f, P1 p1, P2 p2)
    : f_(f), p1_(p1), p2_(p2) {}
  virtual void Run() { (*f_)(p1_, p2_); delete this; }
  FunctionSignature f_;

inline Callback0* NewCallback(void (*function)()) {
  return new Callback0(function);

inline Callback1<P1>* NewCallback(void (*function)(P1), P1 p1) {
  return new Callback1<P1>(function, p1);

template <class P1, class P2>
inline Callback2<P1,P2>* NewCallback(void (*function)(P1,P2), P1 p1, P2 p2) {
  return new Callback2<P1,P2>(function, p1, p2);
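
// Usage sketch (for illustration; these heap-allocated callbacks delete
// themselves after running):
//   Closure* c = NewCallback(&DoAllocHidden, size, &ptr);
//   c->Run();  // calls DoAllocHidden(size, &ptr), then 'delete this'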

// Set to true at end of main, so threads know.  Not entirely thread-safe,
// but probably good enough.
static bool g_have_exited_main = false;

// If we can reliably create leaks (i.e. make leaked object
// really unreachable from any global data).
static bool can_create_leaks_reliably = false;

// We use a simple allocation wrapper
// to make sure we wipe out the newly allocated objects
// in case they still happened to contain some pointer data
// accidentally left by the memory allocator.
struct Initialized { };
static Initialized initialized;

void* operator new(size_t size, const Initialized&) {
  // Below we use "p = new(initialized) Foo[1];" and "delete[] p;"
  // instead of "p = new(initialized) Foo;"
  // when we need to delete an allocated object.
  void* p = malloc(size);

void* operator new[](size_t size, const Initialized&) {
  char* p = new char[size];

static void DoWipeStack(int n);   // defined below
static void WipeStack() { DoWipeStack(20); }

static void Pause() {
  poll(NULL, 0, 77);  // time for thread activity in HeapBusyThreadBody

  // Indirectly test malloc_extension.*:
  CHECK(MallocExtension::instance()->VerifyAllMemory());
  int histogram[kMallocHistogramSize];
  if (MallocExtension::instance()
          ->MallocMemoryStats(&blocks, &total, histogram)  &&  total != 0) {
    VLOG(3) << "Malloc stats: " << blocks << " blocks of "
            << total << " bytes";
    for (int i = 0; i < kMallocHistogramSize; ++i) {
      VLOG(3) << " Malloc histogram at " << i << " : " << histogram[i];

  WipeStack();  // e.g. MallocExtension::VerifyAllMemory
                // can leave pointers to heap objects on stack

// Make gcc think a pointer is "used"
static void Use(T** foo) {
  VLOG(2) << "Dummy-using " << static_cast<void*>(*foo) << " at " << foo;

// Arbitrary value, but not such that xor'ing with it is likely
// to map one valid pointer to another valid pointer:
static const uintptr_t kHideMask =
  static_cast<uintptr_t>(0xF03A5F7BF03A5F7BLL);

// Helpers to hide a pointer from live data traversal.
// We just xor the pointer so that (with high probability)
// it's not a valid address of a heap object anymore.
// Both Hide and UnHide must be executed within RunHidden() below
// to prevent leaving stale data on the active stack that can be a pointer
// to a heap object that is not actually reachable via live variables.
// (UnHide might leave a heap pointer value for an object
// that will be deallocated, but later another object
// can be allocated at the same heap address.)
static void Hide(T** ptr) {
  // we cast values, not dereferenced pointers, so no aliasing issues:
  *ptr = reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(*ptr) ^ kHideMask);
  VLOG(2) << "hid: " << static_cast<void*>(*ptr);

static void UnHide(T** ptr) {
  VLOG(2) << "unhiding: " << static_cast<void*>(*ptr);
  // we cast values, not dereferenced pointers, so no aliasing issues:
  *ptr = reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(*ptr) ^ kHideMask);

static void LogHidden(const char* message, const void* ptr) {
  LOGF << message << " : "
       << ptr << " ^ " << reinterpret_cast<void*>(kHideMask) << endl;

// volatile to fool the compiler against inlining the calls to these
void (*volatile run_hidden_ptr)(Closure* c, int n);
void (*volatile wipe_stack_ptr)(int n);

static void DoRunHidden(Closure* c, int n) {
  VLOG(10) << "Level " << n << " at " << &n;
  (*run_hidden_ptr)(c, n-1);
  (*wipe_stack_ptr)(n);
  sleep(0);  // undo -foptimize-sibling-calls

/*static*/ void DoWipeStack(int n) {
  VLOG(10) << "Wipe level " << n << " at " << &n;
  volatile int arr[sz];
  for (int i = 0; i < sz; ++i) arr[i] = 0;
  (*wipe_stack_ptr)(n-1);
  sleep(0);  // undo -foptimize-sibling-calls

// This executes closure c several stack frames down from the current one
// and then makes an effort to also wipe out the stack data that was used by
// the closure.
// This way we prevent the leak checker from finding any temporary pointers
// of the closure execution on the stack and deciding that
// these pointers (and the pointed objects) are still live.
static void RunHidden(Closure* c) {

static void DoAllocHidden(size_t size, void** ptr) {
  void* p = new(initialized) char[size];
  Use(&p);  // use only hidden versions
  VLOG(2) << "Allocated hidden " << p << " at " << &p;
  *ptr = p;  // assign the hidden versions

static void* AllocHidden(size_t size) {
  RunHidden(NewCallback(DoAllocHidden, size, &r));

static void DoDeAllocHidden(void** ptr) {
  Use(ptr);  // use only hidden versions
  VLOG(2) << "Deallocating hidden " << p;
  delete [] reinterpret_cast<char*>(p);

static void DeAllocHidden(void** ptr) {
  RunHidden(NewCallback(DoDeAllocHidden, ptr));
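
// For illustration (a hypothetical sketch of the pattern the tests below
// follow): allocations meant to leak are made and logged through the hidden
// wrappers so that no live stack or global pointer to them survives:
//   void* foo = AllocHidden(100 * sizeof(int));  // pointer kept xor-hidden
//   LogHidden("Leaking", foo);
//   ... run a HeapLeakChecker over this scope ...
//   DeAllocHidden(&foo);                         // un-hides, then delete []s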

void PreventHeapReclaiming(size_t size) {
  static void** no_reclaim_list = NULL;
  CHECK(size >= sizeof(void*));
  // We can't use malloc_reclaim_memory flag in opt mode as debugallocation.cc
  // is not used.  Instead we allocate a bunch of heap objects that are
  // of the same size as what we are going to leak to ensure that the object
  // we are about to leak is not at the same address as some old allocated
  // and freed object that might still have pointers leading to it.
  for (int i = 0; i < 100; ++i) {
    void** p = reinterpret_cast<void**>(new(initialized) char[size]);
    p[0] = no_reclaim_list;

static bool RunSilent(HeapLeakChecker* check,
                      bool (HeapLeakChecker::*func)()) {
  // By default, don't print the 'we detected a leak' message in the
  // cases we're expecting a leak (we still print when --v is >= 1).
  // This way, the logging output is less confusing: we only print
  // "we detected a leak", and how to diagnose it, for *unexpected* leaks.
  int32 old_FLAGS_verbose = FLAGS_verbose;
  if (!VLOG_IS_ON(1))            // not on a verbose setting
    FLAGS_verbose = FATAL;       // only log fatal errors
  const bool retval = (check->*func)();
  FLAGS_verbose = old_FLAGS_verbose;

#define RUN_SILENT(check, func)  RunSilent(&(check), &HeapLeakChecker::func)
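// For example, RUN_SILENT(check, BriefNoLeaks) expands to
//   RunSilent(&(check), &HeapLeakChecker::BriefNoLeaks)
// i.e. the named leak-check method runs with logging mostly suppressed.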

enum CheckType { SAME_HEAP, NO_LEAKS };

static void VerifyLeaks(HeapLeakChecker* check, CheckType type,
                        int leaked_bytes, int leaked_objects) {
  WipeStack();  // to help with can_create_leaks_reliably
  const bool no_leaks =
    type == NO_LEAKS ? RUN_SILENT(*check, BriefNoLeaks)
                     : RUN_SILENT(*check, BriefSameHeap);
  if (can_create_leaks_reliably) {
    // these might still fail occasionally, but it should be very rare
    CHECK_EQ(no_leaks, false);
    CHECK_EQ(check->BytesLeaked(), leaked_bytes);
    CHECK_EQ(check->ObjectsLeaked(), leaked_objects);
    WARN_IF(no_leaks != false,
            "Expected leaks not found: "
            "Some liveness flood must be too optimistic");

static void TestHeapLeakCheckerDeathSimple() {
  HeapLeakChecker check("death_simple");
  void* foo = AllocHidden(100 * sizeof(int));
  void* bar = AllocHidden(300);
  LogHidden("Leaking", foo);
  LogHidden("Leaking", bar);
  VerifyLeaks(&check, NO_LEAKS, 300 + 100 * sizeof(int), 2);

static void MakeDeathLoop(void** arr1, void** arr2) {
  PreventHeapReclaiming(2 * sizeof(void*));
  void** a1 = new(initialized) void*[2];
  void** a2 = new(initialized) void*[2];
  a1[1] = reinterpret_cast<void*>(a2);
  a2[1] = reinterpret_cast<void*>(a1);
  VLOG(2) << "Made hidden loop at " << &a1 << " to " << arr1;

// does not deallocate two objects linked together
static void TestHeapLeakCheckerDeathLoop() {
  HeapLeakChecker check("death_loop");
  RunHidden(NewCallback(MakeDeathLoop, &arr1, &arr2));
  LogHidden("Leaking", arr1);
  LogHidden("Leaking", arr2);
  VerifyLeaks(&check, NO_LEAKS, 4 * sizeof(void*), 2);
  DeAllocHidden(&arr1);
  DeAllocHidden(&arr2);

// deallocates more than allocates
static void TestHeapLeakCheckerDeathInverse() {
  void* bar = AllocHidden(250 * sizeof(int));
  LogHidden("Pre leaking", bar);
  HeapLeakChecker check("death_inverse");
  void* foo = AllocHidden(100 * sizeof(int));
  LogHidden("Leaking", foo);
  VerifyLeaks(&check, SAME_HEAP,
              100 * static_cast<int64>(sizeof(int)),

// deallocates more than allocates
static void TestHeapLeakCheckerDeathNoLeaks() {
  void* foo = AllocHidden(100 * sizeof(int));
  void* bar = AllocHidden(250 * sizeof(int));
  HeapLeakChecker check("death_noleaks");
  CHECK_EQ(check.BriefNoLeaks(), true);

static void TestHeapLeakCheckerDeathCountLess() {
  void* bar1 = AllocHidden(50 * sizeof(int));
  void* bar2 = AllocHidden(50 * sizeof(int));
  LogHidden("Pre leaking", bar1);
  LogHidden("Pre leaking", bar2);
  HeapLeakChecker check("death_count_less");
  void* foo = AllocHidden(100 * sizeof(int));
  LogHidden("Leaking", foo);
  DeAllocHidden(&bar1);
  DeAllocHidden(&bar2);
  VerifyLeaks(&check, SAME_HEAP,

static void TestHeapLeakCheckerDeathCountMore() {
  void* foo = AllocHidden(100 * sizeof(int));
  LogHidden("Pre leaking", foo);
  HeapLeakChecker check("death_count_more");
  void* bar1 = AllocHidden(50 * sizeof(int));
  void* bar2 = AllocHidden(50 * sizeof(int));
  LogHidden("Leaking", bar1);
  LogHidden("Leaking", bar2);
  VerifyLeaks(&check, SAME_HEAP,
  DeAllocHidden(&bar1);
  DeAllocHidden(&bar2);

static void TestHiddenPointer() {
  HiddenPointer<void> p(foo);
  CHECK_EQ(foo, p.get());
  // Confirm pointer doesn't appear to contain a byte sequence
  // that == the pointer.  We don't really need to test that
  // the xor trick itself works, as without it nothing in this
  // test suite would work.  See the Hide/Unhide/*Hidden* set
  // of helper methods.
  CHECK_NE(foo, *reinterpret_cast<void**>(&p));

// simple tests that deallocate what they allocated
static void TestHeapLeakChecker() {
  { HeapLeakChecker check("trivial");
    CHECK(check.BriefSameHeap());
  { HeapLeakChecker check("simple");
    void* foo = AllocHidden(100 * sizeof(int));
    void* bar = AllocHidden(200 * sizeof(int));
    CHECK(check.BriefSameHeap());

// no false positives
static void TestHeapLeakCheckerNoFalsePositives() {
  { HeapLeakChecker check("trivial_p");
    CHECK(check.BriefSameHeap());
  { HeapLeakChecker check("simple_p");
    void* foo = AllocHidden(100 * sizeof(int));
    void* bar = AllocHidden(200 * sizeof(int));
    CHECK(check.SameHeap());

// test that we detect leaks when we have same total # of bytes and
// objects, but different individual object sizes
static void TestLeakButTotalsMatch() {
  void* bar1 = AllocHidden(240 * sizeof(int));
  void* bar2 = AllocHidden(160 * sizeof(int));
  LogHidden("Pre leaking", bar1);
  LogHidden("Pre leaking", bar2);
  HeapLeakChecker check("trick");
  void* foo1 = AllocHidden(280 * sizeof(int));
  void* foo2 = AllocHidden(120 * sizeof(int));
  LogHidden("Leaking", foo1);
  LogHidden("Leaking", foo2);
  DeAllocHidden(&bar1);
  DeAllocHidden(&bar2);
  // foo1 and foo2 leaked
  VerifyLeaks(&check, NO_LEAKS, (280+120)*sizeof(int), 2);
  DeAllocHidden(&foo1);
  DeAllocHidden(&foo2);

// no false negatives from pprof
static void TestHeapLeakCheckerDeathTrick() {
  void* bar1 = AllocHidden(240 * sizeof(int));
  void* bar2 = AllocHidden(160 * sizeof(int));
  HeapLeakChecker check("death_trick");
  DeAllocHidden(&bar1);
  DeAllocHidden(&bar2);
  void* foo1 = AllocHidden(280 * sizeof(int));
  void* foo2 = AllocHidden(120 * sizeof(int));
  // TODO(maxim): use the above if we make pprof work in automated test runs
  if (!FLAGS_maybe_stripped) {
    CHECK_EQ(RUN_SILENT(check, SameHeap), false);
      // pprof checking should catch the leak
    WARN_IF(RUN_SILENT(check, SameHeap) != false,
            "death_trick leak is not caught; "
            "we must be using a stripped binary");
  DeAllocHidden(&foo1);
  DeAllocHidden(&foo2);

static void TransLeaks() {
  AllocHidden(1 * sizeof(char));

// range-based disabling using Disabler
static void ScopedDisabledLeaks() {
  HeapLeakChecker::Disabler disabler;
  AllocHidden(3 * sizeof(int));
  (void)malloc(10);  // Direct leak

// have different disabled leaks
static void* RunDisabledLeaks(void* a) {
  ScopedDisabledLeaks();

// have different disabled leaks inside of a thread
static void ThreadDisabledLeaks() {
  if (FLAGS_no_threads) return;
  CHECK_EQ(pthread_attr_init(&attr), 0);
  CHECK_EQ(pthread_create(&tid, &attr, RunDisabledLeaks, NULL), 0);
  CHECK_EQ(pthread_join(tid, &res), 0);

// different disabled leaks (some in threads)
static void TestHeapLeakCheckerDisabling() {
  HeapLeakChecker check("disabling");
  RunDisabledLeaks(NULL);
  RunDisabledLeaks(NULL);
  ThreadDisabledLeaks();
  RunDisabledLeaks(NULL);
  ThreadDisabledLeaks();
  ThreadDisabledLeaks();
  CHECK(check.SameHeap());

typedef set<int> IntSet;

static int some_ints[] = { 1, 2, 3, 21, 22, 23, 24, 25 };

static void DoTestSTLAlloc() {
  IntSet* x = new(initialized) IntSet[1];
  *x = IntSet(some_ints, some_ints + 6);
  for (int i = 0; i < 1000; i++) {

// Check that normal STL usage does not result in a leak report.
// (In particular we test that there is no complex allocator of STL's own
// running on top of our allocator with hooks into the heap profiler
// that could result in a false leak report in this case.)
static void TestSTLAlloc() {
  HeapLeakChecker check("stl");
  RunHidden(NewCallback(DoTestSTLAlloc));
  CHECK_EQ(check.BriefSameHeap(), true);

static void DoTestSTLAllocInverse(IntSet** setx) {
  IntSet* x = new(initialized) IntSet[1];
  *x = IntSet(some_ints, some_ints + 3);
  for (int i = 0; i < 100; i++) {

static void FreeTestSTLAllocInverse(IntSet** setx) {

// Check that normal leaked STL usage *does* result in a leak report.
// (In particular we test that there is no complex allocator of STL's own
// running on top of our allocator with hooks into the heap profiler
// that could result in a false absence of a leak report in this case.)
static void TestSTLAllocInverse() {
  HeapLeakChecker check("death_inverse_stl");
  RunHidden(NewCallback(DoTestSTLAllocInverse, &x));
  LogHidden("Leaking", x);
  if (can_create_leaks_reliably) {
    WipeStack();  // to help with can_create_leaks_reliably
    // these might still fail occasionally, but it should be very rare
    CHECK_EQ(RUN_SILENT(check, BriefNoLeaks), false);
    CHECK_GE(check.BytesLeaked(), 100 * sizeof(int));
    CHECK_GE(check.ObjectsLeaked(), 100);
    // assumes set<>s are represented by some kind of binary tree
    // or something else allocating >=1 heap object per set object
    WARN_IF(RUN_SILENT(check, BriefNoLeaks) != false,
            "Expected leaks not found: "
            "Some liveness flood must be too optimistic");
  RunHidden(NewCallback(FreeTestSTLAllocInverse, &x));

template<class Alloc>
static void DirectTestSTLAlloc(Alloc allocator, const char* name) {
  HeapLeakChecker check((string("direct_stl-") + name).c_str());
  static const int kSize = 1000;
  typename Alloc::pointer ptrs[kSize];
  for (int i = 0; i < kSize; ++i) {
    typename Alloc::pointer p = allocator.allocate(i*3+1);
    HeapLeakChecker::IgnoreObject(p);
    // This will crash if p is not known to heap profiler:
    // (i.e. STL's "allocator" does not have a direct hook to heap profiler)
    HeapLeakChecker::UnIgnoreObject(p);
  for (int i = 0; i < kSize; ++i) {
    allocator.deallocate(ptrs[i], i*3+1);
  CHECK(check.BriefSameHeap());  // just in case

static struct group* grp = NULL;
static const int kKeys = 50;
static pthread_key_t key[kKeys];

static void KeyFree(void* ptr) {
  delete [] reinterpret_cast<char*>(ptr);

static bool key_init_has_run = false;

static void KeyInit() {
  for (int i = 0; i < kKeys; ++i) {
    CHECK_EQ(pthread_key_create(&key[i], KeyFree), 0);
    VLOG(2) << "pthread key " << i << " : " << key[i];
  key_init_has_run = true;  // needed for a sanity-check

// force various C library static and thread-specific allocations
static void TestLibCAllocate() {
  CHECK(key_init_has_run);
  for (int i = 0; i < kKeys; ++i) {
    void* p = pthread_getspecific(key[i]);
      // Test-logging inside threads which (potentially) creates and uses
      // thread-local data inside standard C++ library:
      VLOG(0) << "Adding pthread-specifics for thread " << pthread_self()
              << " pid " << getpid();
      p = new(initialized) char[77 + i];
      VLOG(2) << "pthread specific " << i << " : " << p;
      pthread_setspecific(key[i], p);

  const time_t now = time(NULL);
#ifdef HAVE_EXECINFO_H

  gid_t gid = getgid();
  if (grp == NULL)  grp = getgrent();  // a race condition here is okay
  getgrnam(grp->gr_name);

// Continuous random heap memory activity to try to disrupt heap checking.
static void* HeapBusyThreadBody(void* a) {
  const int thread_num = reinterpret_cast<intptr_t>(a);
  VLOG(0) << "A new HeapBusyThread " << thread_num;
  // Try to hide ptr from heap checker in a CPU register:
  // Here we are just making a best effort to put the only pointer
  // to a heap object into a thread register to test
  // the thread-register finding machinery in the heap checker.
#if defined(__i386__) && defined(__GNUC__)
  register int** ptr asm("esi");
#elif defined(__x86_64__) && defined(__GNUC__)
  register int** ptr asm("r15");
  typedef set<int> Set;
    // TestLibCAllocate() calls libc functions that don't work so well
    // after main() has exited.  So we just don't do the test then.
    if (!g_have_exited_main)
    ptr = new(initialized) int*[1];
    *ptr = new(initialized) int[1];
    set<int>* s2 = new(initialized) set<int>[1];
    s2->insert(*s1.begin());
    user += *s2->begin();
    if (random() % 51 == 0) {
    if (random() % 2 == 0) {
      VLOG(3) << pthread_self() << " (" << getpid() << "): in wait: "
              << ptr << ", " << *ptr << "; " << s1.size();
      VLOG(2) << pthread_self() << " (" << getpid() << "): in wait, ptr = "
              << reinterpret_cast<void*>(
                   reinterpret_cast<uintptr_t>(ptr) ^ kHideMask)
              << "^" << reinterpret_cast<void*>(kHideMask);
      if (FLAGS_test_register_leak  &&  thread_num % 5 == 0) {
        // Hide the register "ptr" value with an xor mask.
        // If one provides --test_register_leak flag, the test should
        // (with very high probability) crash on some leak check
        // with a leak report (of some x * sizeof(int) + y * sizeof(int*) bytes)
        // pointing at the two lines above in this function
        // with "new(initialized) int" in them as the allocators
        // of the leaked objects.
        // CAVEAT: We can't really prevent a compiler from saving some
        // temporary values of "ptr" on the stack and thus letting us find
        // the heap objects not via the register.
        // Hence it's normal if for certain compilers or optimization modes
        // --test_register_leak does not cause a leak crash of the above form
        // (this happens e.g. for gcc 4.0.1 in opt mode).
        ptr = reinterpret_cast<int **>(
            reinterpret_cast<uintptr_t>(ptr) ^ kHideMask);
        // busy loop to get the thread interrupted at:
        for (int i = 1; i < 10000000; ++i)  user += (1 + user * user * 5) / i;
        ptr = reinterpret_cast<int **>(
            reinterpret_cast<uintptr_t>(ptr) ^ kHideMask);
      poll(NULL, 0, random() % 100);
      VLOG(2) << pthread_self() << ": continuing";
      if (random() % 3 == 0) {

static void RunHeapBusyThreads() {
  if (!FLAGS_interfering_threads || FLAGS_no_threads)  return;

  const int n = 17;  // make many threads
  CHECK_EQ(pthread_attr_init(&attr), 0);
  // make them and let them run
  for (int i = 0; i < n; ++i) {
    VLOG(0) << "Creating extra thread " << i + 1;
    CHECK(pthread_create(&tid, &attr, HeapBusyThreadBody,
                         reinterpret_cast<void*>(i)) == 0);

// ========================================================================= //

// This code section is to test that objects that are reachable from global
// variables are not reported as leaks,
// as well as that (Un)IgnoreObject work fine for such objects.

// An object-making function:
// returns a "weird" pointer to a new object for which
// it's worth checking that the object is reachable via that pointer.
typedef void* (*ObjMakerFunc)();
static list<ObjMakerFunc> obj_makers;  // list of registered object makers

// Helper macro to register an object-making function.
// 'name' is an identifier of this object maker,
// 'body' is its function body that must declare
// pointer 'p' to the next object to return.
//   REGISTER_OBJ_MAKER(trivial, int* p = new(initialized) int;)
#define REGISTER_OBJ_MAKER(name, body) \
  void* ObjMaker_##name##_() { \
    VLOG(1) << "Obj making " << #name; \
  static ObjMakerRegistrar maker_reg_##name##__(&ObjMaker_##name##_);
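
// For illustration only (a hypothetical expansion sketch; parts of the macro
// body are elided above), REGISTER_OBJ_MAKER(trivial, int* p = new(initialized) int;)
// produces roughly:
//   void* ObjMaker_trivial_() {
//     VLOG(1) << "Obj making " << "trivial";
//     int* p = new(initialized) int;
//     return p;
//   }
//   static ObjMakerRegistrar maker_reg_trivial__(&ObjMaker_trivial_);
// The registrar's constructor pushes the maker onto obj_makers at
// static-initialization time.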

// helper class for REGISTER_OBJ_MAKER
struct ObjMakerRegistrar {
  ObjMakerRegistrar(ObjMakerFunc obj_maker) { obj_makers.push_back(obj_maker); }

// List of the objects/pointers made with all the obj_makers
// to test reachability via global data pointers during leak checks.
static list<void*>* live_objects = new list<void*>;
  // pointer so that it does not get destructed on exit

// Exerciser for one ObjMakerFunc.
static void TestPointerReach(ObjMakerFunc obj_maker) {
  HeapLeakChecker::IgnoreObject(obj_maker());  // test IgnoreObject

  void* obj = obj_maker();
  HeapLeakChecker::IgnoreObject(obj);
  HeapLeakChecker::UnIgnoreObject(obj);  // test UnIgnoreObject
  HeapLeakChecker::IgnoreObject(obj);    // not to need deletion for obj

  live_objects->push_back(obj_maker());  // test reachability at leak check

// Test all the ObjMakerFuncs registered via REGISTER_OBJ_MAKER.
static void TestObjMakers() {
  for (list<ObjMakerFunc>::const_iterator i = obj_makers.begin();
       i != obj_makers.end(); ++i) {
    TestPointerReach(*i);
    TestPointerReach(*i);  // a couple more times would not hurt
    TestPointerReach(*i);

// A dummy class to mimic allocation behavior of string-s.
    size = 3 + random() % 30;
    ptr = new(initialized) T[size];
  ~Array() { delete [] ptr; }
  Array(const Array& x) {
    ptr = new(initialized) T[size];
    for (size_t i = 0; i < size; ++i) {
  void operator=(const Array& x) {
    ptr = new(initialized) T[size];
    for (size_t i = 0; i < size; ++i) {
  void append(const Array& x) {
    T* p = new(initialized) T[size + x.size];
    for (size_t i = 0; i < size; ++i) {
    for (size_t i = 0; i < x.size; ++i) {
      p[size+i] = x.ptr[i];

// to test pointers to objects, built-in arrays, string, etc:
REGISTER_OBJ_MAKER(plain, int* p = new(initialized) int;)
REGISTER_OBJ_MAKER(int_array_1, int* p = new(initialized) int[1];)
REGISTER_OBJ_MAKER(int_array, int* p = new(initialized) int[10];)
REGISTER_OBJ_MAKER(string, Array<char>* p = new(initialized) Array<char>();)
REGISTER_OBJ_MAKER(string_array,
                   Array<char>* p = new(initialized) Array<char>[5];)
REGISTER_OBJ_MAKER(char_array, char* p = new(initialized) char[5];)
REGISTER_OBJ_MAKER(appended_string,
                   Array<char>* p = new Array<char>();
                   p->append(Array<char>());
REGISTER_OBJ_MAKER(plain_ptr, int** p = new(initialized) int*;)
REGISTER_OBJ_MAKER(linking_ptr,
                   int** p = new(initialized) int*;
                   *p = new(initialized) int;

REGISTER_OBJ_MAKER(0_sized, void* p = malloc(0);)  // 0-sized object (important)
REGISTER_OBJ_MAKER(1_sized, void* p = malloc(1);)
REGISTER_OBJ_MAKER(2_sized, void* p = malloc(2);)
REGISTER_OBJ_MAKER(3_sized, void* p = malloc(3);)
REGISTER_OBJ_MAKER(4_sized, void* p = malloc(4);)
[] = { 1, 2, 3, 4, 5, 6, 7, 21, 22, 23, 24, 25, 26, 27 };
1084 static set
<int> live_leak_set(set_data
, set_data
+7);
1085 static const set
<int> live_leak_const_set(set_data
, set_data
+14);
1087 REGISTER_OBJ_MAKER(set
,
1088 set
<int>* p
= new(initialized
) set
<int>(set_data
, set_data
+ 13);
1093 explicit ClassA(int a
) : ptr(NULL
) { }
1096 static const ClassA
live_leak_mutable(1);
1101 explicit TClass(int a
) : ptr(NULL
) { }
1105 static const TClass
<Array
<char> > live_leak_templ_mutable(1);

  virtual void f() { }
  virtual ~ClassB() { }

  virtual void f2() { }
  virtual ~ClassB2() { }

class ClassD1 : public ClassB {
  virtual void f() { }

class ClassD2 : public ClassB2 {
  virtual void f2() { }

class ClassD : public ClassD1, public ClassD2 {
  virtual void f() { }
  virtual void f2() { }

// to test pointers to objects of base subclasses:
REGISTER_OBJ_MAKER(B,  ClassB*  p = new(initialized) ClassB;)
REGISTER_OBJ_MAKER(D1, ClassD1* p = new(initialized) ClassD1;)
REGISTER_OBJ_MAKER(D2, ClassD2* p = new(initialized) ClassD2;)
REGISTER_OBJ_MAKER(D,  ClassD*  p = new(initialized) ClassD;)

REGISTER_OBJ_MAKER(D1_as_B,  ClassB*  p = new(initialized) ClassD1;)
REGISTER_OBJ_MAKER(D2_as_B2, ClassB2* p = new(initialized) ClassD2;)
REGISTER_OBJ_MAKER(D_as_B,   ClassB*  p = new(initialized) ClassD;)
REGISTER_OBJ_MAKER(D_as_D1,  ClassD1* p = new(initialized) ClassD;)
// inside-object pointers:
REGISTER_OBJ_MAKER(D_as_B2,  ClassB2* p = new(initialized) ClassD;)
REGISTER_OBJ_MAKER(D_as_D2,  ClassD2* p = new(initialized) ClassD;)

  virtual void A() = 0;
  virtual ~InterfaceA() { }

  virtual void B() = 0;
  virtual ~InterfaceB() { }

class InterfaceC : public InterfaceA {
  virtual void C() = 0;
  virtual ~InterfaceC() { }

class ClassMltD1 : public ClassB, public InterfaceB, public InterfaceC {
  virtual void f() { }
  virtual void A() { }
  virtual void B() { }
  virtual void C() { }

class ClassMltD2 : public InterfaceA, public InterfaceB, public ClassB {
  virtual void f() { }
  virtual void A() { }
  virtual void B() { }

// to specifically test heap reachability under
// interface-only multiple inheritance (some use inside-object pointers):
REGISTER_OBJ_MAKER(MltD1,       ClassMltD1* p = new(initialized) ClassMltD1;)
REGISTER_OBJ_MAKER(MltD1_as_B,  ClassB*     p = new(initialized) ClassMltD1;)
REGISTER_OBJ_MAKER(MltD1_as_IA, InterfaceA* p = new(initialized) ClassMltD1;)
REGISTER_OBJ_MAKER(MltD1_as_IB, InterfaceB* p = new(initialized) ClassMltD1;)
REGISTER_OBJ_MAKER(MltD1_as_IC, InterfaceC* p = new(initialized) ClassMltD1;)

REGISTER_OBJ_MAKER(MltD2,       ClassMltD2* p = new(initialized) ClassMltD2;)
REGISTER_OBJ_MAKER(MltD2_as_B,  ClassB*     p = new(initialized) ClassMltD2;)
REGISTER_OBJ_MAKER(MltD2_as_IA, InterfaceA* p = new(initialized) ClassMltD2;)
REGISTER_OBJ_MAKER(MltD2_as_IB, InterfaceB* p = new(initialized) ClassMltD2;)

// to mimic UnicodeString defined in third_party/icu,
// which stores a platform-independent-sized refcount in the first
// few bytes and keeps a pointer pointing behind the refcount.
REGISTER_OBJ_MAKER(unicode_string,
                   char* p = new char[sizeof(uint32) * 10];
                   p += sizeof(uint32);
// similar, but for a platform-dependent-sized refcount
REGISTER_OBJ_MAKER(ref_counted,
                   char* p = new char[sizeof(int) * 20];

    Inner(Nesting* p) : parent(p) {}

  Nesting() : i0(this), i1(this), i2(this), i3(this) {}

// to test inside-object pointers pointing at objects nested into heap objects:
REGISTER_OBJ_MAKER(nesting_i0, Nesting::Inner* p = &((new Nesting())->i0);)
REGISTER_OBJ_MAKER(nesting_i1, Nesting::Inner* p = &((new Nesting())->i1);)
REGISTER_OBJ_MAKER(nesting_i2, Nesting::Inner* p = &((new Nesting())->i2);)
REGISTER_OBJ_MAKER(nesting_i3, Nesting::Inner* p = &((new Nesting())->i3);)

// allocate many objects reachable from global data
static void TestHeapLeakCheckerLiveness() {
  live_leak_mutable.ptr = new(initialized) char[77];
  live_leak_templ_mutable.ptr = new(initialized) Array<char>();
  live_leak_templ_mutable.val = Array<char>();

// ========================================================================= //

// Get address (PC value) following the mmap call into addr_after_mmap_call
static void* Mmapper(uintptr_t* addr_after_mmap_call) {
  void* r = mmap(NULL, 100, PROT_READ|PROT_WRITE,
                 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  // Get current PC value into addr_after_mmap_call
  CHECK_EQ(GetStackTrace(stack, 1, 0), 1);
  *addr_after_mmap_call = reinterpret_cast<uintptr_t>(stack[0]);
  sleep(0);  // undo -foptimize-sibling-calls

// to trick the compiler into preventing inlining
static void* (*mmapper_addr)(uintptr_t* addr) = &Mmapper;

// TODO(maxim): copy/move this to memory_region_map_unittest
// TODO(maxim): expand this test to include mmap64, mremap and sbrk calls.
static void VerifyMemoryRegionMapStackGet() {
  uintptr_t caller_addr_limit;
  void* addr = (*mmapper_addr)(&caller_addr_limit);
  uintptr_t caller = 0;
  { MemoryRegionMap::LockHolder l;
    for (MemoryRegionMap::RegionIterator
           i = MemoryRegionMap::BeginRegionLocked();
           i != MemoryRegionMap::EndRegionLocked(); ++i) {
      if (i->start_addr == reinterpret_cast<uintptr_t>(addr)) {
        CHECK_EQ(caller, 0);
        caller = i->caller();
  // caller must point into Mmapper function:
  if (!(reinterpret_cast<uintptr_t>(mmapper_addr) <= caller  &&
        caller < caller_addr_limit)) {
    LOGF << std::hex << "0x" << caller
         << " does not seem to point into code of function Mmapper at "
         << "0x" << reinterpret_cast<uintptr_t>(mmapper_addr)
         << "! Stack frame collection must be off in MemoryRegionMap!";

static void* Mallocer(uintptr_t* addr_after_malloc_call) {
  void* r = malloc(100);
  sleep(0);  // undo -foptimize-sibling-calls
  // Get current PC value into addr_after_malloc_call
  CHECK_EQ(GetStackTrace(stack, 1, 0), 1);
  *addr_after_malloc_call = reinterpret_cast<uintptr_t>(stack[0]);

// to trick the compiler into preventing inlining
static void* (*mallocer_addr)(uintptr_t* addr) = &Mallocer;

// non-static for friendship with HeapProfiler
// TODO(maxim): expand this test to include
// realloc, calloc, memalign, valloc, pvalloc, new, and new[].
extern void VerifyHeapProfileTableStackGet() {
  uintptr_t caller_addr_limit;
  void* addr = (*mallocer_addr)(&caller_addr_limit);
    reinterpret_cast<uintptr_t>(HeapLeakChecker::GetAllocCaller(addr));
  // caller must point into Mallocer function:
  if (!(reinterpret_cast<uintptr_t>(mallocer_addr) <= caller  &&
        caller < caller_addr_limit)) {
    LOGF << std::hex << "0x" << caller
         << " does not seem to point into code of function Mallocer at "
         << "0x" << reinterpret_cast<uintptr_t>(mallocer_addr)
         << "! Stack frame collection must be off in heap profiler!";

// ========================================================================= //

static void MakeALeak(void** arr) {
  PreventHeapReclaiming(10 * sizeof(int));
  void* a = new(initialized) int[10];

// Helper to do 'return 0;' inside main(): instead we do 'return Pass();'
  fprintf(stdout, "PASS\n");
  g_have_exited_main = true;

int main(int argc, char** argv) {
  run_hidden_ptr = DoRunHidden;
  wipe_stack_ptr = DoWipeStack;
  if (!HeapLeakChecker::IsActive()) {
    CHECK_EQ(FLAGS_heap_check, "");
    LOG(WARNING, "HeapLeakChecker got turned off; we won't test much...");

  VerifyMemoryRegionMapStackGet();
  VerifyHeapProfileTableStackGet();

  // glibc 2.4, on x86_64 at least, has a lock-ordering bug, which
  // means deadlock is possible when one thread calls dl_open at the
  // same time another thread is calling dl_iterate_phdr.  libunwind
  // calls dl_iterate_phdr, and TestLibCAllocate calls dl_open (or the
  // various syscalls in it do), at least the first time it's run.
  // To avoid the deadlock, we run TestLibCAllocate once before getting
  // TODO(csilvers): once libc is fixed, or libunwind can work around it,
  //                 get rid of this early call.  We *want* our test to
  //                 find potential problems like this one!

  if (FLAGS_interfering_threads) {
    RunHeapBusyThreads();  // add interference early

  LOGF << "In main(): heap_check=" << FLAGS_heap_check << endl;

  CHECK(HeapLeakChecker::NoGlobalLeaks());  // so far, so good

  if (FLAGS_test_leak) {
    RunHidden(NewCallback(MakeALeak, &arr));
    LogHidden("Leaking", arr);
    if (FLAGS_test_cancel_global_check) {
      HeapLeakChecker::CancelGlobalCheck();
    // Verify we can call NoGlobalLeaks repeatedly without deadlocking
    HeapLeakChecker::NoGlobalLeaks();
    HeapLeakChecker::NoGlobalLeaks();
    // whole-program leak-check should (with very high probability)
    // catch the leak of arr (10 * sizeof(int) bytes)
    // (when !FLAGS_test_cancel_global_check)

  if (FLAGS_test_loop_leak) {
    RunHidden(NewCallback(MakeDeathLoop, &arr1, &arr2));
    LogHidden("Loop leaking", arr1);
    LogHidden("Loop leaking", arr2);
    if (FLAGS_test_cancel_global_check) {
      HeapLeakChecker::CancelGlobalCheck();
    // Verify we can call NoGlobalLeaks repeatedly without deadlocking
    HeapLeakChecker::NoGlobalLeaks();
    HeapLeakChecker::NoGlobalLeaks();
    // whole-program leak-check should (with very high probability)
    // catch the leak of arr1 and arr2 (4 * sizeof(void*) bytes)
    // (when !FLAGS_test_cancel_global_check)

  if (FLAGS_test_register_leak) {
    // make us fail only where the .sh test expects:
    for (int i = 0; i < 100; ++i) {  // give it some time to crash
      CHECK(HeapLeakChecker::NoGlobalLeaks());

  TestHeapLeakCheckerLiveness();

  HeapLeakChecker heap_check("all");

  TestHiddenPointer();

  TestHeapLeakChecker();
  TestLeakButTotalsMatch();

  TestHeapLeakCheckerDeathSimple();
  TestHeapLeakCheckerDeathLoop();
  TestHeapLeakCheckerDeathInverse();
  TestHeapLeakCheckerDeathNoLeaks();
  TestHeapLeakCheckerDeathCountLess();
  TestHeapLeakCheckerDeathCountMore();

  TestHeapLeakCheckerDeathTrick();

  CHECK(HeapLeakChecker::NoGlobalLeaks());  // so far, so good

  TestHeapLeakCheckerNoFalsePositives();

  TestHeapLeakCheckerDisabling();

  TestSTLAllocInverse();

  // Test that various STL allocators work.  Some of these are redundant, but
  // we don't know how STL might change in the future.  For example,
  // http://wiki/Main/StringNeStdString.
#define DTSL(a) { DirectTestSTLAlloc(a, #a); \
  DTSL(std::allocator<char>());
  DTSL(std::allocator<int>());
  DTSL(std::string().get_allocator());
  DTSL(string().get_allocator());
  DTSL(vector<int>().get_allocator());
  DTSL(vector<double>().get_allocator());
  DTSL(vector<vector<int> >().get_allocator());
  DTSL(vector<string>().get_allocator());
  DTSL((map<string, string>().get_allocator()));
  DTSL((map<string, int>().get_allocator()));
  DTSL(set<char>().get_allocator());

  CHECK(HeapLeakChecker::NoGlobalLeaks());  // so far, so good

  if (!FLAGS_maybe_stripped) {
    CHECK(heap_check.SameHeap());
    WARN_IF(heap_check.SameHeap() != true,
            "overall leaks are caught; we must be using a stripped binary");

  CHECK(HeapLeakChecker::NoGlobalLeaks());  // so far, so good