1 // Copyright (c) 2005, Google Inc.
2 // All rights reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are
6 // met:
7 //
8 // * Redistributions of source code must retain the above copyright
9 // notice, this list of conditions and the following disclaimer.
10 // * Redistributions in binary form must reproduce the above
11 // copyright notice, this list of conditions and the following disclaimer
12 // in the documentation and/or other materials provided with the
13 // distribution.
14 // * Neither the name of Google Inc. nor the names of its
15 // contributors may be used to endorse or promote products derived from
16 // this software without specific prior written permission.
18 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 // ---
31 // Author: Maxim Lifantsev
33 // Running:
34 // ./heap-checker_unittest
36 // If the unittest crashes because it can't find pprof, try:
37 // PPROF_PATH=/usr/local/someplace/bin/pprof ./heap-checker_unittest
39 // To test that the whole-program heap checker will actually report a leak, try:
40 // HEAPCHECK_TEST_LEAK= ./heap-checker_unittest
41 // HEAPCHECK_TEST_LOOP_LEAK= ./heap-checker_unittest
43 // Note: Both of the above commands *should* abort with an error message.
45 // CAVEAT: Do not use vector<> and string on-heap objects in this test,
46 // otherwise the test can sometimes fail for tricky leak checks
47 // when we want some allocated object not to be found live by the heap checker.
48 // This can happen with memory allocators like tcmalloc that can allocate
49 // heap objects back to back without any book-keeping data in between.
50 // What happens is that end-of-storage pointers of a live vector
51 // (or a string depending on the STL implementation used)
52 // can happen to point to that other heap-allocated
53 // object that is not reachable otherwise and that
54 // we don't want to be reachable.
56 // The implication of this for real leak checking
57 // is just one more chance for the liveness flood to be inexact
58 // (see the comment in our .h file).
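//
// Illustrative sketch of that failure mode (not part of the test; assumes a
// back-to-back allocator such as tcmalloc):
//   std::vector<int>* v = new std::vector<int>(10); // live vector
//   int* junk = new int[10];  // object we want the checker NOT to find live
//   // If junk's block happens to sit right after v's buffer, v's internal
//   // end-of-storage pointer equals 'junk', so the liveness flood would
//   // still reach 'junk' through the live vector.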
60 #include "config_for_unittests.h"
61 #ifdef HAVE_POLL_H
62 #include <poll.h>
63 #endif
64 #if defined HAVE_STDINT_H
65 #include <stdint.h> // to get uint16_t (ISO naming madness)
66 #elif defined HAVE_INTTYPES_H
67 #include <inttypes.h> // another place uint16_t might be defined
68 #endif
69 #include <sys/types.h>
70 #include <stdlib.h>
71 #include <errno.h> // errno
72 #ifdef HAVE_UNISTD_H
73 #include <unistd.h> // for sleep(), geteuid()
74 #endif
75 #ifdef HAVE_MMAP
76 #include <sys/mman.h>
77 #endif
78 #include <fcntl.h> // for open(), close()
79 #ifdef HAVE_EXECINFO_H
80 #include <execinfo.h> // backtrace
81 #endif
82 #ifdef HAVE_GRP_H
83 #include <grp.h> // getgrent, getgrnam
84 #endif
85 #ifdef HAVE_PWD_H
86 #include <pwd.h>
87 #endif
89 #include <algorithm>
90 #include <iostream> // for cout
91 #include <iomanip> // for hex
92 #include <list>
93 #include <map>
94 #include <memory>
95 #include <set>
96 #include <string>
97 #include <vector>
99 #include "base/commandlineflags.h"
100 #include "base/googleinit.h"
101 #include "base/logging.h"
103 #include "base/thread_lister.h"
104 #include <gperftools/heap-checker.h>
105 #include "memory_region_map.h"
106 #include <gperftools/malloc_extension.h>
107 #include <gperftools/stacktrace.h>
109 // On systems (like freebsd) that don't define MAP_ANONYMOUS, use the old
110 // form of the name instead.
111 #ifndef MAP_ANONYMOUS
112 # define MAP_ANONYMOUS MAP_ANON
113 #endif
115 using namespace std;
117 // ========================================================================= //
119 // TODO(maxim): write a shell script to test that these indeed crash us
120 // (i.e. we do detect leaks)
121 // Maybe add more such crash tests.
123 DEFINE_bool(test_leak,
124 EnvToBool("HEAP_CHECKER_TEST_TEST_LEAK", false),
125 "If should cause a leak crash");
126 DEFINE_bool(test_loop_leak,
127 EnvToBool("HEAP_CHECKER_TEST_TEST_LOOP_LEAK", false),
128 "If should cause a looped leak crash");
129 DEFINE_bool(test_register_leak,
130 EnvToBool("HEAP_CHECKER_TEST_TEST_REGISTER_LEAK", false),
131 "If should cause a leak crash by hiding a pointer "
132 "that is only in a register");
133 DEFINE_bool(test_cancel_global_check,
134 EnvToBool("HEAP_CHECKER_TEST_TEST_CANCEL_GLOBAL_CHECK", false),
135 "If should test HeapLeakChecker::CancelGlobalCheck "
136 "when --test_leak or --test_loop_leak are given; "
137 "the test should not fail then");
138 DEFINE_bool(maybe_stripped,
139 EnvToBool("HEAP_CHECKER_TEST_MAYBE_STRIPPED", true),
140 "If we think we can be a stripped binary");
141 DEFINE_bool(interfering_threads,
142 EnvToBool("HEAP_CHECKER_TEST_INTERFERING_THREADS", true),
143 "If we should use threads trying "
144 "to interfere with leak checking");
145 DEFINE_bool(hoarding_threads,
146 EnvToBool("HEAP_CHECKER_TEST_HOARDING_THREADS", true),
147 "If threads (usually the manager thread) are known "
148 "to retain some old state in their global buffers, "
149 "so that it's hard to force leaks when threads are around");
150 // TODO(maxim): Change the default to false
151 // when the standard environment uses NPTL threads:
152 // they do not seem to have this problem.
153 DEFINE_bool(no_threads,
154 EnvToBool("HEAP_CHECKER_TEST_NO_THREADS", false),
155 "If we should not use any threads");
156 // This is used so we can make can_create_leaks_reliably true
157 // for any pthread implementation and test with that.
159 DECLARE_int64(heap_check_max_pointer_offset); // heap-checker.cc
160 DECLARE_string(heap_check); // in heap-checker.cc
162 #define WARN_IF(cond, msg) LOG_IF(WARNING, cond, msg)
164 // This is an evil macro! Be very careful using it...
165 #undef VLOG // and we start by evilly overriding logging.h VLOG
166 #define VLOG(lvl) if (FLAGS_verbose >= (lvl)) cout << "\n"
167 // This is, likewise, evil
168 #define LOGF VLOG(INFO)
170 static void RunHeapBusyThreads(); // below
173 class Closure {
174 public:
175 virtual ~Closure() { }
176 virtual void Run() = 0;
179 class Callback0 : public Closure {
180 public:
181 typedef void (*FunctionSignature)();
183 inline Callback0(FunctionSignature f) : f_(f) {}
184 virtual void Run() { (*f_)(); delete this; }
186 private:
187 FunctionSignature f_;
190 template <class P1> class Callback1 : public Closure {
191 public:
192 typedef void (*FunctionSignature)(P1);
194 inline Callback1<P1>(FunctionSignature f, P1 p1) : f_(f), p1_(p1) {}
195 virtual void Run() { (*f_)(p1_); delete this; }
197 private:
198 FunctionSignature f_;
199 P1 p1_;
202 template <class P1, class P2> class Callback2 : public Closure {
203 public:
204 typedef void (*FunctionSignature)(P1,P2);
206 inline Callback2<P1,P2>(FunctionSignature f, P1 p1, P2 p2) : f_(f), p1_(p1), p2_(p2) {}
207 virtual void Run() { (*f_)(p1_, p2_); delete this; }
209 private:
210 FunctionSignature f_;
211 P1 p1_;
212 P2 p2_;
215 inline Callback0* NewCallback(void (*function)()) {
216 return new Callback0(function);
219 template <class P1>
220 inline Callback1<P1>* NewCallback(void (*function)(P1), P1 p1) {
221 return new Callback1<P1>(function, p1);
224 template <class P1, class P2>
225 inline Callback2<P1,P2>* NewCallback(void (*function)(P1,P2), P1 p1, P2 p2) {
226 return new Callback2<P1,P2>(function, p1, p2);
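// Example usage (as these helpers are used later in this file):
//   void* r;
//   RunHidden(NewCallback(DoAllocHidden, size, &r));
// Note that Run() deletes the callback, so each NewCallback() result is
// single-use.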
230 // Set to true at the end of main(), so threads know. Not entirely thread-safe,
231 // but probably good enough.
232 static bool g_have_exited_main = false;
234 // If we can reliably create leaks (i.e. make leaked object
235 // really unreachable from any global data).
236 static bool can_create_leaks_reliably = false;
238 // We use a simple allocation wrapper
239 // to make sure we wipe out the newly allocated objects
240 // in case they still happened to contain some pointer data
241 // accidentally left by the memory allocator.
242 struct Initialized { };
243 static Initialized initialized;
244 void* operator new(size_t size, const Initialized&) {
245 // Below we use "p = new(initialized) Foo[1];" and "delete[] p;"
246 // instead of "p = new(initialized) Foo;"
247 // when we need to delete an allocated object.
248 void* p = malloc(size);
249 memset(p, 0, size);
250 return p;
252 void* operator new[](size_t size, const Initialized&) {
253 char* p = new char[size];
254 memset(p, 0, size);
255 return p;
258 static void DoWipeStack(int n); // defined below
259 static void WipeStack() { DoWipeStack(20); }
261 static void Pause() {
262 poll(NULL, 0, 77); // time for thread activity in HeapBusyThreadBody
264 // Indirectly test malloc_extension.*:
265 CHECK(MallocExtension::instance()->VerifyAllMemory());
266 int blocks;
267 size_t total;
268 int histogram[kMallocHistogramSize];
269 if (MallocExtension::instance()
270 ->MallocMemoryStats(&blocks, &total, histogram) && total != 0) {
271 VLOG(3) << "Malloc stats: " << blocks << " blocks of "
272 << total << " bytes";
273 for (int i = 0; i < kMallocHistogramSize; ++i) {
274 if (histogram[i]) {
275 VLOG(3) << " Malloc histogram at " << i << " : " << histogram[i];
279 WipeStack(); // e.g. MallocExtension::VerifyAllMemory
280 // can leave pointers to heap objects on stack
283 // Make gcc think a pointer is "used"
284 template <class T>
285 static void Use(T** foo) {
286 VLOG(2) << "Dummy-using " << static_cast<void*>(*foo) << " at " << foo;
289 // Arbitrary value, but not such that xor'ing with it is likely
290 // to map one valid pointer to another valid pointer:
291 static const uintptr_t kHideMask =
292 static_cast<uintptr_t>(0xF03A5F7BF03A5F7BLL);
294 // Helpers to hide a pointer from live data traversal.
295 // We just xor the pointer so that (with high probability)
296 // it's not a valid address of a heap object anymore.
297 // Both Hide and UnHide must be executed within RunHidden() below
298 // to prevent leaving stale data on active stack that can be a pointer
299 // to a heap object that is not actually reachable via live variables.
300 // (UnHide might leave heap pointer value for an object
301 // that will be deallocated but later another object
302 // can be allocated at the same heap address.)
303 template <class T>
304 static void Hide(T** ptr) {
305 // we cast values, not dereferenced pointers, so no aliasing issues:
306 *ptr = reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(*ptr) ^ kHideMask);
307 VLOG(2) << "hid: " << static_cast<void*>(*ptr);
310 template <class T>
311 static void UnHide(T** ptr) {
312 VLOG(2) << "unhiding: " << static_cast<void*>(*ptr);
313 // we cast values, not dereferenced pointers, so no aliasing issues:
314 *ptr = reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(*ptr) ^ kHideMask);
317 static void LogHidden(const char* message, const void* ptr) {
318 LOGF << message << " : "
319 << ptr << " ^ " << reinterpret_cast<void*>(kHideMask) << endl;
322 // volatile to keep the compiler from inlining the calls to these
323 void (*volatile run_hidden_ptr)(Closure* c, int n);
324 void (*volatile wipe_stack_ptr)(int n);
326 static void DoRunHidden(Closure* c, int n) {
327 if (n) {
328 VLOG(10) << "Level " << n << " at " << &n;
329 (*run_hidden_ptr)(c, n-1);
330 (*wipe_stack_ptr)(n);
331 sleep(0); // undo -foptimize-sibling-calls
332 } else {
333 c->Run();
337 /*static*/ void DoWipeStack(int n) {
338 VLOG(10) << "Wipe level " << n << " at " << &n;
339 if (n) {
340 const int sz = 30;
341 volatile int arr[sz];
342 for (int i = 0; i < sz; ++i) arr[i] = 0;
343 (*wipe_stack_ptr)(n-1);
344 sleep(0); // undo -foptimize-sibling-calls
348 // This executes closure c several stack frames down from the current one
349 // and then makes an effort to also wipe out the stack data that was used by
350 // the closure.
351 // This way we prevent the leak checker from finding any temporary pointers
352 // of the closure execution on the stack and deciding that
353 // these pointers (and the objects they point to) are still live.
354 static void RunHidden(Closure* c) {
355 DoRunHidden(c, 15);
356 DoWipeStack(20);
359 static void DoAllocHidden(size_t size, void** ptr) {
360 void* p = new(initialized) char[size];
361 Hide(&p);
362 Use(&p); // use only hidden versions
363 VLOG(2) << "Allocated hidden " << p << " at " << &p;
364 *ptr = p; // assign the hidden versions
367 static void* AllocHidden(size_t size) {
368 void* r;
369 RunHidden(NewCallback(DoAllocHidden, size, &r));
370 return r;
373 static void DoDeAllocHidden(void** ptr) {
374 Use(ptr); // use only hidden versions
375 void* p = *ptr;
376 VLOG(2) << "Deallocating hidden " << p;
377 UnHide(&p);
378 delete [] reinterpret_cast<char*>(p);
381 static void DeAllocHidden(void** ptr) {
382 RunHidden(NewCallback(DoDeAllocHidden, ptr));
383 *ptr = NULL;
384 Use(ptr);
387 void PreventHeapReclaiming(size_t size) {
388 #ifdef NDEBUG
389 if (true) {
390 static void** no_reclaim_list = NULL;
391 CHECK(size >= sizeof(void*));
392 // We can't use malloc_reclaim_memory flag in opt mode as debugallocation.cc
393 // is not used. Instead we allocate a bunch of heap objects that are
394 // of the same size as what we are going to leak to ensure that the object
395 // we are about to leak is not at the same address as some old allocated
396 // and freed object that might still have pointers leading to it.
397 for (int i = 0; i < 100; ++i) {
398 void** p = reinterpret_cast<void**>(new(initialized) char[size]);
399 p[0] = no_reclaim_list;
400 no_reclaim_list = p;
403 #endif
406 static bool RunSilent(HeapLeakChecker* check,
407 bool (HeapLeakChecker::* func)()) {
408 // By default, don't print the 'we detected a leak' message in the
409 // cases we're expecting a leak (we still print when --v is >= 1).
410 // This way, the logging output is less confusing: we only print
411 // "we detected a leak", and how to diagnose it, for *unexpected* leaks.
412 int32 old_FLAGS_verbose = FLAGS_verbose;
413 if (!VLOG_IS_ON(1)) // not on a verbose setting
414 FLAGS_verbose = FATAL; // only log fatal errors
415 const bool retval = (check->*func)();
416 FLAGS_verbose = old_FLAGS_verbose;
417 return retval;
420 #define RUN_SILENT(check, func) RunSilent(&(check), &HeapLeakChecker::func)
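// For example, RUN_SILENT(*check, BriefNoLeaks) below invokes
// check->BriefNoLeaks() with logging temporarily limited to FATAL errors.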
422 enum CheckType { SAME_HEAP, NO_LEAKS };
424 static void VerifyLeaks(HeapLeakChecker* check, CheckType type,
425 int leaked_bytes, int leaked_objects) {
426 WipeStack(); // to help with can_create_leaks_reliably
427 const bool no_leaks =
428 type == NO_LEAKS ? RUN_SILENT(*check, BriefNoLeaks)
429 : RUN_SILENT(*check, BriefSameHeap);
430 if (can_create_leaks_reliably) {
431 // these might still fail occasionally, but it should be very rare
432 CHECK_EQ(no_leaks, false);
433 CHECK_EQ(check->BytesLeaked(), leaked_bytes);
434 CHECK_EQ(check->ObjectsLeaked(), leaked_objects);
435 } else {
436 WARN_IF(no_leaks != false,
437 "Expected leaks not found: "
438 "Some liveness flood must be too optimistic");
442 // allocates but does not deallocate
443 static void TestHeapLeakCheckerDeathSimple() {
444 HeapLeakChecker check("death_simple");
445 void* foo = AllocHidden(100 * sizeof(int));
446 Use(&foo);
447 void* bar = AllocHidden(300);
448 Use(&bar);
449 LogHidden("Leaking", foo);
450 LogHidden("Leaking", bar);
451 Pause();
452 VerifyLeaks(&check, NO_LEAKS, 300 + 100 * sizeof(int), 2);
453 DeAllocHidden(&foo);
454 DeAllocHidden(&bar);
457 static void MakeDeathLoop(void** arr1, void** arr2) {
458 PreventHeapReclaiming(2 * sizeof(void*));
459 void** a1 = new(initialized) void*[2];
460 void** a2 = new(initialized) void*[2];
461 a1[1] = reinterpret_cast<void*>(a2);
462 a2[1] = reinterpret_cast<void*>(a1);
463 Hide(&a1);
464 Hide(&a2);
465 Use(&a1);
466 Use(&a2);
467 VLOG(2) << "Made hidden loop at " << &a1 << " to " << arr1;
468 *arr1 = a1;
469 *arr2 = a2;
472 // allocates two objects linked together but does not deallocate them
473 static void TestHeapLeakCheckerDeathLoop() {
474 HeapLeakChecker check("death_loop");
475 void* arr1;
476 void* arr2;
477 RunHidden(NewCallback(MakeDeathLoop, &arr1, &arr2));
478 Use(&arr1);
479 Use(&arr2);
480 LogHidden("Leaking", arr1);
481 LogHidden("Leaking", arr2);
482 Pause();
483 VerifyLeaks(&check, NO_LEAKS, 4 * sizeof(void*), 2);
484 DeAllocHidden(&arr1);
485 DeAllocHidden(&arr2);
488 // deallocates more than allocates
489 static void TestHeapLeakCheckerDeathInverse() {
490 void* bar = AllocHidden(250 * sizeof(int));
491 Use(&bar);
492 LogHidden("Pre leaking", bar);
493 Pause();
494 HeapLeakChecker check("death_inverse");
495 void* foo = AllocHidden(100 * sizeof(int));
496 Use(&foo);
497 LogHidden("Leaking", foo);
498 DeAllocHidden(&bar);
499 Pause();
500 VerifyLeaks(&check, SAME_HEAP,
501 100 * static_cast<int64>(sizeof(int)),
503 DeAllocHidden(&foo);
506 // deallocates more than allocates
507 static void TestHeapLeakCheckerDeathNoLeaks() {
508 void* foo = AllocHidden(100 * sizeof(int));
509 Use(&foo);
510 void* bar = AllocHidden(250 * sizeof(int));
511 Use(&bar);
512 HeapLeakChecker check("death_noleaks");
513 DeAllocHidden(&bar);
514 CHECK_EQ(check.BriefNoLeaks(), true);
515 DeAllocHidden(&foo);
518 // ends with fewer objects
519 static void TestHeapLeakCheckerDeathCountLess() {
520 void* bar1 = AllocHidden(50 * sizeof(int));
521 Use(&bar1);
522 void* bar2 = AllocHidden(50 * sizeof(int));
523 Use(&bar2);
524 LogHidden("Pre leaking", bar1);
525 LogHidden("Pre leaking", bar2);
526 Pause();
527 HeapLeakChecker check("death_count_less");
528 void* foo = AllocHidden(100 * sizeof(int));
529 Use(&foo);
530 LogHidden("Leaking", foo);
531 DeAllocHidden(&bar1);
532 DeAllocHidden(&bar2);
533 Pause();
534 VerifyLeaks(&check, SAME_HEAP,
535 100 * sizeof(int),
537 DeAllocHidden(&foo);
540 // ends with more objects
541 static void TestHeapLeakCheckerDeathCountMore() {
542 void* foo = AllocHidden(100 * sizeof(int));
543 Use(&foo);
544 LogHidden("Pre leaking", foo);
545 Pause();
546 HeapLeakChecker check("death_count_more");
547 void* bar1 = AllocHidden(50 * sizeof(int));
548 Use(&bar1);
549 void* bar2 = AllocHidden(50 * sizeof(int));
550 Use(&bar2);
551 LogHidden("Leaking", bar1);
552 LogHidden("Leaking", bar2);
553 DeAllocHidden(&foo);
554 Pause();
555 VerifyLeaks(&check, SAME_HEAP,
556 100 * sizeof(int),
558 DeAllocHidden(&bar1);
559 DeAllocHidden(&bar2);
562 static void TestHiddenPointer() {
563 int i;
564 void* foo = &i;
565 HiddenPointer<void> p(foo);
566 CHECK_EQ(foo, p.get());
568 // Confirm pointer doesn't appear to contain a byte sequence
569 // that == the pointer. We don't really need to test that
570 // the xor trick itself works, as without it nothing in this
571 // test suite would work. See the Hide/Unhide/*Hidden* set
572 // of helper methods.
573 CHECK_NE(foo, *reinterpret_cast<void**>(&p));
576 // simple tests that deallocate what they allocated
577 static void TestHeapLeakChecker() {
578 { HeapLeakChecker check("trivial");
579 int foo = 5;
580 int* p = &foo;
581 Use(&p);
582 Pause();
583 CHECK(check.BriefSameHeap());
585 Pause();
586 { HeapLeakChecker check("simple");
587 void* foo = AllocHidden(100 * sizeof(int));
588 Use(&foo);
589 void* bar = AllocHidden(200 * sizeof(int));
590 Use(&bar);
591 DeAllocHidden(&foo);
592 DeAllocHidden(&bar);
593 Pause();
594 CHECK(check.BriefSameHeap());
598 // no false positives
599 static void TestHeapLeakCheckerNoFalsePositives() {
600 { HeapLeakChecker check("trivial_p");
601 int foo = 5;
602 int* p = &foo;
603 Use(&p);
604 Pause();
605 CHECK(check.BriefSameHeap());
607 Pause();
608 { HeapLeakChecker check("simple_p");
609 void* foo = AllocHidden(100 * sizeof(int));
610 Use(&foo);
611 void* bar = AllocHidden(200 * sizeof(int));
612 Use(&bar);
613 DeAllocHidden(&foo);
614 DeAllocHidden(&bar);
615 Pause();
616 CHECK(check.SameHeap());
620 // test that we detect leaks when we have same total # of bytes and
621 // objects, but different individual object sizes
622 static void TestLeakButTotalsMatch() {
623 void* bar1 = AllocHidden(240 * sizeof(int));
624 Use(&bar1);
625 void* bar2 = AllocHidden(160 * sizeof(int));
626 Use(&bar2);
627 LogHidden("Pre leaking", bar1);
628 LogHidden("Pre leaking", bar2);
629 Pause();
630 HeapLeakChecker check("trick");
631 void* foo1 = AllocHidden(280 * sizeof(int));
632 Use(&foo1);
633 void* foo2 = AllocHidden(120 * sizeof(int));
634 Use(&foo2);
635 LogHidden("Leaking", foo1);
636 LogHidden("Leaking", foo2);
637 DeAllocHidden(&bar1);
638 DeAllocHidden(&bar2);
639 Pause();
641 // foo1 and foo2 leaked
642 VerifyLeaks(&check, NO_LEAKS, (280+120)*sizeof(int), 2);
644 DeAllocHidden(&foo1);
645 DeAllocHidden(&foo2);
648 // no false negatives from pprof
649 static void TestHeapLeakCheckerDeathTrick() {
650 void* bar1 = AllocHidden(240 * sizeof(int));
651 Use(&bar1);
652 void* bar2 = AllocHidden(160 * sizeof(int));
653 Use(&bar2);
654 HeapLeakChecker check("death_trick");
655 DeAllocHidden(&bar1);
656 DeAllocHidden(&bar2);
657 void* foo1 = AllocHidden(280 * sizeof(int));
658 Use(&foo1);
659 void* foo2 = AllocHidden(120 * sizeof(int));
660 Use(&foo2);
661 // TODO(maxim): use the above if we make pprof work in automated test runs
662 if (!FLAGS_maybe_stripped) {
663 CHECK_EQ(RUN_SILENT(check, SameHeap), false);
664 // pprof checking should catch the leak
665 } else {
666 WARN_IF(RUN_SILENT(check, SameHeap) != false,
667 "death_trick leak is not caught; "
668 "we must be using a stripped binary");
670 DeAllocHidden(&foo1);
671 DeAllocHidden(&foo2);
674 // simple leak
675 static void TransLeaks() {
676 AllocHidden(1 * sizeof(char));
679 // range-based disabling using Disabler
680 static void ScopedDisabledLeaks() {
681 HeapLeakChecker::Disabler disabler;
682 AllocHidden(3 * sizeof(int));
683 TransLeaks();
684 (void)malloc(10); // Direct leak
687 // have different disabled leaks
688 static void* RunDisabledLeaks(void* a) {
689 ScopedDisabledLeaks();
690 return a;
693 // have different disabled leaks inside of a thread
694 static void ThreadDisabledLeaks() {
695 if (FLAGS_no_threads) return;
696 pthread_t tid;
697 pthread_attr_t attr;
698 CHECK_EQ(pthread_attr_init(&attr), 0);
699 CHECK_EQ(pthread_create(&tid, &attr, RunDisabledLeaks, NULL), 0);
700 void* res;
701 CHECK_EQ(pthread_join(tid, &res), 0);
704 // different disabled leaks (some in threads)
705 static void TestHeapLeakCheckerDisabling() {
706 HeapLeakChecker check("disabling");
708 RunDisabledLeaks(NULL);
709 RunDisabledLeaks(NULL);
710 ThreadDisabledLeaks();
711 RunDisabledLeaks(NULL);
712 ThreadDisabledLeaks();
713 ThreadDisabledLeaks();
715 Pause();
717 CHECK(check.SameHeap());
720 typedef set<int> IntSet;
722 static int some_ints[] = { 1, 2, 3, 21, 22, 23, 24, 25 };
724 static void DoTestSTLAlloc() {
725 IntSet* x = new(initialized) IntSet[1];
726 *x = IntSet(some_ints, some_ints + 6);
727 for (int i = 0; i < 1000; i++) {
728 x->insert(i*3);
730 delete [] x;
733 // Check that normal STL usage does not result in a leak report.
734 // (In particular we test that STL's own allocator, running on top of our
735 // allocator with its heap-profiler hooks, does not produce a false leak
736 // report in this case.)
737 static void TestSTLAlloc() {
738 HeapLeakChecker check("stl");
739 RunHidden(NewCallback(DoTestSTLAlloc));
740 CHECK_EQ(check.BriefSameHeap(), true);
743 static void DoTestSTLAllocInverse(IntSet** setx) {
744 IntSet* x = new(initialized) IntSet[1];
745 *x = IntSet(some_ints, some_ints + 3);
746 for (int i = 0; i < 100; i++) {
747 x->insert(i*2);
749 Hide(&x);
750 *setx = x;
753 static void FreeTestSTLAllocInverse(IntSet** setx) {
754 IntSet* x = *setx;
755 UnHide(&x);
756 delete [] x;
759 // Check that normal leaked STL usage *does* result in a leak report.
760 // (In particular we test that STL's own allocator, running on top of our
761 // allocator with its heap-profiler hooks, does not falsely hide the leak
762 // report in this case.)
763 static void TestSTLAllocInverse() {
764 HeapLeakChecker check("death_inverse_stl");
765 IntSet* x;
766 RunHidden(NewCallback(DoTestSTLAllocInverse, &x));
767 LogHidden("Leaking", x);
768 if (can_create_leaks_reliably) {
769 WipeStack(); // to help with can_create_leaks_reliably
770 // these might still fail occasionally, but it should be very rare
771 CHECK_EQ(RUN_SILENT(check, BriefNoLeaks), false);
772 CHECK_GE(check.BytesLeaked(), 100 * sizeof(int));
773 CHECK_GE(check.ObjectsLeaked(), 100);
774 // assumes set<>s are represented by some kind of binary tree
775 // or something else allocating >=1 heap object per set object
776 } else {
777 WARN_IF(RUN_SILENT(check, BriefNoLeaks) != false,
778 "Expected leaks not found: "
779 "Some liveness flood must be too optimistic");
781 RunHidden(NewCallback(FreeTestSTLAllocInverse, &x));
784 template<class Alloc>
785 static void DirectTestSTLAlloc(Alloc allocator, const char* name) {
786 HeapLeakChecker check((string("direct_stl-") + name).c_str());
787 static const int kSize = 1000;
788 typename Alloc::pointer ptrs[kSize];
789 for (int i = 0; i < kSize; ++i) {
790 typename Alloc::pointer p = allocator.allocate(i*3+1);
791 HeapLeakChecker::IgnoreObject(p);
792 // This will crash if p is not known to heap profiler:
793 // (i.e. STL's "allocator" does not have a direct hook to heap profiler)
794 HeapLeakChecker::UnIgnoreObject(p);
795 ptrs[i] = p;
797 for (int i = 0; i < kSize; ++i) {
798 allocator.deallocate(ptrs[i], i*3+1);
799 ptrs[i] = NULL;
801 CHECK(check.BriefSameHeap()); // just in case
804 static struct group* grp = NULL;
805 static const int kKeys = 50;
806 static pthread_key_t key[kKeys];
808 static void KeyFree(void* ptr) {
809 delete [] reinterpret_cast<char*>(ptr);
812 static bool key_init_has_run = false;
814 static void KeyInit() {
815 for (int i = 0; i < kKeys; ++i) {
816 CHECK_EQ(pthread_key_create(&key[i], KeyFree), 0);
817 VLOG(2) << "pthread key " << i << " : " << key[i];
819 key_init_has_run = true; // needed for a sanity-check
822 // force various C library static and thread-specific allocations
823 static void TestLibCAllocate() {
824 CHECK(key_init_has_run);
825 for (int i = 0; i < kKeys; ++i) {
826 void* p = pthread_getspecific(key[i]);
827 if (NULL == p) {
828 if (i == 0) {
829 // Test-logging inside threads which (potentially) creates and uses
830 // thread-local data inside standard C++ library:
831 VLOG(0) << "Adding pthread-specifics for thread " << pthread_self()
832 << " pid " << getpid();
834 p = new(initialized) char[77 + i];
835 VLOG(2) << "pthread specific " << i << " : " << p;
836 pthread_setspecific(key[i], p);
840 strerror(errno);
841 const time_t now = time(NULL);
842 ctime(&now);
843 #ifdef HAVE_EXECINFO_H
844 void *stack[1];
845 backtrace(stack, 1);
846 #endif
847 #ifdef HAVE_GRP_H
848 gid_t gid = getgid();
849 getgrgid(gid);
850 if (grp == NULL) grp = getgrent(); // a race condition here is okay
851 getgrnam(grp->gr_name);
852 #endif
853 #ifdef HAVE_PWD_H
854 getpwuid(geteuid());
855 #endif
858 // Continuous random heap memory activity to try to disrupt heap checking.
859 static void* HeapBusyThreadBody(void* a) {
860 const int thread_num = reinterpret_cast<intptr_t>(a);
861 VLOG(0) << "A new HeapBusyThread " << thread_num;
862 TestLibCAllocate();
864 int user = 0;
865 // Try to hide ptr from heap checker in a CPU register:
866 // Here we are just making a best effort to put the only pointer
867 // to a heap object into a thread register to test
868 // the thread-register finding machinery in the heap checker.
869 #if defined(__i386__) && defined(__GNUC__)
870 register int** ptr asm("esi");
871 #elif defined(__x86_64__) && defined(__GNUC__)
872 register int** ptr asm("r15");
873 #else
874 register int** ptr;
875 #endif
876 ptr = NULL;
877 typedef set<int> Set;
878 Set s1;
879 while (1) {
880 // TestLibCAllocate() calls libc functions that don't work so well
881 // after main() has exited. So we just don't do the test then.
882 if (!g_have_exited_main)
883 TestLibCAllocate();
885 if (ptr == NULL) {
886 ptr = new(initialized) int*[1];
887 *ptr = new(initialized) int[1];
889 set<int>* s2 = new(initialized) set<int>[1];
890 s1.insert(random());
891 s2->insert(*s1.begin());
892 user += *s2->begin();
893 **ptr += user;
894 if (random() % 51 == 0) {
895 s1.clear();
896 if (random() % 2 == 0) {
897 s1.~Set();
898 new(&s1) Set;
901 VLOG(3) << pthread_self() << " (" << getpid() << "): in wait: "
902 << ptr << ", " << *ptr << "; " << s1.size();
903 VLOG(2) << pthread_self() << " (" << getpid() << "): in wait, ptr = "
904 << reinterpret_cast<void*>(
905 reinterpret_cast<uintptr_t>(ptr) ^ kHideMask)
906 << "^" << reinterpret_cast<void*>(kHideMask);
907 if (FLAGS_test_register_leak && thread_num % 5 == 0) {
908 // Hide the register "ptr" value with an xor mask.
909 // If one provides --test_register_leak flag, the test should
910 // (with very high probability) crash on some leak check
911 // with a leak report (of some x * sizeof(int) + y * sizeof(int*) bytes)
912 // pointing at the two lines above in this function
913 // with "new(initialized) int" in them as the allocators
914 // of the leaked objects.
915 // CAVEAT: We can't really prevent the compiler from saving some
916 // temporary values of "ptr" on the stack, which can let us find
917 // the heap objects without going through the register.
918 // Hence it's normal if for certain compilers or optimization modes
919 // --test_register_leak does not cause a leak crash of the above form
920 // (this happens e.g. for gcc 4.0.1 in opt mode).
921 ptr = reinterpret_cast<int **>(
922 reinterpret_cast<uintptr_t>(ptr) ^ kHideMask);
923 // busy loop to get the thread interrupted at:
924 for (int i = 1; i < 10000000; ++i) user += (1 + user * user * 5) / i;
925 ptr = reinterpret_cast<int **>(
926 reinterpret_cast<uintptr_t>(ptr) ^ kHideMask);
927 } else {
928 poll(NULL, 0, random() % 100);
930 VLOG(2) << pthread_self() << ": continuing";
931 if (random() % 3 == 0) {
932 delete [] *ptr;
933 delete [] ptr;
934 ptr = NULL;
936 delete [] s2;
938 return a;
941 static void RunHeapBusyThreads() {
942 KeyInit();
943 if (!FLAGS_interfering_threads || FLAGS_no_threads) return;
945 const int n = 17; // make many threads
947 pthread_t tid;
948 pthread_attr_t attr;
949 CHECK_EQ(pthread_attr_init(&attr), 0);
950 // make them and let them run
951 for (int i = 0; i < n; ++i) {
952 VLOG(0) << "Creating extra thread " << i + 1;
953 CHECK(pthread_create(&tid, &attr, HeapBusyThreadBody,
954 reinterpret_cast<void*>(i)) == 0);
957 Pause();
958 Pause();
961 // ========================================================================= //
963 // This code section tests that objects that are reachable from global
964 // variables are not reported as leaks,
965 // and that (Un)IgnoreObject work fine for such objects.
967 // Object-making functions:
968 // each returns a "weird" pointer to a new object for which
969 // it's worth checking that the object is reachable via that pointer.
970 typedef void* (*ObjMakerFunc)();
971 static list<ObjMakerFunc> obj_makers; // list of registered object makers
973 // Helper macro to register an object-making function.
974 // 'name' is an identifier of this object maker,
975 // 'body' is its function body, which must declare
976 // a pointer 'p' to the next object to return.
977 // Usage example:
978 // REGISTER_OBJ_MAKER(trivial, int* p = new(initialized) int;)
979 #define REGISTER_OBJ_MAKER(name, body) \
980 void* ObjMaker_##name##_() { \
981 VLOG(1) << "Obj making " << #name; \
982 body; \
983 return p; \
985 static ObjMakerRegistrar maker_reg_##name##__(&ObjMaker_##name##_);
986 // helper class for REGISTER_OBJ_MAKER
987 struct ObjMakerRegistrar {
988 ObjMakerRegistrar(ObjMakerFunc obj_maker) { obj_makers.push_back(obj_maker); }
991 // List of the objects/pointers made with all the obj_makers
992 // to test reachability via global data pointers during leak checks.
993 static list<void*>* live_objects = new list<void*>;
994 // pointer so that it does not get destructed on exit
996 // Exerciser for one ObjMakerFunc.
997 static void TestPointerReach(ObjMakerFunc obj_maker) {
998 HeapLeakChecker::IgnoreObject(obj_maker()); // test IgnoreObject
1000 void* obj = obj_maker();
1001 HeapLeakChecker::IgnoreObject(obj);
1002 HeapLeakChecker::UnIgnoreObject(obj); // test UnIgnoreObject
1003 HeapLeakChecker::IgnoreObject(obj); // not to need deletion for obj
1005 live_objects->push_back(obj_maker()); // test reachability at leak check
1008 // Test all ObjMakerFuncs registered via REGISTER_OBJ_MAKER.
1009 static void TestObjMakers() {
1010 for (list<ObjMakerFunc>::const_iterator i = obj_makers.begin();
1011 i != obj_makers.end(); ++i) {
1012 TestPointerReach(*i);
1013 TestPointerReach(*i); // a couple more times would not hurt
1014 TestPointerReach(*i);
1018 // A dummy class to mimic the allocation behavior of strings.
1019 template<class T>
1020 struct Array {
1021 Array() {
1022 size = 3 + random() % 30;
1023 ptr = new(initialized) T[size];
1025 ~Array() { delete [] ptr; }
1026 Array(const Array& x) {
1027 size = x.size;
1028 ptr = new(initialized) T[size];
1029 for (size_t i = 0; i < size; ++i) {
1030 ptr[i] = x.ptr[i];
1033 void operator=(const Array& x) {
1034 delete [] ptr;
1035 size = x.size;
1036 ptr = new(initialized) T[size];
1037 for (size_t i = 0; i < size; ++i) {
1038 ptr[i] = x.ptr[i];
1041 void append(const Array& x) {
1042 T* p = new(initialized) T[size + x.size];
1043 for (size_t i = 0; i < size; ++i) {
1044 p[i] = ptr[i];
1046 for (size_t i = 0; i < x.size; ++i) {
1047 p[size+i] = x.ptr[i];
1049 size += x.size;
1050 delete [] ptr;
1051 ptr = p;
1053 private:
1054 size_t size;
1055 T* ptr;
1058 // to test pointers to objects, built-in arrays, string, etc:
1059 REGISTER_OBJ_MAKER(plain, int* p = new(initialized) int;)
1060 REGISTER_OBJ_MAKER(int_array_1, int* p = new(initialized) int[1];)
1061 REGISTER_OBJ_MAKER(int_array, int* p = new(initialized) int[10];)
1062 REGISTER_OBJ_MAKER(string, Array<char>* p = new(initialized) Array<char>();)
1063 REGISTER_OBJ_MAKER(string_array,
1064 Array<char>* p = new(initialized) Array<char>[5];)
1065 REGISTER_OBJ_MAKER(char_array, char* p = new(initialized) char[5];)
1066 REGISTER_OBJ_MAKER(appended_string,
1067 Array<char>* p = new Array<char>();
1068 p->append(Array<char>());
1070 REGISTER_OBJ_MAKER(plain_ptr, int** p = new(initialized) int*;)
1071 REGISTER_OBJ_MAKER(linking_ptr,
1072 int** p = new(initialized) int*;
1073 *p = new(initialized) int;
1076 // small objects:
1077 REGISTER_OBJ_MAKER(0_sized, void* p = malloc(0);) // 0-sized object (important)
1078 REGISTER_OBJ_MAKER(1_sized, void* p = malloc(1);)
1079 REGISTER_OBJ_MAKER(2_sized, void* p = malloc(2);)
1080 REGISTER_OBJ_MAKER(3_sized, void* p = malloc(3);)
1081 REGISTER_OBJ_MAKER(4_sized, void* p = malloc(4);)
1083 static int set_data[] = { 1, 2, 3, 4, 5, 6, 7, 21, 22, 23, 24, 25, 26, 27 };
1084 static set<int> live_leak_set(set_data, set_data+7);
1085 static const set<int> live_leak_const_set(set_data, set_data+14);
1087 REGISTER_OBJ_MAKER(set,
1088 set<int>* p = new(initialized) set<int>(set_data, set_data + 13);
1091 class ClassA {
1092 public:
1093 explicit ClassA(int a) : ptr(NULL) { }
1094 mutable char* ptr;
1096 static const ClassA live_leak_mutable(1);
1098 template<class C>
1099 class TClass {
1100 public:
1101 explicit TClass(int a) : ptr(NULL) { }
1102 mutable C val;
1103 mutable C* ptr;
1105 static const TClass<Array<char> > live_leak_templ_mutable(1);
1107 class ClassB {
1108 public:
1109 ClassB() { }
1110 char b[7];
1111 virtual void f() { }
1112 virtual ~ClassB() { }
1115 class ClassB2 {
1116 public:
1117 ClassB2() { }
1118 char b2[11];
1119 virtual void f2() { }
1120 virtual ~ClassB2() { }
1123 class ClassD1 : public ClassB {
1124 char d1[15];
1125 virtual void f() { }
1128 class ClassD2 : public ClassB2 {
1129 char d2[19];
1130 virtual void f2() { }
1133 class ClassD : public ClassD1, public ClassD2 {
1134 char d[3];
1135 virtual void f() { }
1136 virtual void f2() { }
1139 // to test pointers to objects of base and derived classes:
1141 REGISTER_OBJ_MAKER(B, ClassB* p = new(initialized) ClassB;)
1142 REGISTER_OBJ_MAKER(D1, ClassD1* p = new(initialized) ClassD1;)
1143 REGISTER_OBJ_MAKER(D2, ClassD2* p = new(initialized) ClassD2;)
1144 REGISTER_OBJ_MAKER(D, ClassD* p = new(initialized) ClassD;)
1146 REGISTER_OBJ_MAKER(D1_as_B, ClassB* p = new(initialized) ClassD1;)
1147 REGISTER_OBJ_MAKER(D2_as_B2, ClassB2* p = new(initialized) ClassD2;)
1148 REGISTER_OBJ_MAKER(D_as_B, ClassB* p = new(initialized) ClassD;)
1149 REGISTER_OBJ_MAKER(D_as_D1, ClassD1* p = new(initialized) ClassD;)
1150 // inside-object pointers:
1151 REGISTER_OBJ_MAKER(D_as_B2, ClassB2* p = new(initialized) ClassD;)
1152 REGISTER_OBJ_MAKER(D_as_D2, ClassD2* p = new(initialized) ClassD;)
1154 class InterfaceA {
1155 public:
1156 virtual void A() = 0;
1157 virtual ~InterfaceA() { }
1158 protected:
1159 InterfaceA() { }
1162 class InterfaceB {
1163 public:
1164 virtual void B() = 0;
1165 virtual ~InterfaceB() { }
1166 protected:
1167 InterfaceB() { }
1170 class InterfaceC : public InterfaceA {
1171 public:
1172 virtual void C() = 0;
1173 virtual ~InterfaceC() { }
1174 protected:
1175 InterfaceC() { }
1178 class ClassMltD1 : public ClassB, public InterfaceB, public InterfaceC {
1179 public:
1180 char d1[11];
1181 virtual void f() { }
1182 virtual void A() { }
1183 virtual void B() { }
1184 virtual void C() { }
1187 class ClassMltD2 : public InterfaceA, public InterfaceB, public ClassB {
1188 public:
1189 char d2[15];
1190 virtual void f() { }
1191 virtual void A() { }
1192 virtual void B() { }
1195 // to specifically test heap reachability under
1196 // interface-only multiple inheritance (some use inside-object pointers):
1197 REGISTER_OBJ_MAKER(MltD1, ClassMltD1* p = new(initialized) ClassMltD1;)
1198 REGISTER_OBJ_MAKER(MltD1_as_B, ClassB* p = new(initialized) ClassMltD1;)
1199 REGISTER_OBJ_MAKER(MltD1_as_IA, InterfaceA* p = new(initialized) ClassMltD1;)
1200 REGISTER_OBJ_MAKER(MltD1_as_IB, InterfaceB* p = new(initialized) ClassMltD1;)
1201 REGISTER_OBJ_MAKER(MltD1_as_IC, InterfaceC* p = new(initialized) ClassMltD1;)
1203 REGISTER_OBJ_MAKER(MltD2, ClassMltD2* p = new(initialized) ClassMltD2;)
1204 REGISTER_OBJ_MAKER(MltD2_as_B, ClassB* p = new(initialized) ClassMltD2;)
1205 REGISTER_OBJ_MAKER(MltD2_as_IA, InterfaceA* p = new(initialized) ClassMltD2;)
1206 REGISTER_OBJ_MAKER(MltD2_as_IB, InterfaceB* p = new(initialized) ClassMltD2;)
1208 // to mimic UnicodeString defined in third_party/icu,
1209 // which stores a platform-independent-sized refcount in the first
1210 // few bytes and keeps a pointer pointing past the refcount.
1211 REGISTER_OBJ_MAKER(unicode_string,
1212 char* p = new char[sizeof(uint32) * 10];
1213 p += sizeof(uint32);
1215 // similar, but for platform-dependent-sized refcount
1216 REGISTER_OBJ_MAKER(ref_counted,
1217 char* p = new char[sizeof(int) * 20];
1218 p += sizeof(int);
1221 struct Nesting {
1222 struct Inner {
1223 Nesting* parent;
1224 Inner(Nesting* p) : parent(p) {}
1226 Inner i0;
1227 char n1[5];
1228 Inner i1;
1229 char n2[11];
1230 Inner i2;
1231 char n3[27];
1232 Inner i3;
1233 Nesting() : i0(this), i1(this), i2(this), i3(this) {}
1236 // to test inside-object pointers pointing at objects nested into heap objects:
1237 REGISTER_OBJ_MAKER(nesting_i0, Nesting::Inner* p = &((new Nesting())->i0);)
1238 REGISTER_OBJ_MAKER(nesting_i1, Nesting::Inner* p = &((new Nesting())->i1);)
1239 REGISTER_OBJ_MAKER(nesting_i2, Nesting::Inner* p = &((new Nesting())->i2);)
1240 REGISTER_OBJ_MAKER(nesting_i3, Nesting::Inner* p = &((new Nesting())->i3);)
1242 // allocate many objects reachable from global data
1243 static void TestHeapLeakCheckerLiveness() {
1244 live_leak_mutable.ptr = new(initialized) char[77];
1245 live_leak_templ_mutable.ptr = new(initialized) Array<char>();
1246 live_leak_templ_mutable.val = Array<char>();
1248 TestObjMakers();
1251 // ========================================================================= //
1253 // Get address (PC value) following the mmap call into addr_after_mmap_call
1254 static void* Mmapper(uintptr_t* addr_after_mmap_call) {
1255 void* r = mmap(NULL, 100, PROT_READ|PROT_WRITE,
1256 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
1257 // Get current PC value into addr_after_mmap_call
1258 void* stack[1];
1259 CHECK_EQ(GetStackTrace(stack, 1, 0), 1);
1260 *addr_after_mmap_call = reinterpret_cast<uintptr_t>(stack[0]);
1261 sleep(0); // undo -foptimize-sibling-calls
1262 return r;
1265 // called via a pointer to keep the compiler from inlining Mmapper
1266 static void* (*mmapper_addr)(uintptr_t* addr) = &Mmapper;
1268 // TODO(maxim): copy/move this to memory_region_map_unittest
1269 // TODO(maxim): expand this test to include mmap64, mremap and sbrk calls.
1270 static void VerifyMemoryRegionMapStackGet() {
1271 uintptr_t caller_addr_limit;
1272 void* addr = (*mmapper_addr)(&caller_addr_limit);
1273 uintptr_t caller = 0;
1274 { MemoryRegionMap::LockHolder l;
1275 for (MemoryRegionMap::RegionIterator
1276 i = MemoryRegionMap::BeginRegionLocked();
1277 i != MemoryRegionMap::EndRegionLocked(); ++i) {
1278 if (i->start_addr == reinterpret_cast<uintptr_t>(addr)) {
1279 CHECK_EQ(caller, 0);
1280 caller = i->caller();
1284 // caller must point into Mmapper function:
1285 if (!(reinterpret_cast<uintptr_t>(mmapper_addr) <= caller &&
1286 caller < caller_addr_limit)) {
1287 LOGF << std::hex << "0x" << caller
1288 << " does not seem to point into code of function Mmapper at "
1289 << "0x" << reinterpret_cast<uintptr_t>(mmapper_addr)
1290 << "! Stack frame collection must be off in MemoryRegionMap!";
1291 LOG(FATAL, "\n");
1293 munmap(addr, 100);
1296 static void* Mallocer(uintptr_t* addr_after_malloc_call) {
1297 void* r = malloc(100);
1298 sleep(0); // undo -foptimize-sibling-calls
1299 // Get current PC value into addr_after_malloc_call
1300 void* stack[1];
1301 CHECK_EQ(GetStackTrace(stack, 1, 0), 1);
1302 *addr_after_malloc_call = reinterpret_cast<uintptr_t>(stack[0]);
1303 return r;
1306 // called via a pointer to keep the compiler from inlining Mallocer
1307 static void* (*mallocer_addr)(uintptr_t* addr) = &Mallocer;
1309 // non-static for friendship with HeapProfiler
1310 // TODO(maxim): expand this test to include
1311 // realloc, calloc, memalign, valloc, pvalloc, new, and new[].
1312 extern void VerifyHeapProfileTableStackGet() {
1313 uintptr_t caller_addr_limit;
1314 void* addr = (*mallocer_addr)(&caller_addr_limit);
1315 uintptr_t caller =
1316 reinterpret_cast<uintptr_t>(HeapLeakChecker::GetAllocCaller(addr));
1317 // caller must point into Mallocer function:
1318 if (!(reinterpret_cast<uintptr_t>(mallocer_addr) <= caller &&
1319 caller < caller_addr_limit)) {
1320 LOGF << std::hex << "0x" << caller
1321 << " does not seem to point into code of function Mallocer at "
1322 << "0x" << reinterpret_cast<uintptr_t>(mallocer_addr)
1323 << "! Stack frame collection must be off in heap profiler!";
1324 LOG(FATAL, "\n");
1326 free(addr);
1329 // ========================================================================= //
1331 static void MakeALeak(void** arr) {
1332 PreventHeapReclaiming(10 * sizeof(int));
1333 void* a = new(initialized) int[10];
1334 Hide(&a);
1335 *arr = a;
1338 // Helper to do 'return 0;' inside main(): instead we do 'return Pass();'
1339 static int Pass() {
1340 fprintf(stdout, "PASS\n");
1341 g_have_exited_main = true;
1342 return 0;
1345 int main(int argc, char** argv) {
1346 run_hidden_ptr = DoRunHidden;
1347 wipe_stack_ptr = DoWipeStack;
1348 if (!HeapLeakChecker::IsActive()) {
1349 CHECK_EQ(FLAGS_heap_check, "");
1350 LOG(WARNING, "HeapLeakChecker got turned off; we won't test much...");
1351 } else {
1352 VerifyMemoryRegionMapStackGet();
1353 VerifyHeapProfileTableStackGet();
1356 KeyInit();
1358 // glibc 2.4, on x86_64 at least, has a lock-ordering bug, which
1359 // means deadlock is possible when one thread calls dl_open at the
1360 // same time another thread is calling dl_iterate_phdr. libunwind
1361 // calls dl_iterate_phdr, and TestLibCAllocate calls dl_open (or the
1362 // various syscalls in it do), at least the first time it's run.
1363 // To avoid the deadlock, we run TestLibCAllocate once before getting
1364 // multi-threaded.
1365 // TODO(csilvers): once libc is fixed, or libunwind can work around it,
1366 // get rid of this early call. We *want* our test to
1367 // find potential problems like this one!
1368 TestLibCAllocate();
1370 if (FLAGS_interfering_threads) {
1371 RunHeapBusyThreads(); // add interference early
1373 TestLibCAllocate();
1375 LOGF << "In main(): heap_check=" << FLAGS_heap_check << endl;
1377 CHECK(HeapLeakChecker::NoGlobalLeaks()); // so far, so good
1379 if (FLAGS_test_leak) {
1380 void* arr;
1381 RunHidden(NewCallback(MakeALeak, &arr));
1382 Use(&arr);
1383 LogHidden("Leaking", arr);
1384 if (FLAGS_test_cancel_global_check) {
1385 HeapLeakChecker::CancelGlobalCheck();
1386 } else {
1387 // Verify we can call NoGlobalLeaks repeatedly without deadlocking
1388 HeapLeakChecker::NoGlobalLeaks();
1389 HeapLeakChecker::NoGlobalLeaks();
1391 return Pass();
1392 // whole-program leak-check should (with very high probability)
1393 // catch the leak of arr (10 * sizeof(int) bytes)
1394 // (when !FLAGS_test_cancel_global_check)
1397 if (FLAGS_test_loop_leak) {
1398 void* arr1;
1399 void* arr2;
1400 RunHidden(NewCallback(MakeDeathLoop, &arr1, &arr2));
1401 Use(&arr1);
1402 Use(&arr2);
1403 LogHidden("Loop leaking", arr1);
1404 LogHidden("Loop leaking", arr2);
1405 if (FLAGS_test_cancel_global_check) {
1406 HeapLeakChecker::CancelGlobalCheck();
1407 } else {
1408 // Verify we can call NoGlobalLeaks repeatedly without deadlocking
1409 HeapLeakChecker::NoGlobalLeaks();
1410 HeapLeakChecker::NoGlobalLeaks();
1412 return Pass();
1413 // whole-program leak-check should (with very high probability)
1414 // catch the leak of arr1 and arr2 (4 * sizeof(void*) bytes)
1415 // (when !FLAGS_test_cancel_global_check)
1418 if (FLAGS_test_register_leak) {
1419 // make us fail only where the .sh test expects:
1420 Pause();
1421 for (int i = 0; i < 100; ++i) { // give it some time to crash
1422 CHECK(HeapLeakChecker::NoGlobalLeaks());
1423 Pause();
1425 return Pass();
1428 TestHeapLeakCheckerLiveness();
1430 HeapLeakChecker heap_check("all");
1432 TestHiddenPointer();
1434 TestHeapLeakChecker();
1435 Pause();
1436 TestLeakButTotalsMatch();
1437 Pause();
1439 TestHeapLeakCheckerDeathSimple();
1440 Pause();
1441 TestHeapLeakCheckerDeathLoop();
1442 Pause();
1443 TestHeapLeakCheckerDeathInverse();
1444 Pause();
1445 TestHeapLeakCheckerDeathNoLeaks();
1446 Pause();
1447 TestHeapLeakCheckerDeathCountLess();
1448 Pause();
1449 TestHeapLeakCheckerDeathCountMore();
1450 Pause();
1452 TestHeapLeakCheckerDeathTrick();
1453 Pause();
1455 CHECK(HeapLeakChecker::NoGlobalLeaks()); // so far, so good
1457 TestHeapLeakCheckerNoFalsePositives();
1458 Pause();
1460 TestHeapLeakCheckerDisabling();
1461 Pause();
1463 TestSTLAlloc();
1464 Pause();
1465 TestSTLAllocInverse();
1466 Pause();
1468 // Test that various STL allocators work. Some of these are redundant, but
1469 // we don't know how STL might change in the future. For example,
1470 // http://wiki/Main/StringNeStdString.
1471 #define DTSL(a) { DirectTestSTLAlloc(a, #a); \
1472 Pause(); }
1473 DTSL(std::allocator<char>());
1474 DTSL(std::allocator<int>());
1475 DTSL(std::string().get_allocator());
1476 DTSL(string().get_allocator());
1477 DTSL(vector<int>().get_allocator());
1478 DTSL(vector<double>().get_allocator());
1479 DTSL(vector<vector<int> >().get_allocator());
1480 DTSL(vector<string>().get_allocator());
1481 DTSL((map<string, string>().get_allocator()));
1482 DTSL((map<string, int>().get_allocator()));
1483 DTSL(set<char>().get_allocator());
1484 #undef DTSL
1486 TestLibCAllocate();
1487 Pause();
1489 CHECK(HeapLeakChecker::NoGlobalLeaks()); // so far, so good
1491 Pause();
1493 if (!FLAGS_maybe_stripped) {
1494 CHECK(heap_check.SameHeap());
1495 } else {
1496 WARN_IF(heap_check.SameHeap() != true,
1497 "overall leaks are caught; we must be using a stripped binary");
1500 CHECK(HeapLeakChecker::NoGlobalLeaks()); // so far, so good
1502 return Pass();