2 This file is part of Valgrind, a dynamic binary instrumentation
5 Copyright (C) 2008-2008 Google Inc
8 This program is free software; you can redistribute it and/or
9 modify it under the terms of the GNU General Public License as
10 published by the Free Software Foundation; either version 2 of the
11 License, or (at your option) any later version.
13 This program is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
23 The GNU General Public License is contained in the file COPYING.
26 // Author: Konstantin Serebryany <opensource@google.com>
28 // This file contains a set of unit tests for a data race detection tool.
32 // This test can be compiled with pthreads (default) or
33 // with any other library that supports threads, locks, cond vars, etc.
35 // To compile with pthreads:
36 // g++ racecheck_unittest.cc dynamic_annotations.cc
37 // -lpthread -g -DDYNAMIC_ANNOTATIONS=1
39 // To compile with a different library:
40 // 1. cp thread_wrappers_pthread.h thread_wrappers_yourlib.h
41 // 2. edit thread_wrappers_yourlib.h
42 // 3. add '-DTHREAD_WRAPPERS="thread_wrappers_yourlib.h"' to your compilation.
46 // This test must not include any other file specific to the threading library;
47 // everything should be inside THREAD_WRAPPERS.
48 #ifndef THREAD_WRAPPERS
49 # define THREAD_WRAPPERS "thread_wrappers_pthread.h"
51 #include THREAD_WRAPPERS
53 #ifndef NEEDS_SEPERATE_RW_LOCK
54 #define RWLock Mutex // Mutex does work as an rw-lock.
55 #define WriterLockScoped MutexLock
56 #define ReaderLockScoped ReaderMutexLock
57 #endif // !NEEDS_SEPERATE_RW_LOCK
60 // Helgrind memory usage testing stuff
61 // If not present in dynamic_annotations.h/.cc - ignore
62 #ifndef ANNOTATE_RESET_STATS
63 #define ANNOTATE_RESET_STATS() do { } while(0)
65 #ifndef ANNOTATE_PRINT_STATS
66 #define ANNOTATE_PRINT_STATS() do { } while(0)
68 #ifndef ANNOTATE_PRINT_MEMORY_USAGE
69 #define ANNOTATE_PRINT_MEMORY_USAGE(a) do { } while(0)
73 // A function that allows one to suppress gcc's warnings about
74 // unused return values in a portable way.
76 static inline void IGNORE_RETURN_VALUE(T v) {}
84 #include <cstring> // strlen(), index(), rindex()
87 #include <sys/types.h>
90 #include <sys/mman.h> // mmap
92 #include <stdint.h> // uintptr_t
101 #include <strings.h> // index(), rindex()
105 // - Stability tests (marked STAB)
106 // - Performance tests (marked PERF)
108 // - TN (true negative) : no race exists and the tool is silent.
109 // - TP (true positive) : a race exists and is reported.
110 // - FN (false negative): a race exists but is not reported.
111 // - FP (false positive): no race exists but the tool reports it.
113 // The feature tests are marked according to the behavior of helgrind 3.3.0.
115 // TP and FP tests are annotated with ANNOTATE_EXPECT_RACE,
116 // so, no error reports should be seen when running under helgrind.
118 // When some of the FP cases are fixed in helgrind we'll need
119 // to update this test.
121 // Each test resides in its own namespace.
122 // Namespaces are named test01, test02, ...
123 // Please *DO NOT* change the logic of existing tests or rename them.
124 // Create a new test instead.
126 // Some tests use sleep()/usleep().
127 // This is not synchronization, but a simple way to trigger
128 // some specific behaviour of the race detector's scheduler.
130 // Globals and utilities used by several tests. {{{1
135 typedef void (*void_func_void_t)(void);
139 PERFORMANCE       = 1 << 2,
140 EXCLUDE_FROM_ALL  = 1 << 3,
141 NEEDS_ANNOTATIONS = 1 << 4,
143 MEMORY_USAGE      = 1 << 6,
147 // Put everything into stderr.
149 #define printf(args...) \
152 fprintf(stderr, args);\
153 printf_mu.Unlock(); \
158 gettimeofday(&tv, NULL);
159 return (tv.tv_sec * 1000L) + (tv.tv_usec / 1000L);
165 Test(void_func_void_t f, int flags)
169 Test() : f_(0), flags_(0) {}
171 ANNOTATE_RESET_STATS();
172 if (flags_ & PERFORMANCE) {
173 long start = GetTimeInMs();
175 long end = GetTimeInMs();
176 printf ("Time: %4ldms\n", end-start);
179 if (flags_ & PRINT_STATS)
180 ANNOTATE_PRINT_STATS();
181 if (flags_ & MEMORY_USAGE)
182 ANNOTATE_PRINT_MEMORY_USAGE(0);
185 std::map<int, Test> TheMapOfTests;
187 #define NOINLINE __attribute__ ((noinline))
188 extern "C" void NOINLINE AnnotateSetVerbosity(const char *, int, int) {};
192 TestAdder(void_func_void_t f, int id, int flags = FEATURE) {
193 // AnnotateSetVerbosity(__FILE__, __LINE__, 0);
194 CHECK(TheMapOfTests.count(id) == 0);
195 TheMapOfTests[id] = Test(f, flags);
199 #define REGISTER_TEST(f, id) TestAdder add_test_##id (f, id);
200 #define REGISTER_TEST2(f, id, flags) TestAdder add_test_##id (f, id, flags);
202 static bool ArgIsOne(int *arg) { return *arg == 1; };
203 static bool ArgIsZero(int *arg) { return *arg == 0; };
204 static bool ArgIsTrue(bool *arg) { return *arg == true; };
206 // Call ANNOTATE_EXPECT_RACE only if 'machine' env variable is defined.
207 // Useful to test against several different machines.
208 // Supported machines so far:
209 // MSM_HYBRID1 -- aka MSMProp1
210 // MSM_HYBRID1_INIT_STATE -- aka MSMProp1 with --initialization-state=yes
211 // MSM_THREAD_SANITIZER -- ThreadSanitizer's state machine
212 #define ANNOTATE_EXPECT_RACE_FOR_MACHINE(mem, descr, machine) \
213 while(getenv(machine)) {\
214 ANNOTATE_EXPECT_RACE(mem, descr); \
218 #define ANNOTATE_EXPECT_RACE_FOR_TSAN(mem, descr) \
219 ANNOTATE_EXPECT_RACE_FOR_MACHINE(mem, descr, "MSM_THREAD_SANITIZER")
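// Usage sketch (RACEY is a hypothetical variable name; the real tests below use
// their own globals): mark a location where a report is expected so that running
// under the tool stays silent.
//   int RACEY = 0;
//   ANNOTATE_EXPECT_RACE_FOR_TSAN(&RACEY, "testNN. TP.");
//   // ...then start the racing threads; the race on RACEY is reported as expected.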
221 inline bool Tsan_PureHappensBefore() {
225 inline bool Tsan_FastMode() {
226 return getenv("TSAN_FAST_MODE") != NULL;
229 // Initialize *(mem) to 0 if Tsan_FastMode.
230 #define FAST_MODE_INIT(mem) do { if (Tsan_FastMode()) { *(mem) = 0; } } while(0)
232 #ifndef MAIN_INIT_ACTION
233 #define MAIN_INIT_ACTION
238 int main(int argc, char** argv) { // {{{1
240 printf("FLAGS [phb=%i, fm=%i]\n", Tsan_PureHappensBefore(), Tsan_FastMode());
241 if (argc == 2 && !strcmp(argv[1], "benchmark")) {
242 for (std::map<int,Test>::iterator it = TheMapOfTests.begin();
243 it != TheMapOfTests.end(); ++it) {
244 if(!(it->second.flags_ & PERFORMANCE)) continue;
247 } else if (argc == 2 && !strcmp(argv[1], "demo")) {
248 for (std::map<int,Test>::iterator it = TheMapOfTests.begin();
249 it != TheMapOfTests.end(); ++it) {
250 if(!(it->second.flags_ & RACE_DEMO)) continue;
253 } else if (argc > 1) {
254 // The tests to run are listed as command-line arguments.
255 for (int i = 1; i < argc; i++) {
256 int f_num = atoi(argv[i]);
257 CHECK(TheMapOfTests.count(f_num));
258 TheMapOfTests[f_num].Run();
261 bool run_tests_with_annotations = false;
262 if (getenv("DRT_ALLOW_ANNOTATIONS")) {
263 run_tests_with_annotations = true;
265 for (std::map<int,Test>::iterator it = TheMapOfTests.begin();
266 it != TheMapOfTests.end();
268 if(it->second.flags_ & EXCLUDE_FROM_ALL) continue;
269 if(it->second.flags_ & RACE_DEMO) continue;
270 if((it->second.flags_ & NEEDS_ANNOTATIONS)
271 && run_tests_with_annotations == false) continue;
277 #ifdef THREAD_WRAPPERS_PTHREAD_H
281 // An array of threads. Create/start/join all elements at once. {{{1
282 class MyThreadArray {
284 static const int kSize = 5;
285 typedef void (*F) (void);
286 MyThreadArray(F f1, F f2 = NULL, F f3 = NULL, F f4 = NULL, F f5 = NULL) {
287 ar_[0] = new MyThread(f1);
288 ar_[1] = f2 ? new MyThread(f2) : NULL;
289 ar_[2] = f3 ? new MyThread(f3) : NULL;
290 ar_[3] = f4 ? new MyThread(f4) : NULL;
291 ar_[4] = f5 ? new MyThread(f5) : NULL;
294 for(int i = 0; i < kSize; i++) {
303 for(int i = 0; i < kSize; i++) {
311 for(int i = 0; i < kSize; i++) {
316 MyThread *ar_[kSize];
325 printf("test00: negative\n");
326 printf("\tGLOB=%d\n", GLOB);
328 REGISTER_TEST(Run, 00)
329 } // namespace test00
332 // test01: TP. Simple race (write vs write). {{{1
342 const timespec delay = { 0, 100 * 1000 * 1000 };
343 nanosleep(&delay, 0);
348 FAST_MODE_INIT(&GLOB);
349 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test01. TP.");
350 ANNOTATE_TRACE_MEMORY(&GLOB);
351 printf("test01: positive\n");
353 const int tmp = GLOB;
354 printf("\tGLOB=%d\n", tmp);
356 REGISTER_TEST(Run, 1);
357 } // namespace test01
360 // test02: TN. Synchronization via CondVar. {{{1
363 // Two write accesses to GLOB are synchronized because
364 // the pair of CV.Signal() and CV.Wait() establishes a happens-before relation.
369 // 3. MU.Lock() a. write(GLOB)
372 // /--- d. CV.Signal()
373 // 4. while(COND) / e. MU.Unlock()
380 usleep(100000); // Make sure the waiter blocks.
393 pool.Add(NewCallback(Waker));
401 printf("test02: negative\n");
403 printf("\tGLOB=%d\n", GLOB);
405 REGISTER_TEST(Run, 2);
406 } // namespace test02
409 // test03: TN. Synchronization via LockWhen, signaller gets there first. {{{1
412 // Two write accesses to GLOB are synchronized via conditional critical section.
413 // Note that LockWhen() happens first (we use sleep(1) to make sure)!
421 // /--- d. MU.Unlock()
422 // 3. MU.LockWhen(COND==1) <---/
428 usleep(100000); // Make sure the waiter blocks.
432 COND = 1; // We are done! Tell the Waiter.
433 MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
439 pool.Add(NewCallback(Waker));
440 MU.LockWhen(Condition(&ArgIsOne, &COND)); // calls ANNOTATE_CONDVAR_WAIT
441 MU.Unlock(); // Waker is done!
446 printf("test03: negative\n");
448 printf("\tGLOB=%d\n", GLOB);
450 REGISTER_TEST2(Run, 3, FEATURE|NEEDS_ANNOTATIONS);
451 } // namespace test03
453 // test04: TN. Synchronization via PCQ. {{{1
456 ProducerConsumerQueue Q(INT_MAX);
457 // Two write accesses to GLOB are separated by PCQ Put/Get.
461 // 2. Q.Put() ---------\ .
462 // \-------> a. Q.Get()
477 printf("test04: negative\n");
478 MyThreadArray t(Putter, Getter);
481 printf("\tGLOB=%d\n", GLOB);
483 REGISTER_TEST(Run, 4);
484 } // namespace test04
487 // test05: FP. Synchronization via CondVar, but waiter does not block. {{{1
488 // Since CondVar::Wait() is not called, we get a false positive.
491 // Two write accesses to GLOB are synchronized via CondVar.
492 // But the race detector cannot see it.
493 // See this for details:
494 // http://www.valgrind.org/docs/manual/hg-manual.html#hg-manual.effective-use.
499 // 3. MU.Lock() a. write(GLOB)
503 // 4. while(COND) e. MU.Unlock()
504 // CV.Wait(MU) <<< not called
521 pool.Add(NewCallback(Waker));
522 usleep(100000); // Make sure the signaller gets there first.
530 FAST_MODE_INIT(&GLOB);
531 if (!Tsan_PureHappensBefore())
532 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test05. FP. Unavoidable in hybrid scheme.");
533 printf("test05: unavoidable false positive\n");
535 printf("\tGLOB=%d\n", GLOB);
537 REGISTER_TEST(Run, 5);
538 } // namespace test05
541 // test06: TN. Synchronization via CondVar, but Waker gets there first. {{{1
544 // Same as test05 but we annotated the Wait() loop.
549 // 3. MU.Lock() a. write(GLOB)
552 // /------- d. CV.Signal()
553 // 4. while(COND) / e. MU.Unlock()
554 // CV.Wait(MU) <<< not called /
555 // 6. ANNOTATE_CONDVAR_WAIT(CV, MU) <----/
573 pool.Add(NewCallback(Waker));
574 usleep(100000); // Make sure the signaller gets there first.
578 ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU);
584 printf("test06: negative\n");
586 printf("\tGLOB=%d\n", GLOB);
588 REGISTER_TEST2(Run, 6, FEATURE|NEEDS_ANNOTATIONS);
589 } // namespace test06
592 // test07: TN. Synchronization via LockWhen(), Signaller is observed first. {{{1
596 // Two write accesses to GLOB are synchronized via conditional critical section.
597 // LockWhen() is observed after COND has been set (due to sleep).
598 // Unlock() calls ANNOTATE_CONDVAR_SIGNAL().
600 // Waiter: Signaller:
602 // 2. Start(Signaller)
606 // /--- d. MU.Unlock calls ANNOTATE_CONDVAR_SIGNAL
607 // 3. MU.LockWhen(COND==1) <---/
615 COND = true; // We are done! Tell the Waiter.
616 MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
620 MyThread t(Signaller);
622 usleep(100000); // Make sure the signaller gets there first.
624 MU.LockWhen(Condition(&ArgIsTrue, &COND)); // calls ANNOTATE_CONDVAR_WAIT
625 MU.Unlock(); // Signaller is done!
627 GLOB = 2; // If LockWhen didn't catch the signal, a race may be reported here.
631 printf("test07: negative\n");
633 printf("\tGLOB=%d\n", GLOB);
635 REGISTER_TEST2(Run, 7, FEATURE|NEEDS_ANNOTATIONS);
636 } // namespace test07
638 // test08: TN. Synchronization via thread start/join. {{{1
641 // Three accesses to GLOB are separated by thread start/join.
645 // 2. Start(Worker) ------------>
647 // 3. Join(Worker) <------------
661 printf("test08: negative\n");
663 printf("\tGLOB=%d\n", GLOB);
665 REGISTER_TEST(Run, 8);
666 } // namespace test08
669 // test09: TP. Simple race (read vs write). {{{1
672 // A simple data race between writer and reader.
673 // Write happens after read (enforced by sleep).
674 // Usually, easily detectable by a race detector.
684 ANNOTATE_TRACE_MEMORY(&GLOB);
685 FAST_MODE_INIT(&GLOB);
686 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test09. TP.");
687 printf("test09: positive\n");
688 MyThreadArray t(Writer, Reader);
691 printf("\tGLOB=%d\n", GLOB);
693 REGISTER_TEST(Run, 9);
694 } // namespace test09
697 // test10: FN. Simple race (write vs read). {{{1
700 // A simple data race between writer and reader.
701 // Write happens before Read (enforced by sleep),
702 // otherwise this test is the same as test09.
705 // 1. write(GLOB) a. sleep(long enough so that GLOB
706 // is most likely initialized by Writer)
710 // Eraser algorithm does not detect the race here,
711 // see Section 2.2 of http://citeseer.ist.psu.edu/savage97eraser.html.
722 FAST_MODE_INIT(&GLOB);
723 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test10. TP. FN in MSMHelgrind.");
724 printf("test10: positive\n");
725 MyThreadArray t(Writer, Reader);
728 printf("\tGLOB=%d\n", GLOB);
730 REGISTER_TEST(Run, 10);
731 } // namespace test10
734 // test11: FP. Synchronization via CondVar, 2 workers. {{{1
735 // This test is properly synchronized, but currently (Dec 2007)
736 // helgrind reports a false positive.
738 // Parent: Worker1, Worker2:
739 // 1. Start(workers) a. read(GLOB)
740 // 2. MU.Lock() b. MU.Lock()
741 // 3. while(COND != 2) /-------- c. CV.Signal()
742 // CV.Wait(&MU) <-------/ d. MU.Unlock()
762 MyThreadArray t(Worker, Worker);
777 // ANNOTATE_EXPECT_RACE(&GLOB, "test11. FP. Fixed by MSMProp1.");
778 printf("test11: negative\n");
780 printf("\tGLOB=%d\n", GLOB);
782 REGISTER_TEST(Run, 11);
783 } // namespace test11
786 // test12: FP. Synchronization via Mutex, then via PCQ. {{{1
789 // This test is properly synchronized, but currently (Dec 2007)
790 // helgrind reports a false positive.
792 // First, we write to GLOB under MU, then we synchronize via PCQ,
793 // which is essentially a semaphore.
796 // 1. MU.Lock() a. MU.Lock()
797 // 2. write(GLOB) <---- MU ----> b. write(GLOB)
798 // 3. MU.Unlock() c. MU.Unlock()
799 // 4. Q.Put() ---------------> d. Q.Get()
802 ProducerConsumerQueue Q(INT_MAX);
823 // ANNOTATE_EXPECT_RACE(&GLOB, "test12. FP. Fixed by MSMProp1.");
824 printf("test12: negative\n");
825 MyThreadArray t(Putter, Getter);
828 printf("\tGLOB=%d\n", GLOB);
830 REGISTER_TEST(Run, 12);
831 } // namespace test12
834 // test13: FP. Synchronization via Mutex, then via LockWhen. {{{1
837 // This test is essentially the same as test12, but uses LockWhen
841 // 1. MU.Lock() a. MU.Lock()
842 // 2. write(GLOB) <---------- MU ----------> b. write(GLOB)
843 // 3. MU.Unlock() c. MU.Unlock()
846 // 6. ANNOTATE_CONDVAR_SIGNAL -------\ .
847 // 7. MU.Unlock() \ .
848 // \----> d. MU.LockWhen(COND == 1)
860 ANNOTATE_CONDVAR_SIGNAL(&MU);
869 MU.LockWhen(Condition(&ArgIsOne, &COND));
875 // ANNOTATE_EXPECT_RACE(&GLOB, "test13. FP. Fixed by MSMProp1.");
876 printf("test13: negative\n");
879 MyThreadArray t(Waker, Waiter);
883 printf("\tGLOB=%d\n", GLOB);
885 REGISTER_TEST2(Run, 13, FEATURE|NEEDS_ANNOTATIONS);
886 } // namespace test13
889 // test14: FP. Synchronization via PCQ, reads, 2 workers. {{{1
892 // This test is properly synchronized, but currently (Dec 2007)
893 // helgrind reports a false positive.
895 // This test is similar to test11, but uses PCQ (semaphore).
897 // Putter2: Putter1: Getter:
898 // 1. read(GLOB) a. read(GLOB)
899 // 2. Q2.Put() ----\ b. Q1.Put() -----\ .
900 // \ \--------> A. Q1.Get()
901 // \----------------------------------> B. Q2.Get()
903 ProducerConsumerQueue Q1(INT_MAX), Q2(INT_MAX);
919 // ANNOTATE_EXPECT_RACE(&GLOB, "test14. FP. Fixed by MSMProp1.");
920 printf("test14: negative\n");
921 MyThreadArray t(Getter, Putter1, Putter2);
924 printf("\tGLOB=%d\n", GLOB);
926 REGISTER_TEST(Run, 14);
927 } // namespace test14
930 // test15: TN. Synchronization via LockWhen. One waker and 2 waiters. {{{1
932 // Waker: Waiter1, Waiter2:
936 // 4. ANNOTATE_CONDVAR_SIGNAL ------------> a. MU.LockWhen(COND == 1)
937 // 5. MU.Unlock() b. MU.Unlock()
948 ANNOTATE_CONDVAR_SIGNAL(&MU);
953 MU.LockWhen(Condition(&ArgIsOne, &COND));
961 printf("test15: negative\n");
962 MyThreadArray t(Waker, Waiter, Waiter);
965 printf("\tGLOB=%d\n", GLOB);
967 REGISTER_TEST(Run, 15);
968 } // namespace test15
971 // test16: FP. Barrier (emulated by CV), 2 threads. {{{1
974 // 1. MU.Lock() a. MU.Lock()
975 // 2. write(GLOB) <------------ MU ----------> b. write(GLOB)
976 // 3. MU.Unlock() c. MU.Unlock()
977 // 4. MU2.Lock() d. MU2.Lock()
978 // 5. COND-- e. COND--
979 // 6. ANNOTATE_CONDVAR_SIGNAL(MU2) ---->V .
980 // 7. MU2.Await(COND == 0) <------------+------ f. ANNOTATE_CONDVAR_SIGNAL(MU2)
981 // 8. MU2.Unlock() V-----> g. MU2.Await(COND == 0)
982 // 9. read(GLOB) h. MU2.Unlock()
986 // TODO: This way we may create too many edges in happens-before graph.
987 // Arndt Mühlenfeld in his PhD (TODO: link) suggests creating special nodes in
988 // happens-before graph to reduce the total number of edges.
1003 ANNOTATE_CONDVAR_SIGNAL(&MU2);
1004 MU2.Await(Condition(&ArgIsZero, &COND));
1011 // ANNOTATE_EXPECT_RACE(&GLOB, "test16. FP. Fixed by MSMProp1 + Barrier support.");
1013 printf("test16: negative\n");
1014 MyThreadArray t(Worker, Worker);
1017 printf("\tGLOB=%d\n", GLOB);
1019 REGISTER_TEST2(Run, 16, FEATURE|NEEDS_ANNOTATIONS);
1020 } // namespace test16
1023 // test17: FP. Barrier (emulated by CV), 3 threads. {{{1
1025 // Same as test16, but with 3 threads.
1037 ANNOTATE_CONDVAR_SIGNAL(&MU2);
1038 MU2.Await(Condition(&ArgIsZero, &COND));
1045 // ANNOTATE_EXPECT_RACE(&GLOB, "test17. FP. Fixed by MSMProp1 + Barrier support.");
1047 printf("test17: negative\n");
1048 MyThreadArray t(Worker, Worker, Worker);
1051 printf("\tGLOB=%d\n", GLOB);
1053 REGISTER_TEST2(Run, 17, FEATURE|NEEDS_ANNOTATIONS);
1054 } // namespace test17
1057 // test18: TN. Synchronization via Await(), signaller gets there first. {{{1
1061 // Same as test03, but uses Mutex::Await() instead of Mutex::LockWhen().
1064 usleep(100000); // Make sure the waiter blocks.
1068 COND = 1; // We are done! Tell the Waiter.
1069 MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
1073 pool.StartWorkers();
1075 pool.Add(NewCallback(Waker));
1078 MU.Await(Condition(&ArgIsOne, &COND)); // calls ANNOTATE_CONDVAR_WAIT
1079 MU.Unlock(); // Waker is done!
1084 printf("test18: negative\n");
1086 printf("\tGLOB=%d\n", GLOB);
1088 REGISTER_TEST2(Run, 18, FEATURE|NEEDS_ANNOTATIONS);
1089 } // namespace test18
1091 // test19: TN. Synchronization via AwaitWithTimeout(). {{{1
1094 // Same as test18, but with AwaitWithTimeout. Do not timeout.
1097 usleep(100000); // Make sure the waiter blocks.
1101 COND = 1; // We are done! Tell the Waiter.
1102 MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
1106 pool.StartWorkers();
1108 pool.Add(NewCallback(Waker));
1111 CHECK(MU.AwaitWithTimeout(Condition(&ArgIsOne, &COND), INT_MAX));
1117 printf("test19: negative\n");
1119 printf("\tGLOB=%d\n", GLOB);
1121 REGISTER_TEST2(Run, 19, FEATURE|NEEDS_ANNOTATIONS);
1122 } // namespace test19
1124 // test20: TP. Incorrect synchronization via AwaitWhen(), timeout. {{{1
1128 // True race. We timeout in AwaitWhen.
1135 pool.StartWorkers();
1137 pool.Add(NewCallback(Waker));
1140 CHECK(!MU.AwaitWithTimeout(Condition(&ArgIsOne, &COND), 100));
1146 FAST_MODE_INIT(&GLOB);
1147 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test20. TP.");
1148 printf("test20: positive\n");
1150 printf("\tGLOB=%d\n", GLOB);
1152 REGISTER_TEST2(Run, 20, FEATURE|NEEDS_ANNOTATIONS);
1153 } // namespace test20
1155 // test21: TP. Incorrect synchronization via LockWhenWithTimeout(). {{{1
1158 // True race. We timeout in LockWhenWithTimeout().
1166 pool.StartWorkers();
1168 pool.Add(NewCallback(Waker));
1170 CHECK(!MU.LockWhenWithTimeout(Condition(&ArgIsOne, &COND), 100));
1176 FAST_MODE_INIT(&GLOB);
1177 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test21. TP.");
1178 printf("test21: positive\n");
1180 printf("\tGLOB=%d\n", GLOB);
1182 REGISTER_TEST2(Run, 21, FEATURE|NEEDS_ANNOTATIONS);
1183 } // namespace test21
1185 // test22: TP. Incorrect synchronization via CondVar::WaitWithTimeout(). {{{1
1189 // True race. We timeout in CondVar::WaitWithTimeout().
1196 pool.StartWorkers();
1198 pool.Add(NewCallback(Waker));
1200 int64_t ms_left_to_wait = 100;
1201 int64_t deadline_ms = GetCurrentTimeMillis() + ms_left_to_wait;
1203 while(COND != 1 && ms_left_to_wait > 0) {
1204 CV.WaitWithTimeout(&MU, ms_left_to_wait);
1205 ms_left_to_wait = deadline_ms - GetCurrentTimeMillis();
1212 FAST_MODE_INIT(&GLOB);
1213 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test22. TP.");
1214 printf("test22: positive\n");
1216 printf("\tGLOB=%d\n", GLOB);
1218 REGISTER_TEST(Run, 22);
1219 } // namespace test22
1221 // test23: TN. TryLock, ReaderLock, ReaderTryLock. {{{1
1223 // Correct synchronization with TryLock, Lock, ReaderTryLock, ReaderLock.
1226 void Worker_TryLock() {
1227 for (int i = 0; i < 20; i++) {
1239 void Worker_ReaderTryLock() {
1240 for (int i = 0; i < 20; i++) {
1242 if (MU.ReaderTryLock()) {
1252 void Worker_ReaderLock() {
1253 for (int i = 0; i < 20; i++) {
1261 void Worker_Lock() {
1262 for (int i = 0; i < 20; i++) {
1271 printf("test23: negative\n");
1272 MyThreadArray t(Worker_TryLock,
1273 Worker_ReaderTryLock,
1279 printf("\tGLOB=%d\n", GLOB);
1281 REGISTER_TEST(Run, 23);
1282 } // namespace test23
1284 // test24: TN. Synchronization via ReaderLockWhen(). {{{1
1288 // Same as test03, but uses ReaderLockWhen().
1291 usleep(100000); // Make sure the waiter blocks.
1295 COND = 1; // We are done! Tell the Waiter.
1296 MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
1300 pool.StartWorkers();
1302 pool.Add(NewCallback(Waker));
1303 MU.ReaderLockWhen(Condition(&ArgIsOne, &COND));
1309 printf("test24: negative\n");
1311 printf("\tGLOB=%d\n", GLOB);
1313 REGISTER_TEST2(Run, 24, FEATURE|NEEDS_ANNOTATIONS);
1314 } // namespace test24
1316 // test25: TN. Synchronization via ReaderLockWhenWithTimeout(). {{{1
1320 // Same as test24, but uses ReaderLockWhenWithTimeout().
1321 // We do not timeout.
1324 usleep(100000); // Make sure the waiter blocks.
1328 COND = 1; // We are done! Tell the Waiter.
1329 MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
1333 pool.StartWorkers();
1335 pool.Add(NewCallback(Waker));
1336 CHECK(MU.ReaderLockWhenWithTimeout(Condition(&ArgIsOne, &COND), INT_MAX));
1342 printf("test25: negative\n");
1344 printf("\tGLOB=%d\n", GLOB);
1346 REGISTER_TEST2(Run, 25, FEATURE|NEEDS_ANNOTATIONS);
1347 } // namespace test25
1349 // test26: TP. Incorrect synchronization via ReaderLockWhenWithTimeout(). {{{1
1353 // Same as test25, but we timeout and incorrectly assume happens-before.
1361 pool.StartWorkers();
1363 pool.Add(NewCallback(Waker));
1364 CHECK(!MU.ReaderLockWhenWithTimeout(Condition(&ArgIsOne, &COND), 100));
1370 FAST_MODE_INIT(&GLOB);
1371 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test26. TP");
1372 printf("test26: positive\n");
1374 printf("\tGLOB=%d\n", GLOB);
1376 REGISTER_TEST2(Run, 26, FEATURE|NEEDS_ANNOTATIONS);
1377 } // namespace test26
1380 // test27: TN. Simple synchronization via SpinLock. {{{1
1393 printf("test27: negative\n");
1394 MyThreadArray t(Worker, Worker, Worker, Worker);
1397 printf("\tGLOB=%d\n", GLOB);
1399 REGISTER_TEST2(Run, 27, FEATURE|NEEDS_ANNOTATIONS);
1400 #endif // NO_SPINLOCK
1401 } // namespace test27
1404 // test28: TN. Synchronization via Mutex, then PCQ. 3 threads {{{1
1406 // Putter1: Getter: Putter2:
1407 // 1. MU.Lock() A. MU.Lock()
1408 // 2. write(GLOB) B. write(GLOB)
1409 // 3. MU.Unlock() C. MU.Unlock()
1410 // 4. Q.Put() ---------\ /------- D. Q.Put()
1411 // 5. MU.Lock() \-------> a. Q.Get() / E. MU.Lock()
1412 // 6. read(GLOB) b. Q.Get() <---------/ F. read(GLOB)
1413 // 7. MU.Unlock() (sleep) G. MU.Unlock()
1415 ProducerConsumerQueue Q(INT_MAX);
1439 printf("test28: negative\n");
1440 MyThreadArray t(Getter, Putter, Putter);
1443 printf("\tGLOB=%d\n", GLOB);
1445 REGISTER_TEST(Run, 28);
1446 } // namespace test28
1449 // test29: TN. Synchronization via Mutex, then PCQ. 4 threads. {{{1
1451 // Similar to test28, but has two Getters and two PCQs.
1452 ProducerConsumerQueue *Q1, *Q2;
1456 void Putter(ProducerConsumerQueue *q) {
1470 void Putter1() { Putter(Q1); }
1471 void Putter2() { Putter(Q2); }
1478 usleep(48000); // TODO: remove this when FP in test32 is fixed.
1482 printf("test29: negative\n");
1483 Q1 = new ProducerConsumerQueue(INT_MAX);
1484 Q2 = new ProducerConsumerQueue(INT_MAX);
1485 MyThreadArray t(Getter, Getter, Putter1, Putter2);
1488 printf("\tGLOB=%d\n", GLOB);
1492 REGISTER_TEST(Run, 29);
1493 } // namespace test29
1496 // test30: TN. Synchronization via 'safe' race. Writer vs multiple Readers. {{{1
1498 // This test shows a very risky kind of synchronization which is very easy
1499 // to get wrong. Actually, I am not sure I've got it right.
1501 // Writer: Reader1, Reader2, ..., ReaderN:
1502 // 1. write(GLOB[i]: i >= BOUNDARY) a. n = BOUNDARY
1503 // 2. HAPPENS_BEFORE(BOUNDARY+1) -------> b. HAPPENS_AFTER(n)
1504 // 3. BOUNDARY++; c. read(GLOB[i]: i < n)
1506 // Here we have a 'safe' race on accesses to BOUNDARY and
1507 // no actual races on accesses to GLOB[]:
1508 // Writer writes to GLOB[i] where i>=BOUNDARY and then increments BOUNDARY.
1509 // Readers read BOUNDARY and read GLOB[i] where i<BOUNDARY.
1511 // I am not completely sure that this scheme guarantees no race between
1512 // accesses to GLOB since compilers and CPUs
1513 // are free to rearrange memory operations.
1514 // I am actually sure that this scheme is wrong unless we use
1515 // some smart memory fencing...
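// For contrast, a fenced variant of the same publication scheme. This is only an
// illustrative sketch (it assumes GCC's __sync_synchronize() full barrier is
// available to this toolchain); the test itself does not use it.
static int FENCED_GLOB[10];
static volatile int FENCED_BOUNDARY = 0;
static __attribute__((unused)) void FencedWriter() {
  if (FENCED_BOUNDARY >= 10) return;               // stay within the sketch's array
  FENCED_GLOB[FENCED_BOUNDARY] = FENCED_BOUNDARY;  // fill the next slot first
  __sync_synchronize();   // make the write to FENCED_GLOB visible...
  FENCED_BOUNDARY++;      // ...before publishing the new boundary
}
static __attribute__((unused)) void FencedReader() {
  int n = FENCED_BOUNDARY;  // read the published boundary
  __sync_synchronize();     // keep the reads below from moving above this load
  for (int i = 0; i < n; i++)
    CHECK(FENCED_GLOB[i] == i);
}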
1520 volatile int BOUNDARY = 0;
1523 for (int i = 0; i < N; i++) {
1524 CHECK(BOUNDARY == i);
1525 for (int j = i; j < N; j++) {
1528 ANNOTATE_HAPPENS_BEFORE(reinterpret_cast<void*>(BOUNDARY+1));
1538 if (n == 0) continue;
1539 ANNOTATE_HAPPENS_AFTER(reinterpret_cast<void*>(n));
1540 for (int i = 0; i < n; i++) {
1541 CHECK(GLOB[i] == i);
1548 FAST_MODE_INIT(&BOUNDARY);
1549 ANNOTATE_EXPECT_RACE((void*)(&BOUNDARY), "test30. Sync via 'safe' race.");
1550 printf("test30: negative\n");
1551 MyThreadArray t(Writer, Reader, Reader, Reader);
1554 printf("\tGLOB=%d\n", GLOB[N-1]);
1556 REGISTER_TEST2(Run, 30, FEATURE|NEEDS_ANNOTATIONS);
1557 } // namespace test30
1560 // test31: TN. Synchronization via 'safe' race. Writer vs Writer. {{{1
1562 // This test is similar to test30, but
1563 // it has one Writer instead of multiple Readers.
1566 // 1. write(GLOB[i]: i >= BOUNDARY) a. n = BOUNDARY
1567 // 2. HAPPENS_BEFORE(BOUNDARY+1) -------> b. HAPPENS_AFTER(n)
1568 // 3. BOUNDARY++; c. write(GLOB[i]: i < n)
1573 volatile int BOUNDARY = 0;
1576 for (int i = 0; i < N; i++) {
1577 CHECK(BOUNDARY == i);
1578 for (int j = i; j < N; j++) {
1581 ANNOTATE_HAPPENS_BEFORE(reinterpret_cast<void*>(BOUNDARY+1));
1591 if (n == 0) continue;
1592 ANNOTATE_HAPPENS_AFTER(reinterpret_cast<void*>(n));
1593 for (int i = 0; i < n; i++) {
1603 FAST_MODE_INIT(&BOUNDARY);
1604 ANNOTATE_EXPECT_RACE((void*)(&BOUNDARY), "test31. Sync via 'safe' race.");
1605 printf("test31: negative\n");
1606 MyThreadArray t(Writer1, Writer2);
1609 printf("\tGLOB=%d\n", GLOB[N-1]);
1611 REGISTER_TEST2(Run, 31, FEATURE|NEEDS_ANNOTATIONS);
1612 } // namespace test31
1615 // test32: FP. Synchronization via thread create/join. W/R. {{{1
1617 // This test is well synchronized but helgrind 3.3.0 reports a race.
1619 // Parent: Writer: Reader:
1620 // 1. Start(Reader) -----------------------\ .
1622 // 2. Start(Writer) ---\ \ .
1623 // \---> a. MU.Lock() \--> A. sleep(long enough)
1625 // /---- c. MU.Unlock()
1626 // 3. Join(Writer) <---/
1629 // /------------ D. MU.Unlock()
1630 // 4. Join(Reader) <----------------/
1634 // The call to sleep() in Reader is not part of the synchronization;
1635 // it is required to trigger the false positive in helgrind 3.3.0.
1659 w.Join(); // 'w' joins first.
1666 // ANNOTATE_EXPECT_RACE(&GLOB, "test32. FP. Fixed by MSMProp1.");
1667 printf("test32: negative\n");
1669 printf("\tGLOB=%d\n", GLOB);
1672 REGISTER_TEST(Run, 32);
1673 } // namespace test32
1676 // test33: STAB. Stress test for the number of thread sets (TSETs). {{{1
1679 // Here we access N memory locations from within log(N) threads.
1680 // We do it in such a way that helgrind creates nearly all possible TSETs.
1681 // Then we join all threads and start again (N_iter times).
1682 const int N_iter = 48;
1683 const int Nlog = 15;
1684 const int N = 1 << Nlog;
1694 for (int i = 0; i < N; i++) {
1695 // ARR[i] is accessed by threads from i-th subset
1703 printf("test33:\n");
1705 std::vector<MyThread*> vec(Nlog);
1707 for (int j = 0; j < N_iter; j++) {
1708 // Create and start Nlog threads
1709 for (int i = 0; i < Nlog; i++) {
1710 vec[i] = new MyThread(Worker);
1712 for (int i = 0; i < Nlog; i++) {
1715 // Join all threads.
1716 for (int i = 0; i < Nlog; i++) {
1720 printf("------------------\n");
1723 printf("\tGLOB=%d; ARR[1]=%d; ARR[7]=%d; ARR[N-1]=%d\n",
1724 GLOB, ARR[1], ARR[7], ARR[N-1]);
1726 REGISTER_TEST2(Run, 33, STABILITY|EXCLUDE_FROM_ALL);
1727 } // namespace test33
1730 // test34: STAB. Stress test for the number of locks sets (LSETs). {{{1
1732 // Similar to test33, but for lock sets.
1734 const int N_iter = 48;
1735 const int Nlog = 10;
1736 const int N = 1 << Nlog;
1738 static Mutex *MUs[Nlog];
1741 for (int i = 0; i < N; i++) {
1742 // ARR[i] is protected by MUs from i-th subset of all MUs
1743 for (int j = 0; j < Nlog; j++) if (i & (1 << j)) MUs[j]->Lock();
1745 for (int j = 0; j < Nlog; j++) if (i & (1 << j)) MUs[j]->Unlock();
1750 printf("test34:\n");
1751 for (int iter = 0; iter < N_iter; iter++) {
1752 for (int i = 0; i < Nlog; i++) {
1755 MyThreadArray t(Worker, Worker);
1758 for (int i = 0; i < Nlog; i++) {
1761 printf("------------------\n");
1763 printf("\tGLOB=%d\n", GLOB);
1765 REGISTER_TEST2(Run, 34, STABILITY|EXCLUDE_FROM_ALL);
1766 } // namespace test34
1769 // test35: PERF. Lots of mutexes and lots of calls to free(). {{{1
1771 // Helgrind 3.3.0 is very slow in shadow_mem_make_NoAccess(). Fixed locally.
1772 // With the fix, helgrind runs this test in about a minute;
1773 // without the fix, it takes about 5 minutes (on a C2D 2.4GHz).
1775 // TODO: need to figure out the best way for performance testing.
1777 const int N_mu = 25000;
1778 const int N_free = 48000;
1781 for (int i = 0; i < N_free; i++)
1782 CHECK(777 == *ARR[i]);
1786 printf("test35:\n");
1787 std::vector<Mutex*> mus;
1789 ARR = new int *[N_free];
1790 for (int i = 0; i < N_free; i++) {
1791 const int c = N_free / N_mu;
1793 mus.push_back(new Mutex);
1795 mus.back()->Unlock();
1797 ARR[i] = new int(777);
1800 // Need to put all ARR[i] into shared state in order
1801 // to trigger the performance bug.
1802 MyThreadArray t(Worker, Worker);
1806 for (int i = 0; i < N_free; i++) delete ARR[i];
1809 for (size_t i = 0; i < mus.size(); i++) {
1813 REGISTER_TEST2(Run, 35, PERFORMANCE|EXCLUDE_FROM_ALL);
1814 } // namespace test35
1817 // test36: TN. Synchronization via Mutex, then PCQ. 3 threads. W/W {{{1
1819 // variation of test28 (W/W instead of W/R)
1821 // Putter1: Getter: Putter2:
1822 // 1. MU.Lock(); A. MU.Lock()
1823 // 2. write(GLOB) B. write(GLOB)
1824 // 3. MU.Unlock() C. MU.Unlock()
1825 // 4. Q.Put() ---------\ /------- D. Q.Put()
1826 // 5. MU1.Lock() \-------> a. Q.Get() / E. MU1.Lock()
1827 // 6. MU.Lock() b. Q.Get() <---------/ F. MU.Lock()
1828 // 7. write(GLOB) G. write(GLOB)
1829 // 8. MU.Unlock() H. MU.Unlock()
1830 // 9. MU1.Unlock() (sleep) I. MU1.Unlock()
1834 ProducerConsumerQueue Q(INT_MAX);
1862 printf("test36: negative \n");
1863 MyThreadArray t(Getter, Putter, Putter);
1866 printf("\tGLOB=%d\n", GLOB);
1868 REGISTER_TEST(Run, 36);
1869 } // namespace test36
1872 // test37: TN. Simple synchronization (write vs read). {{{1
1876 // Similar to test10, but properly locked.
1893 CHECK(GLOB != -777);
1898 printf("test37: negative\n");
1899 MyThreadArray t(Writer, Reader);
1902 printf("\tGLOB=%d\n", GLOB);
1904 REGISTER_TEST(Run, 37);
1905 } // namespace test37
1908 // test38: TN. Synchronization via Mutexes and PCQ. 4 threads. W/W {{{1
1910 // Fusion of test29 and test36.
1912 // Putter1: Putter2: Getter1: Getter2:
1913 // MU1.Lock() MU1.Lock()
1914 // write(GLOB) write(GLOB)
1915 // MU1.Unlock() MU1.Unlock()
1916 // Q1.Put() Q2.Put()
1917 // Q1.Put() Q2.Put()
1918 // MU1.Lock() MU1.Lock()
1919 // MU2.Lock() MU2.Lock()
1920 // write(GLOB) write(GLOB)
1921 // MU2.Unlock() MU2.Unlock()
1922 // MU1.Unlock() MU1.Unlock() sleep sleep
1923 // Q1.Get() Q1.Get()
1924 // Q2.Get() Q2.Get()
1925 // MU2.Lock() MU2.Lock()
1926 // write(GLOB) write(GLOB)
1927 // MU2.Unlock() MU2.Unlock()
1931 ProducerConsumerQueue *Q1, *Q2;
1935 void Putter(ProducerConsumerQueue *q) {
1951 void Putter1() { Putter(Q1); }
1952 void Putter2() { Putter(Q2); }
1963 usleep(48000); // TODO: remove this when FP in test32 is fixed.
1967 printf("test38: negative\n");
1968 Q1 = new ProducerConsumerQueue(INT_MAX);
1969 Q2 = new ProducerConsumerQueue(INT_MAX);
1970 MyThreadArray t(Getter, Getter, Putter1, Putter2);
1973 printf("\tGLOB=%d\n", GLOB);
1977 REGISTER_TEST(Run, 38);
1978 } // namespace test38
1980 // test39: FP. Barrier. {{{1
1983 // Same as test17 but uses Barrier class (pthread_barrier_t).
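// For reference, the Barrier class is expected to wrap the pthread barrier
// pattern roughly like this (a sketch, assuming the pthread-based wrappers;
// the wrapper API itself lives in THREAD_WRAPPERS):
//   pthread_barrier_t b;
//   pthread_barrier_init(&b, NULL, N_threads);  // N_threads participants
//   // in each thread:
//   pthread_barrier_wait(&b);                   // everyone blocks until the last arrives
//   pthread_barrier_destroy(&b);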
1985 const int N_threads = 3;
1986 Barrier barrier(N_threads);
1994 CHECK(GLOB == N_threads);
1997 ANNOTATE_TRACE_MEMORY(&GLOB);
1998 // ANNOTATE_EXPECT_RACE(&GLOB, "test39. FP. Fixed by MSMProp1. Barrier.");
1999 printf("test39: negative\n");
2001 ThreadPool pool(N_threads);
2002 pool.StartWorkers();
2003 for (int i = 0; i < N_threads; i++) {
2004 pool.Add(NewCallback(Worker));
2006 } // all folks are joined here.
2007 printf("\tGLOB=%d\n", GLOB);
2009 REGISTER_TEST(Run, 39);
2010 #endif // NO_BARRIER
2011 } // namespace test39
2014 // test40: FP. Synchronization via Mutexes and PCQ. 4 threads. W/W {{{1
2016 // Similar to test38 but with different order of events (due to sleep).
2018 // Putter1: Putter2: Getter1: Getter2:
2019 // MU1.Lock() MU1.Lock()
2020 // write(GLOB) write(GLOB)
2021 // MU1.Unlock() MU1.Unlock()
2022 // Q1.Put() Q2.Put()
2023 // Q1.Put() Q2.Put()
2024 // Q1.Get() Q1.Get()
2025 // Q2.Get() Q2.Get()
2026 // MU2.Lock() MU2.Lock()
2027 // write(GLOB) write(GLOB)
2028 // MU2.Unlock() MU2.Unlock()
2030 // MU1.Lock() MU1.Lock()
2031 // MU2.Lock() MU2.Lock()
2032 // write(GLOB) write(GLOB)
2033 // MU2.Unlock() MU2.Unlock()
2034 // MU1.Unlock() MU1.Unlock()
2037 ProducerConsumerQueue *Q1, *Q2;
2041 void Putter(ProducerConsumerQueue *q) {
2058 void Putter1() { Putter(Q1); }
2059 void Putter2() { Putter(Q2); }
2069 usleep(48000); // TODO: remove this when FP in test32 is fixed.
2073 // ANNOTATE_EXPECT_RACE(&GLOB, "test40. FP. Fixed by MSMProp1. Complex Stuff.");
2074 printf("test40: negative\n");
2075 Q1 = new ProducerConsumerQueue(INT_MAX);
2076 Q2 = new ProducerConsumerQueue(INT_MAX);
2077 MyThreadArray t(Getter, Getter, Putter1, Putter2);
2080 printf("\tGLOB=%d\n", GLOB);
2084 REGISTER_TEST(Run, 40);
2085 } // namespace test40
2087 // test41: TN. Test for race that appears when loading a dynamic symbol. {{{1
2090 ANNOTATE_NO_OP(NULL); // An empty function, loaded from dll.
2093 printf("test41: negative\n");
2094 MyThreadArray t(Worker, Worker, Worker);
2098 REGISTER_TEST2(Run, 41, FEATURE|NEEDS_ANNOTATIONS);
2099 } // namespace test41
2102 // test42: TN. Using the same cond var several times. {{{1
2120 ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU);
2132 ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU);
2145 // ANNOTATE_EXPECT_RACE(&GLOB, "test42. TN. debugging.");
2146 printf("test42: negative\n");
2147 MyThreadArray t(Worker1, Worker2);
2150 printf("\tGLOB=%d\n", GLOB);
2152 REGISTER_TEST2(Run, 42, FEATURE|NEEDS_ANNOTATIONS);
2153 } // namespace test42
2163 // 3. read \--> a. Q.Get()
2166 ProducerConsumerQueue Q(INT_MAX);
2178 printf("test43: negative\n");
2179 MyThreadArray t(Putter, Getter);
2182 printf("\tGLOB=%d\n", GLOB);
2184 REGISTER_TEST(Run, 43)
2185 } // namespace test43
2194 // 3. MU.Lock() \--> a. Q.Get()
2202 ProducerConsumerQueue Q(INT_MAX);
2218 // ANNOTATE_EXPECT_RACE(&GLOB, "test44. FP. Fixed by MSMProp1.");
2219 printf("test44: negative\n");
2220 MyThreadArray t(Putter, Getter);
2223 printf("\tGLOB=%d\n", GLOB);
2225 REGISTER_TEST(Run, 44)
2226 } // namespace test44
2235 // 3. MU.Lock() \--> a. Q.Get()
2243 ProducerConsumerQueue Q(INT_MAX);
2259 printf("test45: negative\n");
2260 MyThreadArray t(Putter, Getter);
2263 printf("\tGLOB=%d\n", GLOB);
2265 REGISTER_TEST(Run, 45)
2266 } // namespace test45
2276 // 4. MU.Unlock() (sleep)
2295 // If we move it to Run() we will get a report in MSMHelgrind
2296 // due to its false positive (test32).
2298 printf("\tGLOB=%d\n", GLOB);
2302 ANNOTATE_TRACE_MEMORY(&GLOB);
2303 MyThreadArray t(First, Second);
2307 REGISTER_TEST(Run, 46)
2308 } // namespace test46
2311 // test47: TP. Not detected by pure happens-before detectors. {{{1
2313 // A true race that cannot be detected by a pure happens-before
2319 // 3. MU.Unlock() (sleep)
2337 FAST_MODE_INIT(&GLOB);
2338 if (!Tsan_PureHappensBefore())
2339 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test47. TP. Not detected by pure HB.");
2340 printf("test47: positive\n");
2341 MyThreadArray t(First, Second);
2344 printf("\tGLOB=%d\n", GLOB);
2346 REGISTER_TEST(Run, 47)
2347 } // namespace test47
2350 // test48: FN. Simple race (single write vs multiple reads). {{{1
2353 // same as test10 but with single writer and multiple readers
2354 // A simple data race between single writer and multiple readers.
2355 // Write happens before Reads (enforced by sleep(1)),
2359 // 1. write(GLOB) a. sleep(long enough so that GLOB
2360 // is most likely initialized by Writer)
2364 // Eraser algorithm does not detect the race here,
2365 // see Section 2.2 of http://citeseer.ist.psu.edu/savage97eraser.html.
2372 CHECK(GLOB != -777);
2376 FAST_MODE_INIT(&GLOB);
2377 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test48. TP. FN in MSMHelgrind.");
2378 printf("test48: positive\n");
2379 MyThreadArray t(Writer, Reader, Reader, Reader);
2382 printf("\tGLOB=%d\n", GLOB);
2384 REGISTER_TEST(Run, 48)
2385 } // namespace test48
2388 // test49: FN. Simple race (single write vs multiple reads). {{{1
2391 // same as test10 but with multiple read operations done by a single reader
2392 // A simple data race between writer and readers.
2393 // Write happens before Read (enforced by sleep(1)),
2396 // 1. write(GLOB) a. sleep(long enough so that GLOB
2397 // is most likely initialized by Writer)
2404 // Eraser algorithm does not detect the race here,
2405 // see Section 2.2 of http://citeseer.ist.psu.edu/savage97eraser.html.
2412 CHECK(GLOB != -777);
2413 CHECK(GLOB != -777);
2414 CHECK(GLOB != -777);
2415 CHECK(GLOB != -777);
2419 FAST_MODE_INIT(&GLOB);
2420 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test49. TP. FN in MSMHelgrind.");
2421 printf("test49: positive\n");
2422 MyThreadArray t(Writer, Reader);
2425 printf("\tGLOB=%d\n", GLOB);
2427 REGISTER_TEST(Run, 49);
2428 } // namespace test49
2431 // test50: TP. Synchronization via CondVar. {{{1
2435 // The last two write accesses to GLOB are not synchronized
2440 // 3. MU.Lock() a. write(GLOB)
2443 // /--- d. CV.Signal()
2444 // 4. while(COND != 1) / e. MU.Unlock()
2445 // CV.Wait(MU) <---/
2447 // 6. write(GLOB) f. MU.Lock()
2453 usleep(100000); // Make sure the waiter blocks.
2470 pool.StartWorkers();
2472 pool.Add(NewCallback(Waker));
2477 ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU);
2483 FAST_MODE_INIT(&GLOB);
2484 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test50. TP.");
2485 printf("test50: positive\n");
2487 printf("\tGLOB=%d\n", GLOB);
2489 REGISTER_TEST2(Run, 50, FEATURE|NEEDS_ANNOTATIONS);
2490 } // namespace test50
2493 // test51: TP. Synchronization via CondVar: problem with several signals. {{{1
2500 // The results are scheduler-dependent because there are several signals;
2501 // the second signal will be lost.
2508 // 4. MU.Unlock() \ .
2509 // 5. write(GLOB) \ a. write(GLOB)
2512 // \--- d. CV.Signal()
2519 // LOST<---- i. CV.Signal()
2524 usleep(10000); // Make sure the waiter blocks.
2533 usleep(10000); // Make sure the waiter is signalled.
2539 CV.Signal(); // Lost signal
2546 pool.StartWorkers();
2547 pool.Add(NewCallback(Waker));
2558 FAST_MODE_INIT(&GLOB);
2559 ANNOTATE_EXPECT_RACE(&GLOB, "test51. TP.");
2560 printf("test51: positive\n");
2562 printf("\tGLOB=%d\n", GLOB);
2564 REGISTER_TEST(Run, 51);
2565 } // namespace test51
2568 // test52: TP. Synchronization via CondVar: problem with several signals. {{{1
2574 // Same as test51, but here the first signal will be lost.
2575 // The results are scheduler-dependent because there are several signals.
2582 // LOST<---- d. CV.Signal()
2588 // 4. MU.Unlock() \ f. write(GLOB)
2589 // 5. write(GLOB) \ .
2592 // \--- i. CV.Signal()
2601 CV.Signal(); // Lost signal
2604 usleep(20000); // Make sure the waiter blocks
2616 pool.StartWorkers();
2617 pool.Add(NewCallback(Waker));
2619 usleep(10000); // Make sure the first signal will be lost
2629 FAST_MODE_INIT(&GLOB);
2630 ANNOTATE_EXPECT_RACE(&GLOB, "test52. TP.");
2631 printf("test52: positive\n");
2633 printf("\tGLOB=%d\n", GLOB);
2635 REGISTER_TEST(Run, 52);
2636 } // namespace test52
2639 // test53: FP. Synchronization via implicit semaphore. {{{1
2641 // Correctly synchronized test, but the common lockset is empty.
2642 // The variable FLAG works as an implicit semaphore.
2643 // MSMHelgrind still does not complain since it does not maintain the lockset
2644 // in the exclusive state. But MSMProp1 does complain.
2648 // Initializer: Users
2656 // d. if (!f) goto a.
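// In outline, the implicit-semaphore idea is (a sketch; the flag below stands for
// whatever signalling variable this test uses):
//   Initializer:  initialize GLOB; then set the flag          ("post")
//   User:         spin until the flag is set; then use GLOB   ("wait")
// The common lockset on GLOB is empty, which is why a lockset-based detector may
// complain even though the accesses are ordered by the flag.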
2666 void Initializer() {
2671 usleep(100000); // just in case
2682 // at this point Initializer will not access GLOB again
2684 CHECK(GLOB >= 1000);
2690 FAST_MODE_INIT(&GLOB);
2691 if (!Tsan_PureHappensBefore())
2692 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test53. FP. Implicit semaphore");
2693 printf("test53: FP. false positive, Implicit semaphore\n");
2694 MyThreadArray t(Initializer, User, User);
2697 printf("\tGLOB=%d\n", GLOB);
2699 REGISTER_TEST(Run, 53)
2700 } // namespace test53
2703 // test54: TN. Synchronization via implicit semaphore. Annotated {{{1
2705 // Same as test53, but annotated.
2710 void Initializer() {
2714 ANNOTATE_CONDVAR_SIGNAL(&GLOB);
2716 usleep(100000); // just in case
2727 // at this point Initializer will not access GLOB again
2728 ANNOTATE_CONDVAR_WAIT(&GLOB);
2730 CHECK(GLOB >= 1000);
2736 printf("test54: negative\n");
2737 MyThreadArray t(Initializer, User, User);
2740 printf("\tGLOB=%d\n", GLOB);
2742 REGISTER_TEST2(Run, 54, FEATURE|NEEDS_ANNOTATIONS)
2743 } // namespace test54
2746 // test55: FP. Synchronization with TryLock. Not easy for race detectors {{{1
2748 // "Correct" synchronization with TryLock and Lock.
2750 // This scheme is actually very risky.
2751 // It is covered in detail in this video:
2752 // http://youtube.com/watch?v=mrvAqvtWYb4 (slide 36, near 50-th minute).
2756 void Worker_Lock() {
2761 void Worker_TryLock() {
2763 if (!MU.TryLock()) {
2775 printf("test55:\n");
2776 MyThreadArray t(Worker_Lock, Worker_TryLock);
2779 printf("\tGLOB=%d\n", GLOB);
2781 REGISTER_TEST2(Run, 55, FEATURE|EXCLUDE_FROM_ALL);
2782 } // namespace test55
2786 // test56: TP. Use of ANNOTATE_BENIGN_RACE. {{{1
2788 // For whatever reason the user wants to treat
2789 // a race on GLOB as a benign race.
2798 ANNOTATE_BENIGN_RACE(&GLOB, "test56. Use of ANNOTATE_BENIGN_RACE.");
2799 ANNOTATE_BENIGN_RACE(&GLOB2, "No race. The tool should be silent");
2800 printf("test56: positive\n");
2801 MyThreadArray t(Worker, Worker, Worker, Worker);
2804 printf("\tGLOB=%d\n", GLOB);
2806 REGISTER_TEST2(Run, 56, FEATURE|NEEDS_ANNOTATIONS)
2807 } // namespace test56
2810 // test57: TN: Correct use of atomics. {{{1
2814 for (int i = 0; i < 10; i++) {
2815 AtomicIncrement(&GLOB, 1);
2820 while (GLOB < 20) usleep(1000);
2823 printf("test57: negative\n");
2824 MyThreadArray t(Writer, Writer, Reader, Reader);
2828 printf("\tGLOB=%d\n", GLOB);
2830 REGISTER_TEST(Run, 57)
2831 } // namespace test57
2834 // test58: TN. User defined synchronization. {{{1
2841 // Correctly synchronized test, but the common lockset is empty.
2842 // The variables FLAG1 and FLAG2 are used for synchronization and as
2843 // temporary variables for swapping two global values.
2844 // This kind of synchronization is rarely used (excluded from all tests??).
2863 printf("test58:\n");
2864 MyThreadArray t(Worker1, Worker2);
2867 printf("\tGLOB1=%d\n", GLOB1);
2868 printf("\tGLOB2=%d\n", GLOB2);
2870 REGISTER_TEST2(Run, 58, FEATURE|EXCLUDE_FROM_ALL)
2871 } // namespace test58
2875 // test59: TN. User defined synchronization. Annotated {{{1
2883 // same as test 58 but annotated
2887 ANNOTATE_CONDVAR_SIGNAL(&COND2);
2888 while(!FLAG2) usleep(1);
2889 ANNOTATE_CONDVAR_WAIT(&COND1);
2895 ANNOTATE_CONDVAR_SIGNAL(&COND1);
2896 while(!FLAG1) usleep(1);
2897 ANNOTATE_CONDVAR_WAIT(&COND2);
2902 printf("test59: negative\n");
2903 ANNOTATE_BENIGN_RACE(&FLAG1, "synchronization via 'safe' race");
2904 ANNOTATE_BENIGN_RACE(&FLAG2, "synchronization via 'safe' race");
2905 MyThreadArray t(Worker1, Worker2);
2908 printf("\tGLOB1=%d\n", GLOB1);
2909 printf("\tGLOB2=%d\n", GLOB2);
2911 REGISTER_TEST2(Run, 59, FEATURE|NEEDS_ANNOTATIONS)
2912 } // namespace test59
2915 // test60: TN. Correct synchronization using signal-wait {{{1
2924 // same as test 59 but synchronized with signal-wait.
2937 ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU);
2954 ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU);
2961 printf("test60: negative\n");
2962 MyThreadArray t(Worker1, Worker2);
2965 printf("\tGLOB1=%d\n", GLOB1);
2966 printf("\tGLOB2=%d\n", GLOB2);
2968 REGISTER_TEST2(Run, 60, FEATURE|NEEDS_ANNOTATIONS)
2969 } // namespace test60
2972 // test61: TN. Synchronization via Mutex as in happens-before, annotated. {{{1
2976 int *P1 = NULL, *P2 = NULL;
2978 // In this test Mutex lock/unlock operations introduce happens-before relation.
2979 // We annotate the code so that MU is treated as in a pure happens-before detector.
2983 ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&MU);
3008 printf("test61: negative\n");
3009 MyThreadArray t(Putter, Getter);
3012 printf("\tGLOB=%d\n", GLOB);
3014 REGISTER_TEST2(Run, 61, FEATURE|NEEDS_ANNOTATIONS)
3015 } // namespace test61
3018 // test62: STAB. Create as many segments as possible. {{{1
3020 // Helgrind 3.3.0 will fail as it has a hard limit of < 2^24 segments.
3021 // A better scheme is to implement garbage collection for segments.
3022 ProducerConsumerQueue Q(INT_MAX);
3023 const int N = 1 << 22;
3026 for (int i = 0; i < N; i++){
3027 if ((i % (N / 8)) == 0) {
3028 printf("i=%d\n", i);
3035 for (int i = 0; i < N; i++)
3040 printf("test62:\n");
3041 MyThreadArray t(Putter, Getter);
3045 REGISTER_TEST2(Run, 62, STABILITY|EXCLUDE_FROM_ALL)
3046 } // namespace test62
3049 // test63: STAB. Create as many segments as possible and do it fast. {{{1
3051 // Helgrind 3.3.0 will fail as it has a hard limit of < 2^24 segments.
3052 // A better scheme is to implement garbage collection for segments.
3053 const int N = 1 << 24;
3057 for (int i = 0; i < N; i++){
3058 if ((i % (N / 8)) == 0) {
3059 printf("i=%d\n", i);
3061 ANNOTATE_CONDVAR_SIGNAL(&C);
3069 printf("test63:\n");
3070 MyThreadArray t(Putter, Getter);
3074 REGISTER_TEST2(Run, 63, STABILITY|EXCLUDE_FROM_ALL)
3075 } // namespace test63
3078 // test64: TP. T2 happens-before T3, but T1 is independent. Reads in T1/T2. {{{1
3080 // True race between T1 and T3:
3083 // 1. read(GLOB) (sleep)
3085 // b. Q.Put() -----> A. Q.Get()
3091 ProducerConsumerQueue Q(INT_MAX);
3110 FAST_MODE_INIT(&GLOB);
3111 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test64: TP.");
3112 printf("test64: positive\n");
3113 MyThreadArray t(T1, T2, T3);
3116 printf("\tGLOB=%d\n", GLOB);
3118 REGISTER_TEST(Run, 64)
3119 } // namespace test64
3122 // test65: TP. T2 happens-before T3, but T1 is independent. Writes in T1/T2. {{{1
3124 // Similar to test64.
3125 // True race between T1 and T3:
3130 // 3. MU.Unlock() (sleep)
3134 // d. Q.Put() -----> A. Q.Get()
3141 ProducerConsumerQueue Q(INT_MAX);
3164 FAST_MODE_INIT(&GLOB);
3165 if (!Tsan_PureHappensBefore())
3166 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test65. TP.");
3167 printf("test65: positive\n");
3168 MyThreadArray t(T1, T2, T3);
3171 printf("\tGLOB=%d\n", GLOB);
3173 REGISTER_TEST(Run, 65)
3174 } // namespace test65
3177 // test66: TN. Two separate pairs of signaller/waiter using the same CV. {{{1
3204 while (C1 != 1) CV.Wait(&MU);
3205 ANNOTATE_CONDVAR_WAIT(&CV);
3212 while (C2 != 1) CV.Wait(&MU);
3213 ANNOTATE_CONDVAR_WAIT(&CV);
3219 printf("test66: negative\n");
3220 MyThreadArray t(Signaller1, Signaller2, Waiter1, Waiter2);
3223 printf("\tGLOB=%d/%d\n", GLOB1, GLOB2);
3225 REGISTER_TEST2(Run, 66, FEATURE|NEEDS_ANNOTATIONS)
3226 } // namespace test66
3229 // test67: FN. Race between Signaller1 and Waiter2 {{{1
3231 // Similar to test66, but there is a real race here.
3233 // Here we create a happens-before arc between Signaller1 and Waiter2
3234 // even though there should be no such arc.
3235 // However, it's probably impossible (or just very hard) to avoid it.
3259 while (C1 != 1) CV.Wait(&MU);
3260 ANNOTATE_CONDVAR_WAIT(&CV);
3266 while (C2 != 1) CV.Wait(&MU);
3267 ANNOTATE_CONDVAR_WAIT(&CV);
3273 FAST_MODE_INIT(&GLOB);
3274 ANNOTATE_EXPECT_RACE(&GLOB, "test67. FN. Race between Signaller1 and Waiter2");
3275 printf("test67: positive\n");
3276 MyThreadArray t(Signaller1, Signaller2, Waiter1, Waiter2);
3279 printf("\tGLOB=%d\n", GLOB);
3281 REGISTER_TEST2(Run, 67, FEATURE|NEEDS_ANNOTATIONS|EXCLUDE_FROM_ALL)
3282 } // namespace test67
3285 // test68: TP. Writes are protected by MU, reads are not. {{{1
3287 // In this test, all writes to GLOB are protected by a mutex
3288 // but some reads go unprotected.
3289 // This is certainly a race, but in some cases such code could occur in
3290 // a correct program. For example, the unprotected reads may be used
3291 // for showing statistics and are not required to be precise.
3294 const int N_writers = 3;
3298 for (int i = 0; i < 100; i++) {
3317 if (COND == N_writers)
3325 FAST_MODE_INIT(&GLOB);
3326 ANNOTATE_EXPECT_RACE(&GLOB, "TP. Writes are protected, reads are not.");
3327 printf("test68: positive\n");
3328 MyThreadArray t(Reader, Writer, Writer, Writer);
3331 printf("\tGLOB=%d\n", GLOB);
3333 REGISTER_TEST(Run, 68)
3334 } // namespace test68
3339 // This is the same as test68, but annotated.
3340 // We do not want to annotate GLOB as a benign race
3341 // because we want to allow racy reads only in certain places.
3346 const int N_writers = 3;
3351 for (int i = 0; i < 10; i++) {
3366 ANNOTATE_IGNORE_READS_BEGIN();
3368 ANNOTATE_IGNORE_READS_END();
3372 if (COND == N_writers)
3380 printf("test69: negative\n");
3381 MyThreadArray t(Reader, Writer, Writer, Writer);
3384 printf("\tGLOB=%d\n", GLOB);
3386 REGISTER_TEST(Run, 69)
3387 } // namespace test69
3389 // test70: STAB. Check that TRACE_MEMORY works. {{{1
3393 printf("test70: negative\n");
3394 ANNOTATE_TRACE_MEMORY(&GLOB);
3396 printf("\tGLOB=%d\n", GLOB);
3398 REGISTER_TEST(Run, 70)
3399 } // namespace test70
3403 // test71: TN. strlen, index. {{{1
3405 // This test is a reproducer for a benign race in strlen (as well as index, etc).
3406 // Some implementations of strlen may read up to 7 bytes past the end of the string
3407 // thus touching memory that may not belong to this string.
3408 // Such a race is benign because the data read past the end of the string is not used.
3410 // Here, we allocate an 8-byte-aligned string str and initialize the first 5 bytes.
3411 // Then one thread calls strlen(str) (as well as index & rindex)
3412 // and another thread initializes str[5]..str[7].
3414 // This can be fixed in Helgrind by intercepting strlen and replacing it
3415 // with a simpler implementation.
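// A byte-at-a-time strlen of the kind meant above (an illustrative sketch, not
// Helgrind's actual interceptor): it never reads past the terminating NUL, so it
// cannot touch the racy bytes str[5]..str[7].
static __attribute__((unused)) size_t strlen_no_overread(const char *s) {
  size_t n = 0;
  while (s[n] != '\0')  // stop exactly at the terminator
    n++;
  return n;
}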
3420 CHECK(strlen(str) == 4);
3421 CHECK(index(str, 'X') == str);
3422 CHECK(index(str, 'x') == str+1);
3423 CHECK(index(str, 'Y') == NULL);
3424 CHECK(rindex(str, 'X') == str+2);
3425 CHECK(rindex(str, 'x') == str+3);
3426 CHECK(rindex(str, 'Y') == NULL);
3442 printf("test71: negative (strlen & index)\n");
3443 MyThread t1(WorkerY);
3444 MyThread t2(WorkerX);
3449 printf("\tstrX=%s; strY=%s\n", str, str+5);
3451 REGISTER_TEST(Run, 71)
3452 } // namespace test71
3455 // test72: STAB. Stress test for the number of segment sets (SSETs). {{{1
3458 // Variation of test33.
3459 // Instead of creating Nlog*N_iter threads,
3460 // we create Nlog threads and do N_iter barriers.
3462 const int N_iter = 30;
3463 const int Nlog = 16;
3464 const int N = 1 << Nlog;
3465 static int64_t ARR1[N];
3466 static int64_t ARR2[N];
3467 Barrier *barriers[N_iter];
3478 long t __attribute__((unused)) = t0;
3480 for (int it = 0; it < N_iter; it++) {
3482 //printf("Iter: %d; %ld %ld\n", it, clock() - t, clock() - t0);
3485 // Iterate N_iter times, block on barrier after each iteration.
3486 // This way Helgrind will create new segments after each barrier.
3488 for (int x = 0; x < 2; x++) {
3489 // run the inner loop twice.
3490 // When a memory location is accessed a second time, it is likely
3491 // that the state (SVal) will be unchanged.
3492 // The memory machine may optimize this case.
3493 for (int i = 0; i < N; i++) {
3494 // ARR1[i] and ARR2[N-1-i] are accessed by threads from i-th subset
3496 CHECK(ARR1[i] == 0);
3497 CHECK(ARR2[N-1-i] == 0);
3501 barriers[it]->Block();
3507 printf("test72:\n");
3509 std::vector<MyThread*> vec(Nlog);
3511 for (int i = 0; i < N_iter; i++)
3512 barriers[i] = new Barrier(Nlog);
3514 // Create and start Nlog threads
3515 for (int i = 0; i < Nlog; i++) {
3516 vec[i] = new MyThread(Worker);
3520 // Join all threads.
3521 for (int i = 0; i < Nlog; i++) {
3525 for (int i = 0; i < N_iter; i++)
3528 /*printf("\tGLOB=%d; ARR[1]=%d; ARR[7]=%d; ARR[N-1]=%d\n",
3529 GLOB, (int)ARR1[1], (int)ARR1[7], (int)ARR1[N-1]);*/
3531 REGISTER_TEST2(Run, 72, STABILITY|PERFORMANCE|EXCLUDE_FROM_ALL);
3532 #endif // NO_BARRIER
3533 } // namespace test72
3536 // test73: STAB. Stress test for the number of (SSETs), different access sizes. {{{1
3539 // Variation of test72.
3540 // We perform accesses of different sizes to the same location.
3542 const int N_iter = 2;
3543 const int Nlog = 16;
3544 const int N = 1 << Nlog;
3545 union uint64_union {
3551 static uint64_union ARR1[N];
3552 union uint32_union {
3557 static uint32_union ARR2[N];
3558 Barrier *barriers[N_iter];
3568 for (int it = 0; it < N_iter; it++) {
3569 // Iterate N_iter times, block on barrier after each iteration.
3570 // This way Helgrind will create new segments after each barrier.
3572 for (int x = 0; x < 4; x++) {
3573 for (int i = 0; i < N; i++) {
3574 // ARR1[i] are accessed by threads from i-th subset
3576 for (int off = 0; off < (1 << x); off++) {
3578 case 0: CHECK(ARR1[i].u64[off] == 0); break;
3579 case 1: CHECK(ARR1[i].u32[off] == 0); break;
3580 case 2: CHECK(ARR1[i].u16[off] == 0); break;
3581 case 3: CHECK(ARR1[i].u8[off] == 0); break;
3584 case 1: CHECK(ARR2[i].u32[off] == 0); break;
3585 case 2: CHECK(ARR2[i].u16[off] == 0); break;
3586 case 3: CHECK(ARR2[i].u8[off] == 0); break;
3592 barriers[it]->Block();
3599 printf("test73:\n");
3601 std::vector<MyThread*> vec(Nlog);
3603 for (int i = 0; i < N_iter; i++)
3604 barriers[i] = new Barrier(Nlog);
3606 // Create and start Nlog threads
3607 for (int i
= 0; i
< Nlog
; i
++) {
3608 vec
[i
] = new MyThread(Worker
);
3612 // Join all threads.
3613 for (int i
= 0; i
< Nlog
; i
++) {
3617 for (int i
= 0; i
< N_iter
; i
++)
3620 /*printf("\tGLOB=%d; ARR[1]=%d; ARR[7]=%d; ARR[N-1]=%d\n",
3621 GLOB, (int)ARR1[1], (int)ARR1[7], (int)ARR1[N-1]);*/
3623 REGISTER_TEST2(Run
, 73, STABILITY
|PERFORMANCE
|EXCLUDE_FROM_ALL
);
3624 #endif // NO_BARRIER
3625 } // namespace test73
3628 // test74: PERF. A lot of lock/unlock calls. {{{1
3630 const int N = 100000;
3633   printf("test74: perf\n");
3634   for (int i = 0; i < N; i++) {
3639 REGISTER_TEST(Run, 74)
3640 }  // namespace test74
3643 // test75: TN. Test for sem_post, sem_wait, sem_trywait. {{{1
3660   sem_trywait(&sem[1]);
3666   sem_init(&sem[0], 0, 0);
3667   sem_init(&sem[1], 0, 0);
3669   printf("test75: negative\n");
3671   MyThreadArray t(Poster, Waiter);
3677   MyThreadArray t(Poster, TryWaiter);
3681   printf("\tGLOB=%d\n", GLOB);
3683   sem_destroy(&sem[0]);
3684   sem_destroy(&sem[1]);
3687 REGISTER_TEST(Run, 75)
3688 }  // namespace test75
3690 // RefCountedClass {{{1
3691 struct RefCountedClass {
3694     annotate_unref_ = false;
3699   ~RefCountedClass() {
3700     CHECK(ref_ == 0);      // race may be reported here
3701     int data_val = data_;  // and here
3702                            // if MU is not annotated
3705     printf("\tRefCountedClass::data_ = %d\n", data_val);
3725     bool do_delete = ref_ == 0;
3726     if (annotate_unref_) {
3727       ANNOTATE_CONDVAR_SIGNAL(this);
3731     if (annotate_unref_) {
3732       ANNOTATE_CONDVAR_WAIT(this);
3738   static void Annotate_MU() {
3739     ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&MU);
3741   void AnnotateUnref() {
3742     annotate_unref_ = true;
3744   void Annotate_Race() {
3745     ANNOTATE_BENIGN_RACE(&this->data_, "needs annotation");
3746     ANNOTATE_BENIGN_RACE(&this->ref_, "needs annotation");
3749   bool annotate_unref_;
3752   Mutex mu_;        // protects data_
3755   static Mutex MU;  // protects ref_
3758 Mutex RefCountedClass::MU;
3760 // test76: FP. Ref counting, no annotations. {{{1
3765 RefCountedClass *object = NULL;
3769   object->AccessData();
3773   printf("test76: false positive (ref counting)\n");
3774   object = new RefCountedClass;
3775   object->Annotate_Race();
3776   MyThreadArray t(Worker, Worker, Worker, Worker);
3780 REGISTER_TEST2(Run, 76, FEATURE)
3781 #endif // NO_BARRIER
3782 }  // namespace test76
3786 // test77: TN. Ref counting, MU is annotated. {{{1
3789 // same as test76, but RefCountedClass::MU is annotated.
3792 RefCountedClass *object = NULL;
3796   object->AccessData();
3800   printf("test77: true negative (ref counting), mutex is annotated\n");
3801   RefCountedClass::Annotate_MU();
3802   object = new RefCountedClass;
3803   MyThreadArray t(Worker, Worker, Worker, Worker);
3807 REGISTER_TEST(Run, 77)
3808 #endif // NO_BARRIER
3809 }  // namespace test77
3813 // test78: TN. Ref counting, Unref is annotated. {{{1
3816 // same as test76, but RefCountedClass::Unref is annotated.
3819 RefCountedClass *object = NULL;
3823   object->AccessData();
3827   printf("test78: true negative (ref counting), Unref is annotated\n");
3828   RefCountedClass::Annotate_MU();
3829   object = new RefCountedClass;
3830   MyThreadArray t(Worker, Worker, Worker, Worker);
3834 REGISTER_TEST(Run, 78)
3835 #endif // NO_BARRIER
3836 }  // namespace test78
3840 // test79 TN. Swap. {{{1
3843 typedef __gnu_cxx::hash_map<int, int> map_t;
3845 typedef std::map<int, int> map_t;
3850 // Here we use swap to pass MAP between threads.
3851 // The synchronization is correct, but w/o ANNOTATE_MUTEX_IS_USED_AS_CONDVAR
3852 // Helgrind will complain (see the sketch after this test).
3857   // We swap the new empty map 'tmp' with 'MAP'.
3860   // tmp (which is the old version of MAP) is destroyed here.
3865   MAP[1]++;  // Just update MAP under MU.
3869 void Worker3() { Worker1(); }
3870 void Worker4() { Worker2(); }
3873   ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&MU);
3874   printf("test79: negative\n");
3875   MyThreadArray t(Worker1, Worker2, Worker3, Worker4);
3879 REGISTER_TEST(Run, 79)
3880 }  // namespace test79
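// An illustrative, non-registered sketch (hypothetical names) of the swap
// idiom exercised by test79: a thread builds a map locally, swap()s it into
// the shared map under a Mutex, and lets the old contents be destroyed
// outside the critical section. Only the Mutex/MutexLock wrappers from
// THREAD_WRAPPERS and std::map are assumed.
namespace swap_sketch {
typedef std::map<int, int> map_t;
static map_t shared_map;   // guarded by mu
static Mutex mu;

void SwapInFreshMap() {
  map_t tmp;               // Built without holding the lock.
  tmp[1] = 100;
  {
    MutexLock lock(&mu);
    shared_map.swap(tmp);  // O(1): only internal pointers are exchanged.
  }
  // 'tmp' (the old shared_map contents) is destroyed here, outside 'mu';
  // this is what confuses hybrid detectors unless 'mu' is annotated with
  // ANNOTATE_MUTEX_IS_USED_AS_CONDVAR.
}
}  // namespace swap_sketch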
3883 // AtomicRefCountedClass. {{{1
3884 // Same as RefCountedClass, but using atomic ops instead of a mutex.
3885 struct AtomicRefCountedClass {
3887   AtomicRefCountedClass() {
3888     annotate_unref_ = false;
3893   ~AtomicRefCountedClass() {
3894     CHECK(ref_ == 0);      // race may be reported here
3895     int data_val = data_;  // and here
3898     printf("\tRefCountedClass::data_ = %d\n", data_val);
3908     AtomicIncrement(&ref_, 1);
3912     // DISCLAIMER: I am not sure I've implemented this correctly
3913     // (it might require a memory barrier, etc).
3914     // But this implementation of reference counting is enough for
3915     // the purpose of the Helgrind demonstration.
3916     AtomicIncrement(&ref_, -1);
3917     if (annotate_unref_) { ANNOTATE_CONDVAR_SIGNAL(this); }
3919     if (annotate_unref_) { ANNOTATE_CONDVAR_WAIT(this); }
3924   void AnnotateUnref() {
3925     annotate_unref_ = true;
3927   void Annotate_Race() {
3928     ANNOTATE_BENIGN_RACE(&this->data_, "needs annotation");
3931   bool annotate_unref_;
3934   int data_;  // under mu_
3936   int ref_;   // used in atomic ops.
3939 // test80: FP. Ref counting with atomics, no annotations. {{{1
3944 AtomicRefCountedClass *object = NULL;
3948   object->AccessData();
3949   object->Unref();  // All the tricky stuff is here.
3952   printf("test80: false positive (ref counting)\n");
3953   object = new AtomicRefCountedClass;
3954   object->Annotate_Race();
3955   MyThreadArray t(Worker, Worker, Worker, Worker);
3959 REGISTER_TEST2(Run, 80, FEATURE|EXCLUDE_FROM_ALL)
3960 #endif // NO_BARRIER
3961 }  // namespace test80
3964 // test81: TN. Ref counting with atomics, Unref is annotated. {{{1
3967 // same as test80, but Unref is annotated.
3970 AtomicRefCountedClass *object = NULL;
3974   object->AccessData();
3975   object->Unref();  // All the tricky stuff is here.
3978   printf("test81: negative (annotated ref counting)\n");
3979   object = new AtomicRefCountedClass;
3980   object->AnnotateUnref();
3981   MyThreadArray t(Worker, Worker, Worker, Worker);
3985 REGISTER_TEST2(Run, 81, FEATURE|EXCLUDE_FROM_ALL)
3986 #endif // NO_BARRIER
3987 }  // namespace test81
3990 // test82: Object published w/o synchronization. {{{1
3993 // Writer creates a new object and makes the pointer visible to the Reader.
3994 // Reader waits until the object pointer is non-null and reads the object.
3996 // On a Core 2 Duo this test will sometimes (quite rarely) fail in
3997 // the CHECK below, at least if compiled with -O2.
3999 // The sequence of events:
4000 //    Thread1:                        Thread2:
4001 //      a. arr_[...] = ...
4003 //                                      A. ... = foo[i]; // non-NULL
4004 //                                      B. ... = arr_[...];
4006 // Since there is no proper synchronization, during event (B)
4007 // Thread2 may not see the result of event (a).
4008 // On x86 and x86_64 this happens due to the compiler reordering instructions.
4009 // On other architectures it may also happen due to cache inconsistency.
4014     idx_ = rand() % 1024;
4016     // __asm__ __volatile__("" : : : "memory"); // this fixes!
4018   static void check(volatile FOO *foo) {
4019     CHECK(foo->arr_[foo->idx_] == 77777);
4026 const int N = 100000;
4027 static volatile FOO *foo[N];
4031   for (int i = 0; i < N; i++) {
4038   for (int i = 0; i < N; i++) {
4040     MU.Lock();    // this is NOT a synchronization,
4041     MU.Unlock();  // it just helps foo[i] to become visible in Reader.
4043     if ((i % 100) == 0) {
4044       printf("rd %d\n", i);
4046     // At this point Reader() sees the new value of foo[i]
4047     // but in very rare cases will not see the new value of foo[i]->arr_.
4048     // Thus this CHECK will sometimes fail.
4054   printf("test82: positive\n");
4055   MyThreadArray t(Writer, Reader);
4059 REGISTER_TEST2(Run, 82, FEATURE|EXCLUDE_FROM_ALL)
4060 }  // namespace test82
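// An illustrative, non-registered sketch (hypothetical names) of the
// unsafe-publication pattern behind test82/test83: the writer fills the
// object and then stores the pointer; without a barrier or real
// synchronization the compiler (or a weakly-ordered CPU) may let the reader
// observe the pointer before the payload.
namespace unsafe_publication_sketch {
struct Payload { int value; };
static Payload *volatile g_ptr = NULL;  // racy, intentionally unsynchronized.

void Publisher() {
  Payload *p = new Payload;
  p->value = 77777;
  // A compiler barrier such as
  //   __asm__ __volatile__("" : : : "memory");
  // (or a proper release store) would be needed here for correctness.
  g_ptr = p;                      // publication
}

void Consumer() {
  while (g_ptr == NULL) { }       // busy-wait until the pointer appears
  CHECK(g_ptr->value == 77777);   // may fail: the payload write is not ordered
}
}  // namespace unsafe_publication_sketch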
4063 // test83: Object published w/o synchronization (simple version) {{{1
4065 // A simplified version of test82 (an example of wrong code).
4066 // This test, though incorrect, will almost never fail.
4067 volatile static int *ptr = NULL;
4077   MU.Lock();    // Not a synchronization!
4084   // printf("test83: positive\n");
4085   MyThreadArray t(Writer, Reader);
4089 REGISTER_TEST2(Run, 83, FEATURE|EXCLUDE_FROM_ALL)
4090 }  // namespace test83
4093 // test84: TP. True race (regression test for a bug related to atomics) {{{1
4095 // Helgrind should not create HB arcs for the bus lock even when
4096 // --pure-happens-before=yes is used.
4097 // Bug found by Bart Van Assche; the test is taken from
4098 // the Valgrind file drd/tests/atomic_var.c.
4100 /* s_dummy[] ensures that s_x and s_y are not in the same cache line. */
4101 static char s_dummy[512] = {0};
4104 void thread_func_1()
4107   AtomicIncrement(&s_x, 1);
4110 void thread_func_2()
4112   while (AtomicIncrement(&s_x, 0) == 0)
4114   printf("y = %d\n", s_y);
4119   CHECK(s_dummy[0] == 0);  // Avoid a compiler warning about 's_dummy' being unused.
4120   printf("test84: positive\n");
4121   FAST_MODE_INIT(&s_y);
4122   ANNOTATE_EXPECT_RACE_FOR_TSAN(&s_y, "test84: TP. true race.");
4123   MyThreadArray t(thread_func_1, thread_func_2);
4127 REGISTER_TEST(Run, 84)
4128 }  // namespace test84
4131 // test85: Test for RunningOnValgrind(). {{{1
4135 printf("test85: RunningOnValgrind() = %d\n", RunningOnValgrind());
4137 REGISTER_TEST(Run, 85)
4138 } // namespace test85
4141 // test86: Test for race inside DTOR: racy write to vptr. Benign. {{{1
4143 // This test shows a racy access to vptr (the pointer to the vtbl).
4144 // We have class A and class B derived from A.
4145 // Both classes have a virtual function f() and a virtual DTOR.
4146 // We create an object 'A *a = new B'
4147 // and pass this object from Thread1 to Thread2.
4148 // Thread2 calls a->f(). This call reads a->vptr.
4149 // Thread1 deletes the object. B::~B waits until the object can be destroyed
4150 // (flag_stopped == true) but at the very beginning of B::~B
4151 // a->vptr is written to.
4152 // So, we have a race on a->vptr.
4153 // In this particular test the race is benign, but test87 shows
4154 // how such a race can do harm.
4160 //    2. Q.Put(a); ------------\                .
4161 //                              \--------------------> a. a = Q.Get();
4163 //                                   /--------- c. flag_stopped = true;
4165 //    waits until flag_stopped <----/
4169 bool flag_stopped = false;
4172 ProducerConsumerQueue Q(INT_MAX);  // Used to pass A* between threads.
4175   A()  { printf("A::A()\n"); }
4176   virtual ~A() { printf("A::~A()\n"); }
4177   virtual void f() { }
4179   uintptr_t padding[15];
4180 } __attribute__ ((aligned (64)));
4183   B()  { printf("B::B()\n"); }
4185     // The race is here.    <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
4186     printf("B::~B()\n");
4187     // wait until flag_stopped is true.
4188     mu.LockWhen(Condition(&ArgIsTrue, &flag_stopped));
4190     printf("B::~B() done\n");
4192   virtual void f() { }
4197   if (!Tsan_FastMode())
4198     ANNOTATE_EXPECT_RACE(a, "test86: expected race on a->vptr");
4199   printf("Waiter: B created\n");
4201   usleep(100000);  // so that the Worker calls a->f() first.
4202   printf("Waiter: deleting B\n");
4204   printf("Waiter: B deleted\n");
4206   printf("Waiter: done\n");
4210   A *a = reinterpret_cast<A*>(Q.Get());
4211   printf("Worker: got A\n");
4215   flag_stopped = true;
4218   printf("Worker: done\n");
4222   printf("test86: positive, race inside DTOR\n");
4223   MyThreadArray t(Waiter, Worker);
4227 REGISTER_TEST(Run, 86)
4228 }  // namespace test86
4231 // test87: Test for race inside DTOR: racy write to vptr. Harmful. {{{1
4233 // A variation of test86 where the race is harmful.
4234 // Here we have class C derived from B.
4235 // We create an object 'A *a = new C' in Thread1 and pass it to Thread2.
4236 // Thread2 calls a->f().
4237 // Thread1 calls 'delete a'.
4238 // It first calls C::~C, then B::~B where it rewrites the vptr to point
4239 // to B::vtbl. This is a problem because Thread2 might not have called a->f()
4240 // yet and will now call B::f instead of C::f.
4242 bool flag_stopped = false;
4245 ProducerConsumerQueue Q(INT_MAX);  // Used to pass A* between threads.
4248   A()  { printf("A::A()\n"); }
4249   virtual ~A() { printf("A::~A()\n"); }
4250   virtual void f() = 0;  // pure virtual.
4254   B()  { printf("B::B()\n"); }
4256     // The race is here.    <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
4257     printf("B::~B()\n");
4258     // wait until flag_stopped is true.
4259     mu.LockWhen(Condition(&ArgIsTrue, &flag_stopped));
4261     printf("B::~B() done\n");
4263   virtual void f() = 0;  // pure virtual.
4267   C()  { printf("C::C()\n"); }
4268   virtual ~C() { printf("C::~C()\n"); }
4269   virtual void f() { }
4279   A *a = reinterpret_cast<A*>(Q.Get());
4283   flag_stopped = true;
4284   ANNOTATE_CONDVAR_SIGNAL(&mu);
4289   printf("test87: positive, race inside DTOR\n");
4290   MyThreadArray t(Waiter, Worker);
4294 REGISTER_TEST2(Run, 87, FEATURE|EXCLUDE_FROM_ALL)
4295 }  // namespace test87
4298 // test88: Test for ANNOTATE_IGNORE_WRITES_* {{{1
4300 // A racy write annotated with ANNOTATE_IGNORE_WRITES_BEGIN/END.
4303   ANNOTATE_IGNORE_WRITES_BEGIN();
4305   ANNOTATE_IGNORE_WRITES_END();
4308   printf("test88: negative, test for ANNOTATE_IGNORE_WRITES_*\n");
4313   printf("\tGLOB=%d\n", GLOB);
4315 REGISTER_TEST(Run, 88)
4316 }  // namespace test88
4319 // test89: Test for debug info. {{{1
4321 // Simple races with different objects (stack, heap, globals; scalars, structs).
4322 // Also, if run with --trace-level=2 this test will show a sequence of
4323 // CTOR and DTOR calls.
4331   ANNOTATE_TRACE_MEMORY(&a);
4340   B()          { CHECK(a == 1); }
4341   virtual ~B() { CHECK(a == 3); }
4345   virtual ~C() { a = 3; }
4351 STRUCT *STACK_STRUCT;
4352 STRUCT *HEAP_STRUCT;
4358   STACK_STRUCT->b = 1;
4366   STRUCT stack_struct;
4367   STACK_STRUCT = &stack_struct;
4369   HEAP_STRUCT = new STRUCT;
4371   printf("test89: negative\n");
4372   MyThreadArray t(Worker, Worker);
4379   printf("Using 'a->a': %d\n", a->a);
4382 REGISTER_TEST2(Run, 89, FEATURE|EXCLUDE_FROM_ALL)
4383 }  // namespace test89
4386 // test90: FP. Test for a safely-published pointer (read-only). {{{1
4388 // The Publisher creates an object and safely publishes it under a mutex.
4389 // Readers access the object read-only.
4392 // Without annotations Helgrind will issue a false positive in Reader().
4394 // Choices for annotations:
4395 //   -- ANNOTATE_CONDVAR_SIGNAL/ANNOTATE_CONDVAR_WAIT
4396 //   -- ANNOTATE_MUTEX_IS_USED_AS_CONDVAR
4397 //   -- ANNOTATE_PUBLISH_MEMORY_RANGE.
4404   GLOB = (int*)memalign(64, sizeof(int));
4406   if (!Tsan_PureHappensBefore() && !Tsan_FastMode())
4407     ANNOTATE_EXPECT_RACE_FOR_TSAN(GLOB, "test90. FP. This is a false positive");
4419   CHECK(*p == 777);  // Race is reported here.
4426   printf("test90: false positive (safely published pointer).\n");
4427   MyThreadArray t(Publisher, Reader, Reader, Reader);
4430   printf("\t*GLOB=%d\n", *GLOB);
4433 REGISTER_TEST(Run, 90)
4434 }  // namespace test90
4437 // test91: FP. Test for a safely-published pointer (read-write). {{{1
4439 // Similar to test90.
4440 // The Publisher creates an object and safely publishes it under a mutex MU1.
4441 // Accessors get the object under MU1 and access it (read/write) under MU2.
4443 // Without annotations Helgrind will issue a false positive in Accessor().
4451   GLOB = (int*)memalign(64, sizeof(int));
4453   if (!Tsan_PureHappensBefore() && !Tsan_FastMode())
4454     ANNOTATE_EXPECT_RACE_FOR_TSAN(GLOB, "test91. FP. This is a false positive");
4466   (*p)++;  // Race is reported here.
4475   printf("test91: false positive (safely published pointer, read/write).\n");
4476   MyThreadArray t(Publisher, Accessor, Accessor, Accessor);
4479   printf("\t*GLOB=%d\n", *GLOB);
4482 REGISTER_TEST(Run, 91)
4483 }  // namespace test91
4486 // test92: TN. Test for a safely-published pointer (read-write), annotated. {{{1
4488 // Similar to test91, but annotated with ANNOTATE_PUBLISH_MEMORY_RANGE.
4491 //    Publisher:                                       Accessors:
4495 //    3. ANNOTATE_PUBLISH_...(GLOB) -------\            .
4496 //    4. MU1.Unlock()                       \           .
4499 //                                            \    c. MU1.Unlock()
4500 //                                             \-->d. Access GLOB
4502 //  A happens-before arc is created between ANNOTATE_PUBLISH_MEMORY_RANGE and
4503 //  accesses to GLOB (see the sketch after this test).
4515   for (int i = 0; i < 10; i++) {
4518   // This annotation should go right before the object is published.
4519   ANNOTATE_PUBLISH_MEMORY_RANGE(GLOB, sizeof(*GLOB));
4523 void Accessor(int index) {
4530     p->arr[index]++;  // W/o the annotations the race will be reported here.
4531     CHECK(p->arr[index] == 778);
4538 void Accessor0() { Accessor(0); }
4539 void Accessor5() { Accessor(5); }
4540 void Accessor9() { Accessor(9); }
4543   printf("test92: safely published pointer, read/write, annotated.\n");
4544   MyThreadArray t(Publisher, Accessor0, Accessor5, Accessor9);
4547   printf("\t*GLOB=%d\n", GLOB->arr[0]);
4549 REGISTER_TEST(Run, 92)
4550 }  // namespace test92
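// A minimal, non-registered sketch (hypothetical names) of the
// publish-then-access pattern that test92 exercises: the annotation is placed
// right before the pointer is made visible under the mutex, so a hybrid
// detector draws a happens-before arc from the publication to later accesses.
namespace publish_sketch {
struct Blob { int payload; };
static Blob *g_blob = NULL;   // guarded by mu
static Mutex mu;

void Publish() {
  Blob *b = new Blob;
  b->payload = 777;                              // initialized before publishing
  ANNOTATE_PUBLISH_MEMORY_RANGE(b, sizeof(*b));  // right before publication
  MutexLock lock(&mu);
  g_blob = b;
}

void Access() {
  Blob *b = NULL;
  while (b == NULL) {
    MutexLock lock(&mu);
    b = g_blob;
  }
  CHECK(b->payload == 777);  // no report: covered by the published range
}
}  // namespace publish_sketch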
4553 // test93: TP. Test for incorrect usage of ANNOTATE_PUBLISH_MEMORY_RANGE. {{{1
4563   // Incorrect, used after the memory has been accessed in another thread.
4564   ANNOTATE_PUBLISH_MEMORY_RANGE(&GLOB, sizeof(GLOB));
4568   printf("test93: positive, misuse of ANNOTATE_PUBLISH_MEMORY_RANGE\n");
4569   MyThreadArray t(Reader, Publisher);
4572   printf("\tGLOB=%d\n", GLOB);
4574 REGISTER_TEST2(Run, 93, FEATURE|EXCLUDE_FROM_ALL)
4575 }  // namespace test93
4578 // test94: TP. Check do_cv_signal/fake segment logic {{{1
4588   usleep(10000);      // Make sure the waiter blocks.
4598   usleep(1000*1000);  // Make sure CV2.Signal() "happens after" CV.Signal()
4599   usleep(10000);      // Make sure the waiter blocks.
4617   GLOB = 2;  // READ: no HB-relation between CV.Signal and CV2.Wait !
4620   FAST_MODE_INIT(&GLOB);
4621   ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test94: TP.");
4622   printf("test94: TP. Check do_cv_signal/fake segment logic\n");
4623   MyThreadArray mta(Thr1, Thr2, Thr3, Thr4);
4626   printf("\tGLOB=%d\n", GLOB);
4628 REGISTER_TEST(Run, 94);
4629 }  // namespace test94
4631 // test95: TP. Check do_cv_signal/fake segment logic {{{1
4641   usleep(1000*1000);  // Make sure CV2.Signal() "happens before" CV.Signal()
4642   usleep(10000);      // Make sure the waiter blocks.
4652   usleep(10000);      // Make sure the waiter blocks.
4670   GLOB = 2;  // READ: no HB-relation between CV.Signal and CV2.Wait !
4673   FAST_MODE_INIT(&GLOB);
4674   ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test95: TP.");
4675   printf("test95: TP. Check do_cv_signal/fake segment logic\n");
4676   MyThreadArray mta(Thr1, Thr2, Thr3, Thr4);
4679   printf("\tGLOB=%d\n", GLOB);
4681 REGISTER_TEST(Run, 95);
4682 }  // namespace test95
4684 // test96: TN. tricky LockSet behaviour {{{1
4685 // 3 threads access the same memory with three different
4686 // locksets: {A, B}, {B, C}, {C, A}.
4687 // These locksets have an empty intersection.
4712   printf("test96: FP. tricky LockSet behaviour\n");
4713   ANNOTATE_TRACE_MEMORY(&GLOB);
4714   MyThreadArray mta(Thread1, Thread2, Thread3);
4718   printf("\tGLOB=%d\n", GLOB);
4720 REGISTER_TEST(Run, 96);
4721 }  // namespace test96
4723 // test97: This test shows a false negative with --fast-mode=yes {{{1
4725 const int HG_CACHELINE_SIZE = 64;
4729 const int ARRAY_SIZE = HG_CACHELINE_SIZE * 4 / sizeof(int);
4730 int array[ARRAY_SIZE];
4731 int *GLOB = &array[ARRAY_SIZE/2];
4733    We use sizeof(array) == 4 * HG_CACHELINE_SIZE to be sure that GLOB points
4734    to memory inside a CacheLineZ which is inside the array's memory range.
4739   CHECK(777 == *GLOB);
4743   MyThreadArray t(Reader);
4744   if (!Tsan_FastMode())
4745     ANNOTATE_EXPECT_RACE_FOR_TSAN(GLOB, "test97: TP. FN with --fast-mode=yes");
4746   printf("test97: This test shows a false negative with --fast-mode=yes\n");
4753 REGISTER_TEST2(Run, 97, FEATURE)
4754 }  // namespace test97
4756 // test98: Synchronization via read/write (or send/recv). {{{1
4758 // The synchronization here is done by a pair of read/write calls
4759 // that create a happens-before arc. The same may be done with send/recv.
4760 // Such synchronization is quite unusual in real programs
4761 // (why would one synchronize via a file or socket?), but it is
4762 // quite possible in unittests where one thread runs as a producer
4763 // and another as a consumer.
4765 // A race detector has to create happens-before arcs for
4766 // {read,send}->{write,recv} even if the file descriptors are different
4767 // (see the sketch after this test).
4775   const char *str = "Hey there!\n";
4776   IGNORE_RETURN_VALUE(write(fd_out, str, strlen(str) + 1));
4781   while (read(fd_in, buff, 100) == 0)
4783   printf("read: %s\n", buff);
4788   printf("test98: negative, synchronization via I/O\n");
4791   // we open two files, one for reading and one for writing,
4792   // but the files are actually the same (symlinked).
4793   sprintf(out_name, "/tmp/racecheck_unittest_out.%ld", (long) getpid());
4794   fd_out = creat(out_name, O_WRONLY | S_IRWXU);
4796   // symlink() is not supported on Darwin. Copy the output file name.
4797   strcpy(in_name, out_name);
4799   sprintf(in_name, "/tmp/racecheck_unittest_in.%ld", (long) getpid());
4800   IGNORE_RETURN_VALUE(symlink(out_name, in_name));
4802   fd_in = open(in_name, 0, O_RDONLY);
4805   MyThreadArray t(Writer, Reader);
4808   printf("\tGLOB=%d\n", GLOB);
4815 REGISTER_TEST(Run, 98)
4816 }  // namespace test98
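// An illustrative, non-registered sketch (hypothetical names) of
// synchronizing two threads through file descriptors, in the spirit of
// test98 but using a pipe instead of a symlinked temp file. It assumes only
// POSIX pipe/read/write/close and the MyThreadArray Start()/Join() interface
// used throughout this file.
namespace io_sync_sketch {
static int shared_value = 0;
static int pipe_fds[2];  // pipe_fds[0] = read end, pipe_fds[1] = write end.

void Producer() {
  shared_value = 42;  // written before the write() below
  char token = 'x';
  IGNORE_RETURN_VALUE(write(pipe_fds[1], &token, 1));
}

void Consumer() {
  char token = 0;
  while (read(pipe_fds[0], &token, 1) <= 0) { }  // blocks until the token arrives
  CHECK(shared_value == 42);                     // ordered by the write->read pair
}

void RunSketch() {
  CHECK(pipe(pipe_fds) == 0);
  MyThreadArray t(Producer, Consumer);
  t.Start();
  t.Join();
  close(pipe_fds[0]);
  close(pipe_fds[1]);
}
}  // namespace io_sync_sketch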
4819 // test99: TP. Unit test for a bug in LockWhen*. {{{1
4826 static void Thread1() {
4827   for (int i = 0; i < 100; i++) {
4828     mu.LockWhenWithTimeout(Condition(&ArgIsTrue, &GLOB), 5);
4835 static void Thread2() {
4836   for (int i = 0; i < 100; i++) {
4844   printf("test99: regression test for LockWhen*\n");
4845   MyThreadArray t(Thread1, Thread2);
4849 REGISTER_TEST(Run, 99);
4850 }  // namespace test99
4853 // test100: Test for initialization bit. {{{1
4881   printf("test100: test for initialization bit.\n");
4882   MyThreadArray t(Creator, Worker1, Worker2);
4883   ANNOTATE_TRACE_MEMORY(&G1);
4884   ANNOTATE_TRACE_MEMORY(&G2);
4885   ANNOTATE_TRACE_MEMORY(&G3);
4886   ANNOTATE_TRACE_MEMORY(&G4);
4890 REGISTER_TEST2(Run, 100, FEATURE|EXCLUDE_FROM_ALL)
4891 }  // namespace test100
4894 // test101: TN. Two signals and two waits. {{{1
4938   printf("test101: negative\n");
4939   MyThreadArray t(Waiter, Signaller);
4942   printf("\tGLOB=%d\n", GLOB);
4944 REGISTER_TEST(Run, 101)
4945 }  // namespace test101
4947 // test102: --fast-mode=yes vs. --initialization-bit=yes {{{1
4949 const int HG_CACHELINE_SIZE = 64;
4953 const int ARRAY_SIZE = HG_CACHELINE_SIZE * 4 / sizeof(int);
4954 int array[ARRAY_SIZE + 1];
4955 int *GLOB = &array[ARRAY_SIZE/2];
4957    We use sizeof(array) == 4 * HG_CACHELINE_SIZE to be sure that GLOB points
4958    to memory inside a CacheLineZ which is inside the array's memory range.
4963   CHECK(777 == GLOB[0]);
4965   CHECK(777 == GLOB[1]);
4969   MyThreadArray t(Reader);
4970   if (!Tsan_FastMode())
4971     ANNOTATE_EXPECT_RACE_FOR_TSAN(GLOB+0, "test102: TP. FN with --fast-mode=yes");
4972   ANNOTATE_EXPECT_RACE_FOR_TSAN(GLOB+1, "test102: TP");
4973   printf("test102: --fast-mode=yes vs. --initialization-bit=yes\n");
4982 REGISTER_TEST2(Run, 102, FEATURE)
4983 }  // namespace test102
4985 // test103: Access different memory locations with different LockSets {{{1
4987 const int N_MUTEXES = 6;
4988 const int LOCKSET_INTERSECTION_SIZE = 3;
4990 int data[1 << LOCKSET_INTERSECTION_SIZE] = {0};
4991 Mutex MU[N_MUTEXES];
4993 inline int LS_to_idx(int ls) {
4994   return (ls >> (N_MUTEXES - LOCKSET_INTERSECTION_SIZE))
4995       & ((1 << LOCKSET_INTERSECTION_SIZE) - 1);
4999   for (int ls = 0; ls < (1 << N_MUTEXES); ls++) {
5000     if (LS_to_idx(ls) == 0)
5002     for (int m = 0; m < N_MUTEXES; m++)
5006     data[LS_to_idx(ls)]++;
5008     for (int m = N_MUTEXES - 1; m >= 0; m--)
5015   printf("test103: Access different memory locations with different LockSets\n");
5016   MyThreadArray t(Worker, Worker, Worker, Worker);
5020 REGISTER_TEST2(Run, 103, FEATURE)
5021 }  // namespace test103
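// A worked example of the LS_to_idx() mapping in test103, under its constants
// (N_MUTEXES = 6, LOCKSET_INTERSECTION_SIZE = 3). LS_to_idx keeps only the top
// three bits of the 6-bit lockset mask, so all locksets that agree on those
// bits hit the same data[] slot:
//   ls = 0b101011  ->  (ls >> 3) & 0b111 = 0b101 = 5  ->  data[5]
//   ls = 0b101000  ->  (ls >> 3) & 0b111 = 0b101 = 5  ->  data[5]  (same slot)
//   ls = 0b011010  ->  (ls >> 3) & 0b111 = 0b011 = 3  ->  data[3]
// Assuming (as the surrounding loop suggests; the lock-taking lines are elided
// here) that bit m of ls selects whether MU[m] is held, every data[i] with
// i != 0 (the i == 0 case is skipped above) is always accessed under the same
// non-empty subset of {MU[3], MU[4], MU[5]}, which is why the test is a true
// negative despite the varying full locksets.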
5023 // test104: TP. Simple race (write vs write). Heap mem. {{{1
5038   GLOB = (int*)memalign(64, sizeof(int));
5040   ANNOTATE_EXPECT_RACE(GLOB, "test104. TP.");
5041   ANNOTATE_TRACE_MEMORY(GLOB);
5042   printf("test104: positive\n");
5044   printf("\tGLOB=%d\n", *GLOB);
5047 REGISTER_TEST(Run, 104);
5048 }  // namespace test104
5051 // test105: Checks how stack grows. {{{1
5056   int ar[32] __attribute__((unused));
5057   // ANNOTATE_TRACE_MEMORY(&ar[0]);
5058   // ANNOTATE_TRACE_MEMORY(&ar[31]);
5064   int ar[32] __attribute__((unused));
5065   // ANNOTATE_TRACE_MEMORY(&ar[0]);
5066   // ANNOTATE_TRACE_MEMORY(&ar[31]);
5073   printf("test105: negative\n");
5078   printf("\tGLOB=%d\n", GLOB);
5080 REGISTER_TEST(Run, 105)
5081 }  // namespace test105
5084 // test106: TN. pthread_once. {{{1
5087 static pthread_once_t once = PTHREAD_ONCE_INIT;
5090   ANNOTATE_TRACE_MEMORY(GLOB);
5095   pthread_once(&once, Init);
5099   pthread_once(&once, Init);
5100   CHECK(*GLOB == 777);
5105   printf("test106: negative\n");
5106   MyThreadArray t(Worker0, Worker1, Worker1, Worker1);
5109   printf("\tGLOB=%d\n", *GLOB);
5111 REGISTER_TEST2(Run, 106, FEATURE)
5112 }  // namespace test106
5115 // test107: Test for ANNOTATE_EXPECT_RACE {{{1
5119   printf("test107: negative\n");
5120   ANNOTATE_EXPECT_RACE(&GLOB, "No race in fact. Just checking the tool.");
5121   printf("\tGLOB=%d\n", GLOB);
5123 REGISTER_TEST2(Run, 107, FEATURE|EXCLUDE_FROM_ALL)
5124 }  // namespace test107
5127 // test108: TN. initialization of static object. {{{1
5129 // Here we have a function-level static object.
5130 // Starting from gcc 4 this is thread-safe,
5131 // but it is not thread-safe with many other compilers.
5133 // Helgrind supports this kind of initialization by
5134 // intercepting __cxa_guard_acquire/__cxa_guard_release
5135 // and ignoring all accesses between them.
5136 // Helgrind also intercepts pthread_once in the same manner (see the sketch after this test).
5140     ANNOTATE_TRACE_MEMORY(&a_);
5143   void Check() const { CHECK(a_ == 42); }
5148 const Foo *GetFoo() {
5149   static const Foo *foo = new Foo();
5158   const Foo *foo = GetFoo();
5164   printf("test108: negative, initialization of static object\n");
5165   MyThreadArray t(Worker0, Worker, Worker);
5169 REGISTER_TEST2(Run, 108, FEATURE)
5170 }  // namespace test108
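// A rough sketch (in comment form; not exact ABI, names approximate) of what
// a C++ compiler emits for the function-level static in GetFoo() above, which
// is why intercepting __cxa_guard_acquire/__cxa_guard_release lets Helgrind
// treat the initialization as properly guarded:
//
//   static guard_t guard;                 // compiler-generated guard variable
//   static const Foo *foo;
//   if (!guard_is_initialized(guard)) {   // fast path, no lock
//     if (__cxa_guard_acquire(&guard)) {  // non-zero means "run the initializer"
//       foo = new Foo();                  // accesses in here are ignored
//       __cxa_guard_release(&guard);
//     }
//   }
//   return foo;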
5173 // test109: TN. Checking happens-before between parent and child threads. {{{1
5175 // Check that the detector correctly connects
5176 //   pthread_create with the new thread
5178 //   thread exit with pthread_join
5182 void Worker(void *a) {
5184   // printf("--Worker : %ld %p\n", (int*)a - GLOB, (void*)pthread_self());
5190   printf("test109: negative\n");
5192   for (int i = 0; i < N; i++) {
5193     t[i] = new MyThread(Worker, &GLOB[i]);
5195   for (int i = 0; i < N; i++) {
5196     ANNOTATE_TRACE_MEMORY(&GLOB[i]);
5199     // printf("--Started: %p\n", (void*)t[i]->tid());
5201   for (int i = 0; i < N; i++) {
5202     // printf("--Joining: %p\n", (void*)t[i]->tid());
5204     // printf("--Joined : %p\n", (void*)t[i]->tid());
5207   for (int i = 0; i < N; i++) delete t[i];
5209   printf("\tGLOB=%d\n", GLOB[13]);
5211 REGISTER_TEST(Run, 109)
5212 }  // namespace test109
5215 // test110: TP. Simple races with stack, global and heap objects. {{{1
5228 union pi_pv_union { int* pi; void* pv; } POSIX_MEMALIGN;
5246   (*(POSIX_MEMALIGN.pi))++;
5256   MALLOC = (int*)malloc(sizeof(int));
5257   CALLOC = (int*)calloc(1, sizeof(int));
5258   REALLOC = (int*)realloc(NULL, sizeof(int));
5259   VALLOC = (int*)valloc(sizeof(int));
5260   PVALLOC = (int*)valloc(sizeof(int));  // TODO: pvalloc breaks helgrind.
5261   MEMALIGN = (int*)memalign(64, sizeof(int));
5262   CHECK(0 == posix_memalign(&POSIX_MEMALIGN.pv, 64, sizeof(int)));
5263   MMAP = (int*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE,
5264                     MAP_PRIVATE | MAP_ANON, -1, 0);
5267   NEW_ARR = new int[10];
5270   FAST_MODE_INIT(STACK);
5271   ANNOTATE_EXPECT_RACE(STACK, "real race on stack object");
5272   FAST_MODE_INIT(&GLOB);
5273   ANNOTATE_EXPECT_RACE(&GLOB, "real race on global object");
5274   FAST_MODE_INIT(&STATIC);
5275   ANNOTATE_EXPECT_RACE(&STATIC, "real race on a static global object");
5276   FAST_MODE_INIT(MALLOC);
5277   ANNOTATE_EXPECT_RACE(MALLOC, "real race on a malloc-ed object");
5278   FAST_MODE_INIT(CALLOC);
5279   ANNOTATE_EXPECT_RACE(CALLOC, "real race on a calloc-ed object");
5280   FAST_MODE_INIT(REALLOC);
5281   ANNOTATE_EXPECT_RACE(REALLOC, "real race on a realloc-ed object");
5282   FAST_MODE_INIT(VALLOC);
5283   ANNOTATE_EXPECT_RACE(VALLOC, "real race on a valloc-ed object");
5284   FAST_MODE_INIT(PVALLOC);
5285   ANNOTATE_EXPECT_RACE(PVALLOC, "real race on a pvalloc-ed object");
5286   FAST_MODE_INIT(MEMALIGN);
5287   ANNOTATE_EXPECT_RACE(MEMALIGN, "real race on a memalign-ed object");
5288   FAST_MODE_INIT(POSIX_MEMALIGN.pi);
5289   ANNOTATE_EXPECT_RACE(POSIX_MEMALIGN.pi, "real race on a posix_memalign-ed object");
5290   FAST_MODE_INIT(MMAP);
5291   ANNOTATE_EXPECT_RACE(MMAP, "real race on a mmap-ed object");
5293   FAST_MODE_INIT(NEW);
5294   ANNOTATE_EXPECT_RACE(NEW, "real race on a new-ed object");
5295   FAST_MODE_INIT(NEW_ARR);
5296   ANNOTATE_EXPECT_RACE(NEW_ARR, "real race on a new[]-ed object");
5298   MyThreadArray t(Worker, Worker, Worker);
5301   printf("test110: positive (race on a stack object)\n");
5302   printf("\tSTACK=%d\n", *STACK);
5312   free(POSIX_MEMALIGN.pv);
5313   munmap(MMAP, sizeof(int));
5317 REGISTER_TEST(Run, 110)
5318 }  // namespace test110
5321 // test111: TN. Unit test for a bug related to stack handling. {{{1
5328 void write_to_p(char *p, int val) {
5329   for (int i = 0; i < N; i++)
5333 static bool ArgIsTrue(bool *arg) {
5334   // printf("ArgIsTrue: %d tid=%d\n", *arg, (int)pthread_self());
5335   return *arg == true;
5340   write_to_p(some_stack, 1);
5341   mu.LockWhen(Condition(&ArgIsTrue, &COND));
5347   char some_more_stack[N];
5348   write_to_p(some_stack, 2);
5349   write_to_p(some_more_stack, 2);
5368   printf("test111: regression test\n");
5369   MyThreadArray t(Worker1, Worker1, Worker2);
5370   // AnnotateSetVerbosity(__FILE__, __LINE__, 3);
5373   // AnnotateSetVerbosity(__FILE__, __LINE__, 1);
5375 REGISTER_TEST2(Run, 111, FEATURE)
5376 }  // namespace test111
5378 // test112: STAB. Test for ANNOTATE_PUBLISH_MEMORY_RANGE {{{1
5381 const int N = 64 * 5;
5383 bool ready = false;  // under mu
5384 int beg, end;        // under mu
5390   bool is_ready = false;
5402     for (int i = b; i < e; i++) {
5408 void PublishRange(int b, int e) {
5409   MyThreadArray t(Worker, Worker);
5410   ready = false;  // runs before other threads
5413   ANNOTATE_NEW_MEMORY(GLOB + b, e - b);
5414   ANNOTATE_TRACE_MEMORY(GLOB + b);
5415   for (int j = b; j < e; j++) {
5418   ANNOTATE_PUBLISH_MEMORY_RANGE(GLOB + b, e - b);
5431   printf("test112: stability (ANNOTATE_PUBLISH_MEMORY_RANGE)\n");
5432   GLOB = new char [N];
5434   PublishRange(0, 10);
5437   PublishRange(12, 13);
5438   PublishRange(10, 14);
5440   PublishRange(15, 17);
5441   PublishRange(16, 18);
5443   // do a few more random publishes.
5444   for (int i = 0; i < 20; i++) {
5445     const int begin = rand() % N;
5446     const int size = (rand() % (N - begin)) + 1;
5448     CHECK(begin + size <= N);
5449     PublishRange(begin, begin + size);
5452   printf("GLOB = %d\n", (int)GLOB[0]);
5454 REGISTER_TEST2(Run, 112, STABILITY)
5455 }  // namespace test112
5458 // test113: PERF. A lot of lock/unlock calls. Many locks. {{{1
5460 const int kNumIter = 100000;
5461 const int kNumLocks = 7;
5462 Mutex MU[kNumLocks];
5464   printf("test113: perf\n");
5465   for (int i = 0; i < kNumIter; i++) {
5466     for (int j = 0; j < kNumLocks; j++) {
5467       if (i & (1 << j)) MU[j].Lock();
5469     for (int j = kNumLocks - 1; j >= 0; j--) {
5470       if (i & (1 << j)) MU[j].Unlock();
5474 REGISTER_TEST(Run, 113)
5475 }  // namespace test113
5478 // test114: STAB. Recursive lock. {{{1
5485   static int foo = Bar();
5489   static int x = Foo();
5493   printf("test114: stab\n");
5494   MyThreadArray t(Worker, Worker);
5498 REGISTER_TEST(Run, 114)
5499 }  // namespace test114
5502 // test115: TN. sem_open. {{{1
5506 const char *kSemName = "drt-test-sem";
5510 sem_t *DoSemOpen() {
5511   // TODO: there is some race report inside sem_open
5512   // for which suppressions do not work... (???)
5513   ANNOTATE_IGNORE_WRITES_BEGIN();
5514   sem_t *sem = sem_open(kSemName, O_CREAT, 0600, 3);
5515   ANNOTATE_IGNORE_WRITES_END();
5528   // if the detector observes a happens-before arc between
5529   // sem_open and sem_wait, it will be silent.
5530   sem_t *sem = DoSemOpen();
5532   CHECK(sem != SEM_FAILED);
5533   CHECK(sem_wait(sem) == 0);
5541   printf("test115: stab (sem_open())\n");
5543   // just check that sem_open is not completely broken
5544   sem_unlink(kSemName);
5545   sem_t *sem = DoSemOpen();
5546   CHECK(sem != SEM_FAILED);
5547   CHECK(sem_wait(sem) == 0);
5548   sem_unlink(kSemName);
5550   // check that sem_open and sem_wait create a happens-before arc.
5551   MyThreadArray t(Worker, Worker, Worker);
5555   sem_unlink(kSemName);
5557 REGISTER_TEST(Run, 115)
5558 }  // namespace test115
5561 // test116: TN. some operations with string<> objects. {{{1
5565   string A[10], B[10], C[10];
5566   for (int i = 0; i < 1000; i++) {
5567     for (int j = 0; j < 10; j++) {
5571       a = "sdl;fkjhasdflksj df";
5572       b = "sdf sdf;ljsd ";
5580     for (int j = 0; j < 10; j++) {
5592   printf("test116: negative (strings)\n");
5593   MyThreadArray t(Worker, Worker, Worker);
5597 REGISTER_TEST2(Run, 116, FEATURE|EXCLUDE_FROM_ALL)
5598 }  // namespace test116
5600 // test117: TN. Many calls to function-scope static init. {{{1
5609 void Worker(void *a) {
5610   static int foo = Foo();
5615   printf("test117: negative\n");
5617   for (int i = 0; i < N; i++) {
5618     t[i] = new MyThread(Worker);
5620   for (int i = 0; i < N; i++) {
5623   for (int i = 0; i < N; i++) {
5626   for (int i = 0; i < N; i++) delete t[i];
5628 REGISTER_TEST(Run, 117)
5629 }  // namespace test117
5633 // test118 PERF: One signal, multiple waits. {{{1
5636 const int kNumIter = 2000000;
5639   ANNOTATE_CONDVAR_SIGNAL(&GLOB);
5642   for (int i = 0; i < kNumIter; i++) {
5643     ANNOTATE_CONDVAR_WAIT(&GLOB);
5644     if (i == kNumIter / 2)
5649   printf("test118: perf\n");
5650   MyThreadArray t(Signaller, Waiter, Signaller, Waiter);
5653   printf("\tGLOB=%d\n", GLOB);
5655 REGISTER_TEST(Run, 118)
5656 }  // namespace test118
5659 // test119: TP. Testing that malloc does not introduce any HB arc. {{{1
5672   printf("test119: positive (checking if malloc creates HB arcs)\n");
5673   FAST_MODE_INIT(&GLOB);
5674   if (!(Tsan_PureHappensBefore() && kMallocUsesMutex))
5675     ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "true race");
5676   MyThreadArray t(Worker1, Worker2);
5679   printf("\tGLOB=%d\n", GLOB);
5681 REGISTER_TEST(Run, 119)
5682 }  // namespace test119
5685 // test120: TP. Thread1: write then read. Thread2: read. {{{1
5691   CHECK(GLOB);       // read
5696   CHECK(GLOB >= 0);  // read
5700   FAST_MODE_INIT(&GLOB);
5701   ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "TP (T1: write then read, T2: read)");
5702   printf("test120: positive\n");
5703   MyThreadArray t(Thread1, Thread2);
5707   printf("\tGLOB=%d\n", GLOB);
5709 REGISTER_TEST(Run, 120)
5710 }  // namespace test120
5713 // test121: TP. Example of double-checked-locking {{{1
5717 } __attribute__ ((aligned (64)));
5724     MutexLock lock(&mu);
5726       ANNOTATE_EXPECT_RACE_FOR_TSAN(&foo, "test121. Double-checked locking (ptr)");
5728       if (!Tsan_FastMode())
5729         ANNOTATE_EXPECT_RACE_FOR_TSAN(&foo->a, "test121. Double-checked locking (obj)");
5737   CHECK(foo && foo->a == 42);
5740 void Worker1() { UseMe(); }
5741 void Worker2() { UseMe(); }
5742 void Worker3() { UseMe(); }
5746   FAST_MODE_INIT(&foo);
5747   printf("test121: TP. Example of double-checked-locking\n");
5748   MyThreadArray t1(Worker1, Worker2, Worker3);
5753 REGISTER_TEST(Run, 121)
5754 }  // namespace test121
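// An illustrative, non-registered sketch (hypothetical names) of the classic
// double-checked-locking pattern that test121 flags as a true positive: the
// unsynchronized first check races with the publication done under the lock.
// Only the Mutex/MutexLock wrappers from THREAD_WRAPPERS are assumed.
namespace dcl_sketch {
struct Obj { int a; };
static Obj *instance = NULL;  // read without the lock on the fast path
static Mutex mu;

Obj *GetInstance() {
  if (instance == NULL) {      // 1st check: racy read of the pointer
    MutexLock lock(&mu);
    if (instance == NULL) {    // 2nd check: now under the lock
      Obj *tmp = new Obj;
      tmp->a = 42;
      instance = tmp;          // racy publication: no release ordering
    }
  }
  return instance;             // callers may see the pointer before 'a'
}
}  // namespace dcl_sketch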
5756 // test122 TP: Simple test with RWLock {{{1
5762 void WriteWhileHoldingReaderLock(int *p) {
5764   ReaderLockScoped lock(&mu);  // Reader lock for writing. -- bug.
5768 void CorrectWrite(int *p) {
5769   WriterLockScoped lock(&mu);
5773 void Thread1() { WriteWhileHoldingReaderLock(&VAR1); }
5774 void Thread2() { CorrectWrite(&VAR1); }
5775 void Thread3() { CorrectWrite(&VAR2); }
5776 void Thread4() { WriteWhileHoldingReaderLock(&VAR2); }
5780   printf("test122: positive (rw-lock)\n");
5783   ANNOTATE_TRACE_MEMORY(&VAR1);
5784   ANNOTATE_TRACE_MEMORY(&VAR2);
5785   if (!Tsan_PureHappensBefore()) {
5786     ANNOTATE_EXPECT_RACE_FOR_TSAN(&VAR1, "test122. TP. ReaderLock-ed while writing");
5787     ANNOTATE_EXPECT_RACE_FOR_TSAN(&VAR2, "test122. TP. ReaderLock-ed while writing");
5789   MyThreadArray t(Thread1, Thread2, Thread3, Thread4);
5793 REGISTER_TEST(Run, 122)
5794 }  // namespace test122
5797 // test123 TP: accesses of different sizes. {{{1
5809 // Q. Hey dude, why so many functions?
5810 // A. I need different stack traces for different accesses.
5812 void Wr64_0() { MEM[0].u64[0] = 1; }
5813 void Wr64_1() { MEM[1].u64[0] = 1; }
5814 void Wr64_2() { MEM[2].u64[0] = 1; }
5815 void Wr64_3() { MEM[3].u64[0] = 1; }
5816 void Wr64_4() { MEM[4].u64[0] = 1; }
5817 void Wr64_5() { MEM[5].u64[0] = 1; }
5818 void Wr64_6() { MEM[6].u64[0] = 1; }
5819 void Wr64_7() { MEM[7].u64[0] = 1; }
5821 void Wr32_0() { MEM[0].u32[0] = 1; }
5822 void Wr32_1() { MEM[1].u32[1] = 1; }
5823 void Wr32_2() { MEM[2].u32[0] = 1; }
5824 void Wr32_3() { MEM[3].u32[1] = 1; }
5825 void Wr32_4() { MEM[4].u32[0] = 1; }
5826 void Wr32_5() { MEM[5].u32[1] = 1; }
5827 void Wr32_6() { MEM[6].u32[0] = 1; }
5828 void Wr32_7() { MEM[7].u32[1] = 1; }
5830 void Wr16_0() { MEM[0].u16[0] = 1; }
5831 void Wr16_1() { MEM[1].u16[1] = 1; }
5832 void Wr16_2() { MEM[2].u16[2] = 1; }
5833 void Wr16_3() { MEM[3].u16[3] = 1; }
5834 void Wr16_4() { MEM[4].u16[0] = 1; }
5835 void Wr16_5() { MEM[5].u16[1] = 1; }
5836 void Wr16_6() { MEM[6].u16[2] = 1; }
5837 void Wr16_7() { MEM[7].u16[3] = 1; }
5839 void Wr8_0() { MEM[0].u8[0] = 1; }
5840 void Wr8_1() { MEM[1].u8[1] = 1; }
5841 void Wr8_2() { MEM[2].u8[2] = 1; }
5842 void Wr8_3() { MEM[3].u8[3] = 1; }
5843 void Wr8_4() { MEM[4].u8[4] = 1; }
5844 void Wr8_5() { MEM[5].u8[5] = 1; }
5845 void Wr8_6() { MEM[6].u8[6] = 1; }
5846 void Wr8_7() { MEM[7].u8[7] = 1; }
5892 void W00() { WriteAll64(); }
5893 void W01() { WriteAll64(); }
5894 void W02() { WriteAll64(); }
5896 void W10() { WriteAll32(); }
5897 void W11() { WriteAll32(); }
5898 void W12() { WriteAll32(); }
5900 void W20() { WriteAll16(); }
5901 void W21() { WriteAll16(); }
5902 void W22() { WriteAll16(); }
5904 void W30() { WriteAll8(); }
5905 void W31() { WriteAll8(); }
5906 void W32() { WriteAll8(); }
5908 typedef void (*F)(void);
5910 void TestTwoSizes(F f1, F f2) {
5911   // first f1, then f2
5912   ANNOTATE_NEW_MEMORY(&MEM, sizeof(MEM));
5913   memset(&MEM, 0, sizeof(MEM));
5914   MyThreadArray t1(f1, f2);
5918   ANNOTATE_NEW_MEMORY(&MEM, sizeof(MEM));
5919   memset(&MEM, 0, sizeof(MEM));
5920   MyThreadArray t2(f2, f1);
5926   printf("test123: positive (different sizes)\n");
5927   TestTwoSizes(W00, W10);
5928   // TestTwoSizes(W01, W20);
5929   // TestTwoSizes(W02, W30);
5930   // TestTwoSizes(W11, W21);
5931   // TestTwoSizes(W12, W31);
5932   // TestTwoSizes(W22, W32);
5935 REGISTER_TEST2(Run, 123, FEATURE|EXCLUDE_FROM_ALL)
5936 }  // namespace test123
5939 // test124: What happens if we delete an unlocked lock? {{{1
5941 // This test does not work with pthreads (you can't call
5942 // pthread_mutex_destroy on a locked lock).
5946   Mutex *a_large_local_array_of_mutexes;
5947   a_large_local_array_of_mutexes = new Mutex[N];
5948   for (int i = 0; i < N; i++) {
5949     a_large_local_array_of_mutexes[i].Lock();
5951   delete []a_large_local_array_of_mutexes;
5956   printf("test124: negative\n");
5957   MyThreadArray t(Worker, Worker, Worker);
5960   printf("\tGLOB=%d\n", GLOB);
5962 REGISTER_TEST2(Run, 124, FEATURE|EXCLUDE_FROM_ALL)
5963 }  // namespace test124
5966 // test125 TN: Backwards lock (annotated). {{{1
5968 // This test uses the "backwards mutex" locking protocol.
5969 // We take a *reader* lock when writing to per-thread data
5970 // (GLOB[thread_num]) and we take a *writer* lock when we
5971 // are reading from the entire array at once.
5973 // Such a locking protocol is not understood by ThreadSanitizer's
5974 // hybrid state machine. So, you either have to use a pure happens-before
5975 // detector ("tsan --pure-happens-before") or apply pure happens-before mode
5976 // to this particular lock by using ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&mu).
5978 const int n_threads = 3;
5980 int GLOB[n_threads];
5982 int adder_num;  // updated atomically.
5985   int my_num = AtomicIncrement(&adder_num, 1);
5987   ReaderLockScoped lock(&mu);
5994   WriterLockScoped lock(&mu);
5995   for (int i = 0; i < n_threads; i++) {
5999   printf("sum=%d\n", sum);
6003   printf("test125: negative\n");
6005   ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&mu);
6007   // run Adders, then Aggregator
6009   MyThreadArray t(Adder, Adder, Adder, Aggregator);
6014   // Run Aggregator first.
6017   MyThreadArray t(Aggregator, Adder, Adder, Adder);
6023 REGISTER_TEST(Run, 125)
6024 }  // namespace test125
6026 // test126 TN: test for BlockingCounter {{{1
6028 BlockingCounter *blocking_counter;
6031   CHECK(blocking_counter);
6033   blocking_counter->DecrementCount();
6036   printf("test126: negative\n");
6037   MyThreadArray t(Worker, Worker, Worker);
6038   blocking_counter = new BlockingCounter(3);
6040   blocking_counter->Wait();
6043   printf("\tGLOB=%d\n", GLOB);
6045 REGISTER_TEST(Run, 126)
6046 }  // namespace test126
6049 // test127. Bad code: unlocking a mutex locked by another thread. {{{1
6060   printf("test127: unlocking a mutex locked by another thread.\n");
6061   MyThreadArray t(Thread1, Thread2);
6065 REGISTER_TEST(Run, 127)
6066 }  // namespace test127
6068 // test128. Suppressed code in concurrent accesses. {{{1
6069 // Please use the --suppressions=unittest.supp flag when running this test.
6079 void ThisFunctionShouldBeSuppressed() {
6083   printf("test128: Suppressed code in concurrent accesses.\n");
6084   MyThreadArray t(Worker, ThisFunctionShouldBeSuppressed);
6088 REGISTER_TEST2(Run, 128, FEATURE|EXCLUDE_FROM_ALL)
6089 }  // namespace test128
6091 // test129: TN. Synchronization via ReaderLockWhen(). {{{1
6095 bool WeirdCondition(int* param) {
6096   *param = GLOB;  // a write into Waiter's memory
6101   MU.ReaderLockWhen(Condition(WeirdCondition, &param));
6107   usleep(100000);  // Make sure the waiter blocks.
6110   MU.Unlock();  // calls ANNOTATE_CONDVAR_SIGNAL
6113   printf("test129: Synchronization via ReaderLockWhen()\n");
6114   MyThread mt(Waiter, NULL, "Waiter Thread");
6118   printf("\tGLOB=%d\n", GLOB);
6120 REGISTER_TEST2(Run, 129, FEATURE);
6121 }  // namespace test129
6123 // test130: TN. Per-thread. {{{1
6126 // This test verifies that the race detector handles
6127 // thread-local storage (TLS) correctly.
6128 // As of 09-03-30 ThreadSanitizer has a bug:
6130 //   - Thread1 touches per_thread_global
6132 //   - Thread2 starts (and there is no happens-before relation between it and
6134 //   - Thread2 touches per_thread_global
6135 // It may happen that Thread2 gets per_thread_global at the same address
6136 // as Thread1. Since there is no happens-before relation between the threads,
6137 // ThreadSanitizer reports a race.
6139 // test131 does the same for the stack.
6141 static __thread int per_thread_global[10] = {0};
6143 void RealWorker() {  // Touch per_thread_global.
6144   per_thread_global[1]++;
6148 void Worker() {  // Spawn a few threads that touch per_thread_global.
6149   MyThreadArray t(RealWorker, RealWorker);
6153 void Worker0() { sleep(0); Worker(); }
6154 void Worker1() { sleep(1); Worker(); }
6155 void Worker2() { sleep(2); Worker(); }
6156 void Worker3() { sleep(3); Worker(); }
6159   printf("test130: Per-thread\n");
6160   MyThreadArray t1(Worker0, Worker1, Worker2, Worker3);
6163   printf("\tper_thread_global=%d\n", per_thread_global[1]);
6165 REGISTER_TEST(Run, 130)
6167 }  // namespace test130
6170 // test131: TN. Stack. {{{1
6172 // Same as test130, but for the stack.
6174 void RealWorker() {  // Touch the stack.
6179 void Worker() {  // Spawn a few threads that touch the stack.
6180   MyThreadArray t(RealWorker, RealWorker);
6184 void Worker0() { sleep(0); Worker(); }
6185 void Worker1() { sleep(1); Worker(); }
6186 void Worker2() { sleep(2); Worker(); }
6187 void Worker3() { sleep(3); Worker(); }
6190   printf("test131: stack\n");
6191   MyThreadArray t(Worker0, Worker1, Worker2, Worker3);
6195 REGISTER_TEST(Run, 131)
6196 }  // namespace test131
6199 // test132: TP. Simple race (write vs write). Works in fast-mode. {{{1
6202 void Worker() { GLOB = 1; }
6205   FAST_MODE_INIT(&GLOB);
6206   ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test132");
6207   printf("test132: positive; &GLOB=%p\n", &GLOB);
6208   ANNOTATE_TRACE_MEMORY(&GLOB);
6210   MyThreadArray t(Worker, Worker);
6218 REGISTER_TEST(Run, 132);
6219 }  // namespace test132
6222 // test133: TP. Simple race (write vs write). Works in fast mode. {{{1
6224 // Same as test132, but everything is run from a separate thread spawned from
6227 void Worker() { GLOB = 1; }
6230   FAST_MODE_INIT(&GLOB);
6231   ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test133");
6232   printf("test133: positive; &GLOB=%p\n", &GLOB);
6233   ANNOTATE_TRACE_MEMORY(&GLOB);
6235   MyThreadArray t(Worker, Worker);
6244 REGISTER_TEST(Run, 133);
6245 }  // namespace test133
6248 // test134 TN. Swap. Variant of test79. {{{1
6251 typedef __gnu_cxx::hash_map<int, int> map_t;
6253 typedef std::map<int, int> map_t;
6257 // Here we use swap to pass the map between threads.
6258 // The synchronization is correct, but w/o the annotation
6259 // any hybrid detector will complain.
6261 // Swap is very unfriendly to lock-set (and hybrid) race detectors.
6262 // Since tmp is destructed outside the mutex, we need to have a happens-before
6263 // arc between any prior access to map and here.
6264 // Since the internals of tmp are created outside the mutex and are passed to
6265 // the other thread, we need to have a h-b arc between here and any future access.
6266 // These arcs can be created by HAPPENS_{BEFORE,AFTER} annotations, but it is
6267 // much simpler to apply pure-happens-before mode to the mutex mu.
6270   MutexLock lock(&mu);
6271   ANNOTATE_HAPPENS_AFTER(&map);
6272   // We swap the new empty map 'tmp' with 'map'.
6274   ANNOTATE_HAPPENS_BEFORE(&map);
6275   // tmp (which is the old version of map) is destroyed here.
6279   MutexLock lock(&mu);
6280   ANNOTATE_HAPPENS_AFTER(&map);
6282   ANNOTATE_HAPPENS_BEFORE(&map);
6286   printf("test134: negative (swap)\n");
6287   // ********************** Shorter way: ***********************
6288   // ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&mu);
6289   MyThreadArray t(Worker, Worker, Swapper, Worker, Worker);
6293 REGISTER_TEST(Run, 134)
6294 }  // namespace test134
6296 // test135 TN. Swap. Variant of test79. {{{1
6300   const long SIZE = 65536;
6301   for (int i = 0; i < 32; i++) {
6302     int *ptr = (int*)mmap(NULL, SIZE, PROT_READ | PROT_WRITE,
6303                           MAP_PRIVATE | MAP_ANON, -1, 0);
6310   MyThreadArray t(SubWorker, SubWorker, SubWorker, SubWorker);
6316   printf("test135: negative (mmap)\n");
6317   MyThreadArray t(Worker, Worker, Worker, Worker);
6321 REGISTER_TEST(Run, 135)
6322 }  // namespace test135
6324 // test136. Unlock twice. {{{1
6327   printf("test136: unlock twice\n");
6328   pthread_mutexattr_t attr;
6329   CHECK(0 == pthread_mutexattr_init(&attr));
6330   CHECK(0 == pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK));
6333   CHECK(0 == pthread_mutex_init(&mu, &attr));
6334   CHECK(0 == pthread_mutex_lock(&mu));
6335   CHECK(0 == pthread_mutex_unlock(&mu));
6336   int ret_unlock = pthread_mutex_unlock(&mu);  // unlocking twice.
6337   int ret_destroy = pthread_mutex_destroy(&mu);
6338   printf(" pthread_mutex_unlock returned %d\n", ret_unlock);
6339   printf(" pthread_mutex_destroy returned %d\n", ret_destroy);
6343 REGISTER_TEST(Run, 136)
6344 }  // namespace test136
6346 // test137 TP. Races on stack variables. {{{1
6349 ProducerConsumerQueue q(10);
6353   int *tmp = (int*)q.Get();
6355   int *racey = &stack;
6359   // We may miss the races if we sleep less due to die_memory events...
6364   printf("test137: TP. Races on stack variables.\n");
6366   MyThreadArray t(Worker, Worker, Worker, Worker);
6372 REGISTER_TEST2(Run, 137, FEATURE|EXCLUDE_FROM_ALL)
6373 }  // namespace test137
6375 // test138 FN. Two closures hit the same thread in ThreadPool. {{{1
6385   FAST_MODE_INIT(&GLOB);
6386   printf("test138: FN. Two closures hit the same thread in ThreadPool.\n");
6388   // When using thread pools, two concurrent callbacks might be scheduled
6389   // onto the same executor thread. As a result, an unnecessary happens-before
6390   // relation may be introduced between the callbacks.
6391   // If we set the number of executor threads to 1, any known data
6392   // race detector will be silent. However, the same situation may happen
6393   // with any number of executor threads (with some probability).
6396   tp.Add(NewCallback(Worker));
6397   tp.Add(NewCallback(Worker));
6400 REGISTER_TEST2(Run, 138, FEATURE)
6401 }  // namespace test138
6403 // test139: FN. A true race hidden by a reference-counting annotation. {{{1
6406 RefCountedClass *obj;
6409   GLOB++;  // First access.
6416   GLOB++;  // Second access.
6420   FAST_MODE_INIT(&GLOB);
6421   printf("test139: FN. A true race hidden by a reference-counting annotation.\n");
6423   obj = new RefCountedClass;
6424   obj->AnnotateUnref();
6427   MyThreadArray mt(Worker1, Worker2);
6432 REGISTER_TEST2(Run, 139, FEATURE)
6433 }  // namespace test139
6435 // test140 TN. Swap. Variant of test79 and test134. {{{1
6438 typedef __gnu_cxx::hash_map<int, int> Container;
6440 typedef std::map<int,int> Container;
6443 static Container container;
6445 // Here we use swap to pass a Container between threads.
6446 // The synchronization is correct, but w/o the annotation
6447 // any hybrid detector will complain.
6449 // Unlike test134, we try to have a minimal set of annotations
6450 // so that extra h-b arcs do not hide other races.
6452 // Swap is very unfriendly to lock-set (and hybrid) race detectors.
6453 // Since tmp is destructed outside the mutex, we need to have a happens-before
6454 // arc between any prior access to map and here.
6455 // Since the internals of tmp are created outside the mutex and are passed to
6456 // the other thread, we need to have a h-b arc between here and any future access.
6458 // We want to be able to annotate the swapper so that we don't need to annotate
6462   tmp[1] = tmp[2] = tmp[3] = 0;
6464   MutexLock lock(&mu);
6465   container.swap(tmp);
6466   // we are unpublishing the old container.
6467   ANNOTATE_UNPUBLISH_MEMORY_RANGE(&container, sizeof(container));
6468   // we are publishing the new container.
6469   ANNOTATE_PUBLISH_MEMORY_RANGE(&container, sizeof(container));
6473   // tmp (which is the old version of container) is destroyed here.
6477   MutexLock lock(&mu);
6479   int *v = &container[2];
6480   for (int i = 0; i < 10; i++) {
6481     // if uncommented, this will break ANNOTATE_UNPUBLISH_MEMORY_RANGE():
6482     // ANNOTATE_HAPPENS_BEFORE(v);
6490   printf("test140: negative (swap) %p\n", &container);
6491   MyThreadArray t(Worker, Worker, Swapper, Worker, Worker);
6495 REGISTER_TEST(Run, 140)
6496 }  // namespace test140
6498 // test141 FP. unlink/fopen, rmdir/opendir. {{{1
6502 char *dir_name = NULL,
6508   // unlink deletes the file 'filename',
6509   // which exits the spin-loop in Waiter1().
6510   printf("  Deleting file...\n");
6511   CHECK(unlink(filename) == 0);
6516   while ((tmp = fopen(filename, "r")) != NULL) {
6520   printf("  ...file has been deleted\n");
6527   // rmdir deletes the directory 'dir_name',
6528   // which exits the spin-loop in Waker().
6529   printf("  Deleting directory...\n");
6530   CHECK(rmdir(dir_name) == 0);
6535   while ((tmp = opendir(dir_name)) != NULL) {
6539   printf("  ...directory has been deleted\n");
6544   FAST_MODE_INIT(&GLOB1);
6545   FAST_MODE_INIT(&GLOB2);
6546   printf("test141: FP. unlink/fopen, rmdir/opendir.\n");
6548   dir_name = strdup("/tmp/tsan-XXXXXX");
6549   IGNORE_RETURN_VALUE(mkdtemp(dir_name));
6551   filename = strdup((std::string() + dir_name + "/XXXXXX").c_str());
6552   const int fd = mkstemp(filename);
6556   MyThreadArray mta1(Waker1, Waiter1);
6560   MyThreadArray mta2(Waker2, Waiter2);
6568 REGISTER_TEST(Run, 141)
6569 }  // namespace test141
6572 // Simple FIFO queue annotated with PCQ annotations. {{{1
6573 class FifoMessageQueue {
6575   FifoMessageQueue() { ANNOTATE_PCQ_CREATE(this); }
6576   ~FifoMessageQueue() { ANNOTATE_PCQ_DESTROY(this); }
6577   // Send a message. 'message' should be positive.
6578   void Put(int message) {
6580     MutexLock lock(&mu_);
6581     ANNOTATE_PCQ_PUT(this);
6584   // Return the message from the queue and pop it,
6585   // or return 0 if there are no messages.
6587     MutexLock lock(&mu_);
6588     if (q_.empty()) return 0;
6589     int res = q_.front();
6591     ANNOTATE_PCQ_GET(this);
6600 // test142: TN. Check PCQ_* annotations. {{{1
6602 // Putter writes to array[i] and sends a message 'i'.
6603 // Getters receive messages and read array[message].
6604 // PCQ_* annotations calm down the hybrid detectors (see the usage sketch after this test).
6612   for (int i = 1; i <= N; i++) {
6620   int non_zero_received = 0;
6621   for (int i = 1; i <= N; i++) {
6624     CHECK(array[res] = res * res);
6625     non_zero_received++;
6629   printf("T=%zd: non_zero_received=%d\n",
6630          (size_t)pthread_self(), non_zero_received);
6634   printf("test142: tests PCQ annotations\n");
6635   MyThreadArray t(Putter, Getter, Getter);
6639 REGISTER_TEST(Run, 142)
6640 }  // namespace test142
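// A minimal, non-registered usage sketch of the annotated FifoMessageQueue
// above (names here are hypothetical, and the queue's Get() accessor is
// assumed from the class body whose signature line is elided above). The
// PCQ_PUT/PCQ_GET annotations inside Put()/Get() give a hybrid detector a
// happens-before edge from each Put() to the Get() that receives the message,
// so data written before Put() may be read safely after Get().
namespace fifo_usage_sketch {
static FifoMessageQueue mq;
static int payload[8];

void Producer() {
  payload[3] = 333;  // prepared before the message is sent
  mq.Put(3);
}

void Consumer() {
  int msg;
  while ((msg = mq.Get()) == 0) { }  // 0 means "no message yet"
  CHECK(payload[msg] == 333);
}
}  // namespace fifo_usage_sketch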
6643 // test143: TP. Check PCQ_* annotations. {{{1
6646 // We have a race on GLOB between Putter and one of the Getters.
6647 // A pure h-b detector will not see it.
6648 // If FifoMessageQueue was annotated using HAPPENS_BEFORE/AFTER, the race would
6650 // PCQ_* annotations do not hide this race.
6663   CHECK(GLOB == 1);  // Race here.
6668   if (!Tsan_PureHappensBefore()) {
6669     ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "true races");
6671   printf("test143: tests PCQ annotations (true positive)\n");
6672   MyThreadArray t(Putter, Getter, Getter);
6676 REGISTER_TEST(Run, 143);
6677 }  // namespace test143
6687 REGISTER_TEST2(Run, 300, RACE_DEMO)
6688 }  // namespace test300
6690 // test301: Simple race. {{{1
6692 Mutex mu1;  // This Mutex guards var.
6693 Mutex mu2;  // This Mutex is not related to var.
6694 int   var;  // GUARDED_BY(mu1)
6696 void Thread1() {  // Runs in a thread named 'test-thread-1'.
6697   MutexLock lock(&mu1);  // Correct Mutex.
6701 void Thread2() {  // Runs in a thread named 'test-thread-2'.
6702   MutexLock lock(&mu2);  // Wrong Mutex.
6708   printf("test301: simple race.\n");
6709   MyThread t1(Thread1, NULL, "test-thread-1");
6710   MyThread t2(Thread2, NULL, "test-thread-2");
6716 REGISTER_TEST2(Run, 301, RACE_DEMO)
6717 }  // namespace test301
6719 // test302: Complex race which happens at least twice. {{{1
6721 // In this test we have many different accesses to GLOB and only one access
6722 // is not synchronized properly.
6728   for (int i = 0; i < 100; i++) {
6731     // This read is protected correctly.
6732     MU1.Lock(); CHECK(GLOB >= 0); MU1.Unlock();
6735     // Here we used the wrong lock! The cause of the race is here.
6736     MU2.Lock(); CHECK(GLOB >= 0); MU2.Unlock();
6739     // This read is protected correctly.
6740     MU1.Lock(); CHECK(GLOB >= 0); MU1.Unlock();
6743     // This write is protected correctly.
6744     MU1.Lock(); GLOB++; MU1.Unlock();
6747     // sleep a bit so that the threads interleave
6748     // and the race happens at least twice.
6754   printf("test302: Complex race that happens twice.\n");
6755   MyThread t1(Worker), t2(Worker);
6758   t1.Join();   t2.Join();
6760 REGISTER_TEST2(Run, 302, RACE_DEMO)
6761 }  // namespace test302
6764 // test303: Need to trace the memory to understand the report. {{{1
6769 void Worker1() { CHECK(GLOB >= 0); }
6770 void Worker2() { MU.Lock(); GLOB = 1; MU.Unlock(); }
6773   printf("test303: a race that needs annotations.\n");
6774   ANNOTATE_TRACE_MEMORY(&GLOB);
6775   MyThreadArray t(Worker1, Worker2);
6779 REGISTER_TEST2(Run, 303, RACE_DEMO)
6780 }  // namespace test303
6784 // test304: Cannot trace the memory, since it is a library object. {{{1
6791   ANNOTATE_CONDVAR_SIGNAL((void*)0xDEADBEAF);
6792   MU.Lock(); CHECK(STR->length() >= 4); MU.Unlock();
6796   ANNOTATE_CONDVAR_SIGNAL((void*)0xDEADBEAF);
6797   CHECK(STR->length() >= 4);  // Unprotected!
6801   ANNOTATE_CONDVAR_SIGNAL((void*)0xDEADBEAF);
6802   MU.Lock(); CHECK(STR->length() >= 4); MU.Unlock();
6806   ANNOTATE_CONDVAR_SIGNAL((void*)0xDEADBEAF);
6807   MU.Lock(); *STR += " + a very very long string"; MU.Unlock();
6811   STR = new string("The String");
6812   printf("test304: a race where memory tracing does not work.\n");
6813   MyThreadArray t(Worker1, Worker2, Worker3, Worker4);
6817   printf("%s\n", STR->c_str());
6820 REGISTER_TEST2(Run, 304, RACE_DEMO)
6821 }  // namespace test304
// test305: A bit more tricky: two locks used inconsistently. {{{1
// In this test GLOB is protected by MU1 and MU2, but inconsistently.
// The TRACES observed by helgrind are:
// TRACE[1]: Access{T2/S2 wr} -> new State{Mod; #LS=2; #SS=1; T2/S2}
// TRACE[2]: Access{T4/S9 wr} -> new State{Mod; #LS=1; #SS=2; T2/S2, T4/S9}
// TRACE[3]: Access{T5/S13 wr} -> new State{Mod; #LS=1; #SS=3; T2/S2, T4/S9, T5/S13}
// TRACE[4]: Access{T6/S19 wr} -> new State{Mod; #LS=0; #SS=4; T2/S2, T4/S9, T5/S13, T6/S19}
//
// The guilty access is either Worker2() or Worker4(), depending on
// which mutex is supposed to protect GLOB.
// (A small lockset-intersection illustration follows this test.)
void Worker1() { MU1.Lock(); MU2.Lock(); GLOB = 1; MU2.Unlock(); MU1.Unlock(); }
void Worker2() { MU1.Lock();             GLOB = 2;               MU1.Unlock(); }
void Worker3() { MU1.Lock(); MU2.Lock(); GLOB = 3; MU2.Unlock(); MU1.Unlock(); }
void Worker4() {             MU2.Lock(); GLOB = 4; MU2.Unlock();               }

  ANNOTATE_TRACE_MEMORY(&GLOB);
  printf("test305: simple race.\n");
  MyThread t1(Worker1), t2(Worker2), t3(Worker3), t4(Worker4);
  t1.Start(); usleep(100);
  t2.Start(); usleep(100);
  t3.Start(); usleep(100);
  t4.Start(); usleep(100);
  t1.Join(); t2.Join(); t3.Join(); t4.Join();
REGISTER_TEST2(Run, 305, RACE_DEMO)
}  // namespace test305
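// Illustrative sketch (not part of the original test list): the lockset
// reasoning behind the TRACE lines quoted before test305.  A lockset-based
// detector keeps the running intersection of the locks held at each access;
// once the intersection becomes empty (after Worker4) the location is
// reported.  The bitmask representation and names below are hypothetical.
namespace test305_sketch {
// Represent a lockset as a bitmask: bit 0 = MU1, bit 1 = MU2.
enum { LS_MU1 = 1 << 0, LS_MU2 = 1 << 1 };

void ShowLockSetShrinking() {
  int ls = LS_MU1 | LS_MU2;  // Worker1 wrote under {MU1, MU2}: #LS == 2.
  ls &= LS_MU1;              // Worker2 wrote under {MU1}:      #LS == 1.
  ls &= LS_MU1 | LS_MU2;     // Worker3 wrote under {MU1, MU2}: #LS == 1.
  ls &= LS_MU2;              // Worker4 wrote under {MU2}:      #LS == 0.
  CHECK(ls == 0);            // Empty intersection ==> the race is reported.
}
}  // namespace test305_sketch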
// test306: Two locks are used to protect a var. {{{1
// Thread1 and Thread2 access the var under two locks.
// Thread3 uses no locks.

void Worker1() { MU1.Lock(); MU2.Lock(); GLOB = 1; MU2.Unlock(); MU1.Unlock(); }
void Worker2() { MU1.Lock(); MU2.Lock(); GLOB = 3; MU2.Unlock(); MU1.Unlock(); }
void Worker3() {                         GLOB = 4;                             }

  ANNOTATE_TRACE_MEMORY(&GLOB);
  printf("test306: simple race.\n");
  MyThread t1(Worker1), t2(Worker2), t3(Worker3);
  t1.Start(); usleep(100);
  t2.Start(); usleep(100);
  t3.Start(); usleep(100);
  t1.Join(); t2.Join(); t3.Join();
REGISTER_TEST2(Run, 306, RACE_DEMO)
}  // namespace test306
// test307: Simple race, code with control flow. {{{1
volatile /* to fool the compiler */ bool some_condition = true;

int FunctionWithControlFlow() {
  int unrelated_stuff = 0;

  SomeFunc();             // "--keep-history=1" will point somewhere here.
  if (some_condition) {   // Or here.
    if (some_condition) {
      unrelated_stuff++;  // Or here.

  (*GLOB)++;  // "--keep-history=2" will point here (experimental).

  return unrelated_stuff;

void Worker1() { FunctionWithControlFlow(); }
void Worker2() { Worker1(); }
void Worker3() { Worker2(); }
void Worker4() { Worker3(); }

  printf("test307: simple race, code with control flow\n");
  MyThreadArray t1(Worker1, Worker2, Worker3, Worker4);
REGISTER_TEST2(Run, 307, RACE_DEMO)
}  // namespace test307
// test308: Example of double-checked locking. {{{1
// (An illustrative sketch of the pattern follows this test.)
static int is_inited = 0;

  CHECK(foo && foo->a == 42);

void Worker1() { UseMe(); }
void Worker2() { UseMe(); }
void Worker3() { UseMe(); }

  ANNOTATE_TRACE_MEMORY(&is_inited);
  printf("test308: Example of double-checked-locking\n");
  MyThreadArray t1(Worker1, Worker2, Worker3);
REGISTER_TEST2(Run, 308, RACE_DEMO)
}  // namespace test308
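// Illustrative sketch (not part of the original test list): the classic
// double-checked-locking pattern that test308 refers to.  The first,
// unsynchronized read of is_inited races with the initializing store, which
// is exactly what the detector is expected to flag.  The types and names
// below are hypothetical; the real test308 code differs in detail.
namespace test308_sketch {
struct Foo { int a; };
static Foo  *foo = NULL;
static int   is_inited = 0;
static Mutex mu;

Foo *GetFoo() {
  if (!is_inited) {      // First (unsynchronized) check: racy read.
    MutexLock l(&mu);
    if (!is_inited) {    // Second check, under the lock.
      foo = new Foo;
      foo->a = 42;
      is_inited = 1;     // Published without a barrier: racy write.
    }
  }
  return foo;
}

void UseMe() {
  Foo *f = GetFoo();
  CHECK(f && f->a == 42);
}
}  // namespace test308_sketch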
// test309: Simple race on an STL object. {{{1
  GLOB = "Booooooooooo";

  printf("test309: simple race on an STL object.\n");
  MyThread t1(Worker1), t2(Worker2);
  t1.Join(); t2.Join();
REGISTER_TEST2(Run, 309, RACE_DEMO)
}  // namespace test309
// test310: One more simple race. {{{1
int *PTR = NULL;  // GUARDED_BY(mu1)

Mutex mu1;  // Protects PTR.
Mutex mu2;  // Unrelated to PTR.
Mutex mu3;  // Unrelated to PTR.

  MutexLock lock3(&mu3);  // This lock is unrelated to PTR.
  MutexLock lock1(&mu1);  // Protect PTR.

  MutexLock lock2(&mu2);  // This lock is unrelated to PTR.
  MutexLock lock1(&mu1);  // Protect PTR.
  int some_unrelated_stuff = 0;
  if (some_unrelated_stuff == 0)
    some_unrelated_stuff++;

  MutexLock lock2(&mu2);  // Oh, gosh, this is a wrong mutex!

// Some functions to make the stack trace non-trivial.
void DoWrite1() { Writer1(); }
void Thread1()  { DoWrite1(); }

void DoWrite2() { Writer2(); }
void Thread2()  { DoWrite2(); }

void DoRead()  { Reader(); }
void Thread3() { DoRead(); }

  printf("test310: simple race.\n");
  ANNOTATE_TRACE_MEMORY(PTR);
  MyThread t1(Thread1, NULL, "writer1"),
           t2(Thread2, NULL, "writer2"),
           t3(Thread3, NULL, "buggy reader");
  usleep(100000);  // Let the writers go first.
REGISTER_TEST2(Run, 310, RACE_DEMO)
}  // namespace test310
// test311: Yet another simple race. {{{1
int *PTR = NULL;  // GUARDED_BY(mu1)

Mutex mu1;  // Protects PTR.
Mutex mu2;  // Unrelated to PTR.
Mutex mu3;  // Unrelated to PTR.

void GoodWriter1() {
  MutexLock lock3(&mu3);  // This lock is unrelated to PTR.
  MutexLock lock1(&mu1);  // Protect PTR.

void GoodWriter2() {
  MutexLock lock2(&mu2);  // This lock is unrelated to PTR.
  MutexLock lock1(&mu1);  // Protect PTR.

  MutexLock lock1(&mu1);  // Protect PTR.

void BuggyWriter() {
  MutexLock lock2(&mu2);  // Wrong mutex!

// Some functions to make the stack trace non-trivial.
void DoWrite1() { GoodWriter1(); }
void Thread1()  { DoWrite1(); }

void DoWrite2() { GoodWriter2(); }
void Thread2()  { DoWrite2(); }

void DoGoodRead() { GoodReader(); }
void Thread3()    { DoGoodRead(); }

void DoBadWrite() { BuggyWriter(); }
void Thread4()    { DoBadWrite(); }

  printf("test311: simple race.\n");
  ANNOTATE_TRACE_MEMORY(PTR);
  MyThread t1(Thread1, NULL, "good writer1"),
           t2(Thread2, NULL, "good writer2"),
           t3(Thread3, NULL, "good reader"),
           t4(Thread4, NULL, "buggy writer");
  // t2 goes after t3. This way a pure happens-before detector has no chance.
  usleep(100000);  // Let the good folks go first.
REGISTER_TEST2(Run, 311, RACE_DEMO)
}  // namespace test311
// test312: A test with a very deep stack. {{{1
void RaceyWrite() { GLOB++; }
void Func1() { RaceyWrite(); }
void Func2() { Func1(); }
void Func3() { Func2(); }
void Func4() { Func3(); }
void Func5() { Func4(); }
void Func6() { Func5(); }
void Func7() { Func6(); }
void Func8() { Func7(); }
void Func9() { Func8(); }
void Func10() { Func9(); }
void Func11() { Func10(); }
void Func12() { Func11(); }
void Func13() { Func12(); }
void Func14() { Func13(); }
void Func15() { Func14(); }
void Func16() { Func15(); }
void Func17() { Func16(); }
void Func18() { Func17(); }
void Func19() { Func18(); }
void Worker() { Func19(); }

  printf("test312: simple race with deep stack.\n");
  MyThreadArray t(Worker, Worker, Worker);
REGISTER_TEST2(Run, 312, RACE_DEMO)
}  // namespace test312
// test313: TP. Test for thread graph output. {{{1
BlockingCounter *blocking_counter;

// Worker(N) will do 2^N increments of GLOB, each increment in a separate thread.
void Worker(long depth) {

  pool.StartWorkers();
  pool.Add(NewCallback(Worker, depth - 1));
  pool.Add(NewCallback(Worker, depth - 1));

  GLOB++;  // Race here.

  printf("test313: positive\n");
  printf("\tGLOB=%d\n", GLOB);
REGISTER_TEST2(Run, 313, RACE_DEMO)
}  // namespace test313
// test400: Demo of a simple false positive. {{{1

static vector<int> *vec;  // GUARDED_BY(mu);

void InitAllBeforeStartingThreads() {
  vec = new vector<int>;

  MutexLock lock(&mu);

  MutexLock lock(&mu);

//---- Sub-optimal code ---------
size_t NumberOfElementsLeft() {
  MutexLock lock(&mu);

void WaitForAllThreadsToFinish_InefficientAndTsanUnfriendly() {
  while (NumberOfElementsLeft()) {
    ;  // sleep or print or do nothing.
  // It is now safe to access vec without the lock.
  // But a hybrid detector (like ThreadSanitizer) can't see that.
  // Possible ways to avoid this false positive:
  //   1. Use a pure happens-before detector (e.g. "tsan --pure-happens-before").
  //   2. Call ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&mu)
  //      in InitAllBeforeStartingThreads().
  //   3. (preferred) Use WaitForAllThreadsToFinish_Good() (see below).
  // (A sketch of remedy 2 follows this test.)
  CHECK(vec->empty());

//----- Better code -----------
bool NoElementsLeft(vector<int> *v) {

void WaitForAllThreadsToFinish_Good() {
  mu.LockWhen(Condition(NoElementsLeft, vec));

  // It is now safe to access vec without the lock.
  CHECK(vec->empty());

  MyThreadArray t(Thread1, Thread2);
  InitAllBeforeStartingThreads();

  WaitForAllThreadsToFinish_InefficientAndTsanUnfriendly();
  // WaitForAllThreadsToFinish_Good();
REGISTER_TEST2(Run, 400, RACE_DEMO)
}  // namespace test400
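// Illustrative sketch (not part of the original test list): remedy 2 from the
// comment in test400 above.  ANNOTATE_MUTEX_IS_USED_AS_CONDVAR is the macro
// named in that comment (assumed to come from dynamic_annotations.h); it
// tells a hybrid detector that the mutex also carries condition-variable-style
// signalling, so the polling loop in
// WaitForAllThreadsToFinish_InefficientAndTsanUnfriendly() is no longer
// reported as a race.  The variables below are local to this sketch.
namespace test400_sketch {
Mutex mu;
static std::vector<int> *vec;  // GUARDED_BY(mu)

void InitAllBeforeStartingThreads() {
  vec = new std::vector<int>;
  // Remedy 2: one annotation at initialization time suppresses the
  // false positive for hybrid detectors such as ThreadSanitizer.
  ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&mu);
}
}  // namespace test400_sketch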
// test401: Demo of a false positive caused by reference counting. {{{1
// A simplified example of reference counting.
// DecRef() does the ref-count decrement in a way unfriendly to race detectors.
// DecRefAnnotated() does the same in a friendly way.
// (A sketch of the complete annotated decrement follows this test.)

static vector<int> *vec;
static int ref_count;

void InitAllBeforeStartingThreads(int number_of_threads) {
  vec = new vector<int>;

  ref_count = number_of_threads;

// Correct, but unfriendly to race detectors.
  return AtomicIncrement(&ref_count, -1);

// Correct and friendly to race detectors.
int DecRefAnnotated() {
  ANNOTATE_CONDVAR_SIGNAL(&ref_count);
  int res = AtomicIncrement(&ref_count, -1);

    ANNOTATE_CONDVAR_WAIT(&ref_count);

void ThreadWorker() {
  CHECK(ref_count > 0);
  CHECK(vec->size() == 1);
  if (DecRef() == 0) {  // Use DecRefAnnotated() instead!
    // No one uses vec now ==> delete it.
    delete vec;  // A false race may be reported here.

  MyThreadArray t(ThreadWorker, ThreadWorker, ThreadWorker);
  InitAllBeforeStartingThreads(3 /* number of threads */);
REGISTER_TEST2(Run, 401, RACE_DEMO)
}  // namespace test401
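// Illustrative sketch (not part of the original test list): the full shape of
// the annotated decrement recommended in test401.  The signal is issued
// before every decrement, and the wait is performed only on the path where
// the count reached zero, i.e. by the thread that will delete the object;
// this creates happens-before edges from all other decrements to the
// deletion.  A sketch only -- the real DecRefAnnotated() above may differ in
// detail.
namespace test401_sketch {
static int ref_count;

int DecRefAnnotated() {
  ANNOTATE_CONDVAR_SIGNAL(&ref_count);  // "I am done with the object."
  int res = AtomicIncrement(&ref_count, -1);
  if (res == 0) {
    ANNOTATE_CONDVAR_WAIT(&ref_count);  // Collect all earlier signals.
  }
  return res;
}
}  // namespace test401_sketch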
// test501: Manually call PRINT_* annotations {{{1
Mutex muCounter, muGlob[65];

  int myId = ++COUNTER;

  muGlob[myId].Lock();

  muGlob[myId].Unlock();

  MyThreadArray ta(Worker, Worker, Worker, Worker);

  MyThreadArray ta(Worker_1, Worker_1, Worker_1, Worker_1);

  ANNOTATE_RESET_STATS();
  printf("test501: Manually call PRINT_* annotations.\n");
  MyThreadArray ta(Worker_2, Worker_2, Worker_2, Worker_2);
  ANNOTATE_PRINT_MEMORY_USAGE(0);
  ANNOTATE_PRINT_STATS();
REGISTER_TEST2(Run, 501, FEATURE | EXCLUDE_FROM_ALL)
}  // namespace test501
// test502: produce lots of segments without cross-thread relations {{{1
// This test produces ~1Gb of memory usage when run with the following options:
//   --trace-after-race=0

  for (int i = 0; i < 750000; i++) {

  MyThreadArray t(TP, TP);
  printf("test502: produce lots of segments without cross-thread relations\n");
REGISTER_TEST2(Run, 502, MEMORY_USAGE | PRINT_STATS | EXCLUDE_FROM_ALL)
}  // namespace test502
// test503: produce lots of segments with simple HB-relations {{{1
// HB cache-miss rate is ~55%.

const int N_threads  = 32;
const int ARRAY_SIZE = 128;
int       GLOB[ARRAY_SIZE];
ProducerConsumerQueue *Q[N_threads];
int GLOB_limit = 100000;

  int myId = AtomicIncrement(&count, 1);

  ProducerConsumerQueue &myQ = *Q[myId], &nextQ = *Q[(myId + 1) % N_threads];

  // This code produces a new SS with each new segment.
  while (myQ.Get() != NULL) {
    for (int i = 0; i < ARRAY_SIZE; i++)

    if (myId == 0 && GLOB[0] > GLOB_limit) {

      for (int i = 0; i < N_threads; i++)

  printf("test503: produce lots of segments with simple HB-relations\n");
  for (int i = 0; i < N_threads; i++)
    Q[i] = new ProducerConsumerQueue(1);

  ThreadPool pool(N_threads);
  pool.StartWorkers();
  for (int i = 0; i < N_threads; i++) {
    pool.Add(NewCallback(Worker));
  } // all folks are joined here.

  for (int i = 0; i < N_threads; i++)

REGISTER_TEST2(Run, 503, MEMORY_USAGE | PRINT_STATS
                       | PERFORMANCE | EXCLUDE_FROM_ALL)
}  // namespace test503
// test504: force massive cache fetch-wback (50% misses, mostly CacheLineZ) {{{1

const int N_THREADS          = 2,
          HG_CACHELINE_COUNT = 1 << 16,
          HG_CACHELINE_SIZE  = 1 << 6,
          HG_CACHE_SIZE      = HG_CACHELINE_COUNT * HG_CACHELINE_SIZE;

// Using int gives ~4x the speed of the byte test, and the 4x array size
// brings the total footprint to 16x the cache size, so the memory still
// cached at the end can be neglected.  (Worked numbers follow this test.)
const int ARRAY_SIZE = 4 * HG_CACHE_SIZE,

int array[ARRAY_SIZE];

  // All threads write to different memory locations,
  // so no synchronization mechanisms are needed.
  int lower_bound = ARRAY_SIZE * (myId - 1) / N_THREADS,
      upper_bound = ARRAY_SIZE * ( myId    ) / N_THREADS;
  for (int j = 0; j < ITERATIONS; j++)
    for (int i = lower_bound; i < upper_bound;
         i += HG_CACHELINE_SIZE / sizeof(array[0])) {
      array[i] = i;  // Each array write generates a cache miss.

  printf("test504: force massive CacheLineZ fetch-wback\n");
  MyThreadArray t(Worker, Worker);
REGISTER_TEST2(Run, 504, PERFORMANCE | PRINT_STATS | EXCLUDE_FROM_ALL)
}  // namespace test504
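// Worked footprint arithmetic for the comment in test504 (and the analogous
// test505 below); illustrative only, assuming 4-byte int and 8-byte int64_t:
//   HG_CACHE_SIZE = (1 << 16) lines * (1 << 6) bytes/line = 4 MiB.
//   ARRAY_SIZE    = 4 * HG_CACHE_SIZE = 2^24 elements.
//   test504 (int):     2^24 * 4 bytes = 64 MiB,  i.e. 16x the 4 MiB cache.
//   test505 (int64_t): 2^24 * 8 bytes = 128 MiB, i.e. 32x the 4 MiB cache.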
// test505: force massive cache fetch-wback (60% misses) {{{1
// Modification of test504: more threads, byte accesses and lots of mutexes,
// so it produces lots of CacheLineF misses (30-50% of CacheLineZ misses).

const int N_THREADS          = 2,
          HG_CACHELINE_COUNT = 1 << 16,
          HG_CACHELINE_SIZE  = 1 << 6,
          HG_CACHE_SIZE      = HG_CACHELINE_COUNT * HG_CACHELINE_SIZE;

const int ARRAY_SIZE = 4 * HG_CACHE_SIZE,

int64_t array[ARRAY_SIZE];

const int N_MUTEXES = 5;
Mutex mu[N_MUTEXES];

  // All threads write to different memory locations,
  // so no synchronization mechanisms are needed.
  int lower_bound = ARRAY_SIZE * (myId - 1) / N_THREADS,
      upper_bound = ARRAY_SIZE * ( myId    ) / N_THREADS;
  for (int j = 0; j < ITERATIONS; j++)
    for (int mutex_id = 0; mutex_id < N_MUTEXES; mutex_id++) {
      Mutex *m = &mu[mutex_id];

      for (int i = lower_bound + mutex_id, cnt = 0;

           i += HG_CACHELINE_SIZE / sizeof(array[0]), cnt++) {
        array[i] = i;  // Each array write generates a cache miss.

  printf("test505: force massive CacheLineF fetch-wback\n");
  MyThreadArray t(Worker, Worker);
REGISTER_TEST2(Run, 505, PERFORMANCE | PRINT_STATS | EXCLUDE_FROM_ALL)
}  // namespace test505
// test506: massive HB's using Barriers {{{1
// HB cache miss is ~40%.
// Segments consume 10x more memory than SSs.
// Modification of test39.

// Same as test17 but uses Barrier class (pthread_barrier_t).
const int N_threads = 64,

Barrier *barrier[ITERATIONS];

  for (int i = 0; i < ITERATIONS; i++) {

    barrier[i]->Block();

  printf("test506: massive HB's using Barriers\n");
  for (int i = 0; i < ITERATIONS; i++) {
    barrier[i] = new Barrier(N_threads);

  ThreadPool pool(N_threads);
  pool.StartWorkers();
  for (int i = 0; i < N_threads; i++) {
    pool.Add(NewCallback(Worker));
  } // all folks are joined here.
  CHECK(GLOB == N_threads * ITERATIONS);
  for (int i = 0; i < ITERATIONS; i++) {

REGISTER_TEST2(Run, 506, PERFORMANCE | PRINT_STATS | EXCLUDE_FROM_ALL);
#endif  // NO_BARRIER
}  // namespace test506
// test507: vgHelgrind_initIterAtFM/stackClear benchmark {{{1
// vgHelgrind_initIterAtFM/stackClear consume ~8.5%/5.5% CPU.

const int N_THREADS  = 1,
          ITERATIONS = 1 << 20;

    ANNOTATE_RWLOCK_CREATE(&temp);

    ANNOTATE_RWLOCK_DESTROY(&temp);

  for (int j = 0; j < ITERATIONS; j++) {

  printf("test507: vgHelgrind_initIterAtFM/stackClear benchmark\n");
  ThreadPool pool(N_THREADS);
  pool.StartWorkers();
  for (int i = 0; i < N_THREADS; i++) {
    pool.Add(NewCallback(Worker));
  } // all folks are joined here.
REGISTER_TEST2(Run, 507, EXCLUDE_FROM_ALL);
}  // namespace test507
// test508: cmp_WordVecs_for_FM benchmark {{{1
// 50+% of CPU consumption by cmp_WordVecs_for_FM.

const int N_THREADS   = 1,
          BUFFER_SIZE = 1 << 10,
          ITERATIONS  = 1 << 9;

    ANNOTATE_RWLOCK_CREATE(&temp);

    ANNOTATE_RWLOCK_DESTROY(&temp);

  for (int j = 0; j < ITERATIONS; j++) {

  printf("test508: cmp_WordVecs_for_FM benchmark\n");
  ThreadPool pool(N_THREADS);
  pool.StartWorkers();
  for (int i = 0; i < N_THREADS; i++) {
    pool.Add(NewCallback(Worker));
  } // all folks are joined here.
REGISTER_TEST2(Run, 508, EXCLUDE_FROM_ALL);
}  // namespace test508
// test509: avl_find_node benchmark {{{1
// 10+% of CPU consumption by avl_find_node.

const int N_THREADS  = 16,
          ITERATIONS = 1 << 8;

  std::vector<Mutex*> mu_list;
  for (int i = 0; i < ITERATIONS; i++) {
    Mutex *mu = new Mutex();
    mu_list.push_back(mu);

  for (int i = ITERATIONS - 1; i >= 0; i--) {
    Mutex *mu = mu_list[i];

  printf("test509: avl_find_node benchmark\n");
  ThreadPool pool(N_THREADS);
  pool.StartWorkers();
  for (int i = 0; i < N_THREADS; i++) {
    pool.Add(NewCallback(Worker));
  } // all folks are joined here.
REGISTER_TEST2(Run, 509, EXCLUDE_FROM_ALL);
}  // namespace test509
// test510: SS-recycle test {{{1
// This test shows the case where only ~1% of SS are recycled.

const int N_THREADS  = 16,
          ITERATIONS = 1 << 10;

  for (int i = 0; i < ITERATIONS; i++) {
    ANNOTATE_CONDVAR_SIGNAL((void*)0xDeadBeef);

  //ANNOTATE_BENIGN_RACE(&GLOB, "Test");
  printf("test510: SS-recycle test\n");
  ThreadPool pool(N_THREADS);
  pool.StartWorkers();
  for (int i = 0; i < N_THREADS; i++) {
    pool.Add(NewCallback(Worker));
  } // all folks are joined here.
REGISTER_TEST2(Run, 510, MEMORY_USAGE | PRINT_STATS | EXCLUDE_FROM_ALL);
}  // namespace test510
// test511: Segment refcounting test ('1' refcounting) {{{1

  for (int i = 0; i < 300; i++) {
    ANNOTATE_CONDVAR_SIGNAL(&GLOB);

    ANNOTATE_CONDVAR_WAIT(&GLOB);

    ANNOTATE_PRINT_MEMORY_USAGE(0);
REGISTER_TEST2(Run, 511, MEMORY_USAGE | PRINT_STATS | EXCLUDE_FROM_ALL);
}  // namespace test511
// test512: Segment refcounting test ('S' refcounting) {{{1

  sem_init(&SEM, 0, 0);
  for (int i = 0; i < 300; i++) {

  ANNOTATE_PRINT_MEMORY_USAGE(0); */
REGISTER_TEST2(Run, 512, MEMORY_USAGE | PRINT_STATS | EXCLUDE_FROM_ALL);
}  // namespace test512
// test513: --fast-mode benchmark {{{1

const int N_THREADS         = 2,
          HG_CACHELINE_SIZE = 1 << 6,
          ARRAY_SIZE        = HG_CACHELINE_SIZE * 512,

          // MUTEX_ID_MASK = (1 << MUTEX_ID_BITS) - 1;

// Each thread has its own cache line and works on it intensively.
const int ITERATIONS = 1024;
int array[N_THREADS][ARRAY_SIZE];

Mutex mutex_arr[N_THREADS][MUTEX_ID_BITS];

  // All threads write to different memory locations.
  for (int j = 0; j < ITERATIONS; j++) {
    int mutex_mask = j & MUTEX_ID_BITS;
    for (int m = 0; m < MUTEX_ID_BITS; m++)
      if (mutex_mask & (1 << m))
        mutex_arr[myId][m].Lock();

    for (int i = 0; i < ARRAY_SIZE; i++) {

    for (int m = 0; m < MUTEX_ID_BITS; m++)
      if (mutex_mask & (1 << m))
        mutex_arr[myId][m].Unlock();

  printf("test513: --fast-mode benchmark\n");
  ThreadPool pool(N_THREADS);
  pool.StartWorkers();
  for (int i = 0; i < N_THREADS; i++) {
    pool.Add(NewCallback(Worker));
  } // all folks are joined here.
REGISTER_TEST2(Run, 513, PERFORMANCE | PRINT_STATS | EXCLUDE_FROM_ALL)
}  // namespace test513
// vim:shiftwidth=2:softtabstop=2:expandtab:foldmethod=marker