This file is part of Valgrind, a dynamic binary instrumentation
framework.

Copyright (C) 2008-2008 Google Inc

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.

The GNU General Public License is contained in the file COPYING.

// Author: Konstantin Serebryany <opensource@google.com>
// This file contains a set of unit tests for a data race detection tool.
//
// This test can be compiled with pthreads (default) or
// with any other library that supports threads, locks, cond vars, etc.
//
// To compile with pthreads:
//   g++ racecheck_unittest.cc dynamic_annotations.cc
//     -lpthread -g -DDYNAMIC_ANNOTATIONS=1
//
// To compile with a different library:
//   1. cp thread_wrappers_pthread.h thread_wrappers_yourlib.h
//   2. edit thread_wrappers_yourlib.h
//   3. add '-DTHREAD_WRAPPERS="thread_wrappers_yourlib.h"' to your compilation.
// This test must not include any other file specific to the threading library;
// everything should be inside THREAD_WRAPPERS.
#ifndef THREAD_WRAPPERS
# define THREAD_WRAPPERS "thread_wrappers_pthread.h"
#endif
#include THREAD_WRAPPERS

#ifndef NEEDS_SEPERATE_RW_LOCK
#define RWLock Mutex // Mutex does work as an rw-lock.
#define WriterLockScoped MutexLock
#define ReaderLockScoped ReaderMutexLock
#endif // !NEEDS_SEPERATE_RW_LOCK
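
// Illustrative sketch (added for clarity, not from the original file): how the
// aliases above are intended to be used. The scoped guards are assumed to take
// a pointer to the lock, mirroring MutexLock from the thread wrappers; all
// names in rwlock_sketch are ours.
namespace rwlock_sketch {
RWLock rw_lock;
int    shared_value = 0;

void WriteValue(int v) {
  WriterLockScoped w(&rw_lock);   // exclusive section
  shared_value = v;
}

int ReadValue() {
  ReaderLockScoped r(&rw_lock);   // shared (read-only) section
  return shared_value;
}
}  // namespace rwlock_sketch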
// Helgrind memory usage testing stuff.
// If not present in dynamic_annotations.h/.cc -- ignore.
#ifndef ANNOTATE_RESET_STATS
#define ANNOTATE_RESET_STATS() do { } while(0)
#endif
#ifndef ANNOTATE_PRINT_STATS
#define ANNOTATE_PRINT_STATS() do { } while(0)
#endif
#ifndef ANNOTATE_PRINT_MEMORY_USAGE
#define ANNOTATE_PRINT_MEMORY_USAGE(a) do { } while(0)
#endif
// A function that allows suppressing gcc's warnings about
// unused return values in a portable way.
template <typename T>
static inline void IGNORE_RETURN_VALUE(T v) { }
#include <cstring>      // strlen(), index(), rindex()
#include <sys/types.h>
#include <sys/mman.h>   // mmap
#include <stdint.h>     // uintptr_t
#include <strings.h>    // index(), rindex()
// - Stability tests (marked STAB)
// - Performance tests (marked PERF)
// - TN (true negative) : no race exists and the tool is silent.
// - TP (true positive) : a race exists and is reported.
// - FN (false negative): a race exists but is not reported.
// - FP (false positive): no race exists but the tool reports it.
//
// The feature tests are marked according to the behavior of helgrind 3.3.0.
//
// TP and FP tests are annotated with ANNOTATE_EXPECT_RACE,
// so no error reports should be seen when running under helgrind.
//
// When some of the FP cases are fixed in helgrind we'll need
// to update this test.
//
// Each test resides in its own namespace.
// Namespaces are named test01, test02, ...
// Please, *DO NOT* change the logic of existing tests or rename them.
// Create a new test instead.
//
// Some tests use sleep()/usleep().
// This is not synchronization, but a simple way to trigger
// some specific behaviour of the race detector's scheduler.
// Globals and utilities used by several tests. {{{1

typedef void (*void_func_void_t)(void);

  PERFORMANCE       = 1 << 2,
  EXCLUDE_FROM_ALL  = 1 << 3,
  NEEDS_ANNOTATIONS = 1 << 4,
  MEMORY_USAGE      = 1 << 6,
// Put everything into stderr.
#define printf(args...) \
    fprintf(stderr, args);\
    printf_mu.Unlock(); \
  gettimeofday(&tv, NULL);
  return (tv.tv_sec * 1000L) + (tv.tv_usec / 1000L);
  Test(void_func_void_t f, int flags)
  Test() : f_(0), flags_(0) {}
    ANNOTATE_RESET_STATS();
    if (flags_ & PERFORMANCE) {
      long start = GetTimeInMs();
      long end = GetTimeInMs();
      printf ("Time: %4ldms\n", end - start);
    }
    if (flags_ & PRINT_STATS)
      ANNOTATE_PRINT_STATS();
    if (flags_ & MEMORY_USAGE)
      ANNOTATE_PRINT_MEMORY_USAGE(0);
std::map<int, Test> TheMapOfTests;

#define NOINLINE __attribute__ ((noinline))
extern "C" void NOINLINE AnnotateSetVerbosity(const char *, int, int) {};
  TestAdder(void_func_void_t f, int id, int flags = FEATURE) {
    // AnnotateSetVerbosity(__FILE__, __LINE__, 0);
    CHECK(TheMapOfTests.count(id) == 0);
    TheMapOfTests[id] = Test(f, flags);
  }

#define REGISTER_TEST(f, id)         TestAdder add_test_##id (f, id);
#define REGISTER_TEST2(f, id, flags) TestAdder add_test_##id (f, id, flags);
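
// Illustrative sketch (ours, not an actual test of this suite): the shape of a
// minimal test as registered by the macros above. The REGISTER_TEST line is
// commented out so this sketch does not add an entry to TheMapOfTests.
namespace test_sketch {
int GLOB = 0;
void Run() {
  printf("test_sketch\n");
  GLOB = 1;
}
// REGISTER_TEST(Run, 99)   // would register Run() under id 99
}  // namespace test_sketch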
static bool ArgIsOne(int *arg)  { return *arg == 1; };
static bool ArgIsZero(int *arg) { return *arg == 0; };
static bool ArgIsTrue(bool *arg) { return *arg == true; };
// Call ANNOTATE_EXPECT_RACE only if the 'machine' environment variable is defined.
// Useful to test against several different machines.
// Supported machines so far:
//   MSM_HYBRID1             -- aka MSMProp1
//   MSM_HYBRID1_INIT_STATE  -- aka MSMProp1 with --initialization-state=yes
//   MSM_THREAD_SANITIZER    -- ThreadSanitizer's state machine
#define ANNOTATE_EXPECT_RACE_FOR_MACHINE(mem, descr, machine) \
    while(getenv(machine)) {\
      ANNOTATE_EXPECT_RACE(mem, descr); \

#define ANNOTATE_EXPECT_RACE_FOR_TSAN(mem, descr) \
    ANNOTATE_EXPECT_RACE_FOR_MACHINE(mem, descr, "MSM_THREAD_SANITIZER")
inline bool Tsan_PureHappensBefore() {

inline bool Tsan_FastMode() {
  return getenv("TSAN_FAST_MODE") != NULL;
}

// Initialize *(mem) to 0 if Tsan_FastMode.
#define FAST_MODE_INIT(mem) do { if (Tsan_FastMode()) { *(mem) = 0; } } while(0)

#ifndef MAIN_INIT_ACTION
#define MAIN_INIT_ACTION
#endif
int main(int argc, char** argv) { // {{{1
  printf("FLAGS [phb=%i, fm=%i]\n", Tsan_PureHappensBefore(), Tsan_FastMode());
  if (argc == 2 && !strcmp(argv[1], "benchmark")) {
    for (std::map<int,Test>::iterator it = TheMapOfTests.begin();
         it != TheMapOfTests.end(); ++it) {
      if(!(it->second.flags_ & PERFORMANCE)) continue;
  } else if (argc == 2 && !strcmp(argv[1], "demo")) {
    for (std::map<int,Test>::iterator it = TheMapOfTests.begin();
         it != TheMapOfTests.end(); ++it) {
      if(!(it->second.flags_ & RACE_DEMO)) continue;
  } else if (argc > 1) {
    // The tests to run are listed as command-line arguments.
    for (int i = 1; i < argc; i++) {
      int f_num = atoi(argv[i]);
      CHECK(TheMapOfTests.count(f_num));
      TheMapOfTests[f_num].Run();
    bool run_tests_with_annotations = false;
    if (getenv("DRT_ALLOW_ANNOTATIONS")) {
      run_tests_with_annotations = true;
    }
    for (std::map<int,Test>::iterator it = TheMapOfTests.begin();
         it != TheMapOfTests.end();
      if(it->second.flags_ & EXCLUDE_FROM_ALL) continue;
      if(it->second.flags_ & RACE_DEMO) continue;
      if((it->second.flags_ & NEEDS_ANNOTATIONS)
         && run_tests_with_annotations == false) continue;
#ifdef THREAD_WRAPPERS_PTHREAD_H

// An array of threads. Create/start/join all elements at once. {{{1
class MyThreadArray {
  static const int kSize = 5;
  typedef void (*F) (void);
  MyThreadArray(F f1, F f2 = NULL, F f3 = NULL, F f4 = NULL, F f5 = NULL) {
    ar_[0] = new MyThread(f1);
    ar_[1] = f2 ? new MyThread(f2) : NULL;
    ar_[2] = f3 ? new MyThread(f3) : NULL;
    ar_[3] = f4 ? new MyThread(f4) : NULL;
    ar_[4] = f5 ? new MyThread(f5) : NULL;
  }
    for(int i = 0; i < kSize; i++) {
    for(int i = 0; i < kSize; i++) {
    for(int i = 0; i < kSize; i++) {
  MyThread *ar_[kSize];
323 printf("test00: negative\n");
324 printf("\tGLOB=%d\n", GLOB
);
326 REGISTER_TEST(Run
, 00)
327 } // namespace test00
// test01: TP. Simple race (write vs write). {{{1
  const timespec delay = { 0, 100 * 1000 * 1000 };
  nanosleep(&delay, 0);
  FAST_MODE_INIT(&GLOB);
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test01. TP.");
  ANNOTATE_TRACE_MEMORY(&GLOB);
  printf("test01: positive\n");
  const int tmp = GLOB;
  printf("\tGLOB=%d\n", tmp);
REGISTER_TEST(Run, 1);
}  // namespace test01
// test02: TN. Synchronization via CondVar. {{{1
// Two write accesses to GLOB are synchronized because
// the pair of CV.Signal() and CV.Wait() establishes a happens-before relation.
//
// 3. MU.Lock()              a. write(GLOB)
//                    /----- d. CV.Signal()
// 4. while(COND)    /       e. MU.Unlock()
  usleep(100000); // Make sure the waiter blocks.
  pool.Add(NewCallback(Waker));
  printf("test02: negative\n");
  printf("\tGLOB=%d\n", GLOB);
REGISTER_TEST(Run, 2);
}  // namespace test02
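
// Illustrative sketch (ours, not part of the test suite): the Waiter/Waker
// protocol described in test02, spelled out with the Mutex/CondVar wrappers
// used throughout this file. All names in condvar_sketch are ours.
namespace condvar_sketch {
int     GLOB = 0;
int     COND = 0;
Mutex   MU;
CondVar CV;

void Waker() {
  GLOB = 1;          // a. write(GLOB) before the signal
  MU.Lock();
  COND = 1;
  CV.Signal();       // d. pairs with CV.Wait() below: happens-before arc
  MU.Unlock();       // e.
}

void Waiter() {
  MU.Lock();
  while (COND != 1)
    CV.Wait(&MU);
  MU.Unlock();
  GLOB = 2;          // synchronized with the write in Waker()
}
}  // namespace condvar_sketch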
// test03: TN. Synchronization via LockWhen, signaller gets there first. {{{1
//
// Two write accesses to GLOB are synchronized via a conditional critical section.
// Note that LockWhen() happens first (we use sleep(1) to make sure)!
//
//                                /--- d. MU.Unlock()
// 3. MU.LockWhen(COND==1) <-----/
  usleep(100000); // Make sure the waiter blocks.
  COND = 1; // We are done! Tell the Waiter.
  MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
  pool.Add(NewCallback(Waker));
  MU.LockWhen(Condition<int>(&ArgIsOne, &COND)); // calls ANNOTATE_CONDVAR_WAIT
  MU.Unlock(); // Waker is done!
  printf("test03: negative\n");
  printf("\tGLOB=%d\n", GLOB);
REGISTER_TEST2(Run, 3, FEATURE|NEEDS_ANNOTATIONS);
}  // namespace test03
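
// Illustrative sketch (ours): the conditional critical section used by test03.
// Unlock() acts as the signal and LockWhen(Condition<int>(...)) as the wait,
// so no explicit condition variable is needed. Names in lockwhen_sketch are ours.
namespace lockwhen_sketch {
int   GLOB = 0;
int   COND = 0;
Mutex MU;

void Waker() {
  GLOB = 1;
  MU.Lock();
  COND = 1;     // We are done; tell the Waiter.
  MU.Unlock();  // Calls ANNOTATE_CONDVAR_SIGNAL internally.
}

void Waiter() {
  MU.LockWhen(Condition<int>(&ArgIsOne, &COND));  // Blocks until COND == 1.
  MU.Unlock();
  GLOB = 2;     // Happens-after the Waker's write.
}
}  // namespace lockwhen_sketch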
// test04: TN. Synchronization via PCQ. {{{1
ProducerConsumerQueue Q(INT_MAX);
// Two write accesses to GLOB are separated by PCQ Put/Get.
//
// 2. Q.Put() ---------\          .
//                      \-------> a. Q.Get()
  printf("test04: negative\n");
  MyThreadArray t(Putter, Getter);
  printf("\tGLOB=%d\n", GLOB);
REGISTER_TEST(Run, 4);
}  // namespace test04
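
// Illustrative sketch (ours): the Put/Get handoff used by test04. Get() is a
// blocking pop, so everything Putter did before Put() happens-before everything
// Getter does after Get(). The void* payload passed to Put() is an assumption
// about the wrapper's signature; the value itself is irrelevant here.
namespace pcq_sketch {
int GLOB = 0;
ProducerConsumerQueue Q(INT_MAX);

void Putter() {
  GLOB = 1;
  Q.Put(NULL);   // signal
}

void Getter() {
  Q.Get();       // blocks until Putter() has called Put()
  GLOB = 2;
}
}  // namespace pcq_sketch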
// test05: FP. Synchronization via CondVar, but waiter does not block. {{{1
// Since CondVar::Wait() is not called, we get a false positive.
//
// Two write accesses to GLOB are synchronized via CondVar,
// but the race detector cannot see it.
// See this for details:
// http://www.valgrind.org/docs/manual/hg-manual.html#hg-manual.effective-use.
//
// 3. MU.Lock()                       a. write(GLOB)
// 4. while(COND)                     e. MU.Unlock()
//      CV.Wait(MU) <<< not called
  pool.Add(NewCallback(Waker));
  usleep(100000); // Make sure the signaller gets there first.
  FAST_MODE_INIT(&GLOB);
  if (!Tsan_PureHappensBefore())
    ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test05. FP. Unavoidable in hybrid scheme.");
  printf("test05: unavoidable false positive\n");
  printf("\tGLOB=%d\n", GLOB);
REGISTER_TEST(Run, 5);
}  // namespace test05
// test06: TN. Synchronization via CondVar, but Waker gets there first. {{{1
//
// Same as test05 but we annotated the Wait() loop.
//
// 3. MU.Lock()                               a. write(GLOB)
//                                    /------ d. CV.Signal()
// 4. while(COND)                    /        e. MU.Unlock()
//      CV.Wait(MU) <<< not called  /
// 6. ANNOTATE_CONDVAR_WAIT(CV, MU) <--------/
  pool.Add(NewCallback(Waker));
  usleep(100000); // Make sure the signaller gets there first.
  ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU);
  printf("test06: negative\n");
  printf("\tGLOB=%d\n", GLOB);
REGISTER_TEST2(Run, 6, FEATURE|NEEDS_ANNOTATIONS);
}  // namespace test06
// test07: TN. Synchronization via LockWhen(), Signaller is observed first. {{{1
//
// Two write accesses to GLOB are synchronized via a conditional critical section.
// LockWhen() is observed after COND has been set (due to sleep).
// Unlock() calls ANNOTATE_CONDVAR_SIGNAL().
//
// Waiter:                       Signaller:
// 2. Start(Signaller)
//                         /--- d. MU.Unlock calls ANNOTATE_CONDVAR_SIGNAL
// 3. MU.LockWhen(COND==1) <---/
  COND = true; // We are done! Tell the Waiter.
  MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
  MyThread t(Signaller);
  usleep(100000); // Make sure the signaller gets there first.
  MU.LockWhen(Condition<bool>(&ArgIsTrue, &COND)); // calls ANNOTATE_CONDVAR_WAIT
  MU.Unlock(); // Signaller is done!
  GLOB = 2; // If LockWhen didn't catch the signal, a race may be reported here.
  printf("test07: negative\n");
  printf("\tGLOB=%d\n", GLOB);
REGISTER_TEST2(Run, 7, FEATURE|NEEDS_ANNOTATIONS);
}  // namespace test07
// test08: TN. Synchronization via thread start/join. {{{1
//
// Three accesses to GLOB are separated by thread start/join.
//
// 2. Start(Worker) ------------>
// 3. Join(Worker)  <------------
  printf("test08: negative\n");
  printf("\tGLOB=%d\n", GLOB);
REGISTER_TEST(Run, 8);
}  // namespace test08
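
// Illustrative sketch (ours): the start/join synchronization of test08.
// MyThread::Start()/Join() are assumed from the thread wrappers (Join() is also
// used by test32 below); Join() is what makes the final read of GLOB safe.
namespace join_sketch {
int GLOB = 0;

void Worker() {
  GLOB = 2;
}

void Parent() {
  GLOB = 1;
  MyThread t(Worker);
  t.Start();
  t.Join();
  CHECK(GLOB == 2);  // Safe: Worker() has fully finished after Join().
}
}  // namespace join_sketch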
// test09: TP. Simple race (read vs write). {{{1
//
// A simple data race between writer and reader.
// Write happens after read (enforced by sleep).
// Usually, easily detectable by a race detector.
  ANNOTATE_TRACE_MEMORY(&GLOB);
  FAST_MODE_INIT(&GLOB);
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test09. TP.");
  printf("test09: positive\n");
  MyThreadArray t(Writer, Reader);
  printf("\tGLOB=%d\n", GLOB);
REGISTER_TEST(Run, 9);
}  // namespace test09
// test10: FN. Simple race (write vs read). {{{1
//
// A simple data race between writer and reader.
// Write happens before Read (enforced by sleep);
// otherwise this test is the same as test09.
//
// 1. write(GLOB)             a. sleep(long enough so that GLOB
//                               is most likely initialized by Writer)
//
// The Eraser algorithm does not detect the race here,
// see Section 2.2 of http://citeseer.ist.psu.edu/savage97eraser.html.
  FAST_MODE_INIT(&GLOB);
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test10. TP. FN in MSMHelgrind.");
  printf("test10: positive\n");
  MyThreadArray t(Writer, Reader);
  printf("\tGLOB=%d\n", GLOB);
REGISTER_TEST(Run, 10);
}  // namespace test10
732 // test11: FP. Synchronization via CondVar, 2 workers. {{{1
733 // This test is properly synchronized, but currently (Dec 2007)
734 // helgrind reports a false positive.
736 // Parent: Worker1, Worker2:
737 // 1. Start(workers) a. read(GLOB)
738 // 2. MU.Lock() b. MU.Lock()
739 // 3. while(COND != 2) /-------- c. CV.Signal()
740 // CV.Wait(&MU) <-------/ d. MU.Unlock()
760 MyThreadArray
t(Worker
, Worker
);
775 // ANNOTATE_EXPECT_RACE(&GLOB, "test11. FP. Fixed by MSMProp1.");
776 printf("test11: negative\n");
778 printf("\tGLOB=%d\n", GLOB
);
780 REGISTER_TEST(Run
, 11);
781 } // namespace test11
784 // test12: FP. Synchronization via Mutex, then via PCQ. {{{1
787 // This test is properly synchronized, but currently (Dec 2007)
788 // helgrind reports a false positive.
790 // First, we write to GLOB under MU, then we synchronize via PCQ,
791 // which is essentially a semaphore.
794 // 1. MU.Lock() a. MU.Lock()
795 // 2. write(GLOB) <---- MU ----> b. write(GLOB)
796 // 3. MU.Unlock() c. MU.Unlock()
797 // 4. Q.Put() ---------------> d. Q.Get()
800 ProducerConsumerQueue
Q(INT_MAX
);
821 // ANNOTATE_EXPECT_RACE(&GLOB, "test12. FP. Fixed by MSMProp1.");
822 printf("test12: negative\n");
823 MyThreadArray
t(Putter
, Getter
);
826 printf("\tGLOB=%d\n", GLOB
);
828 REGISTER_TEST(Run
, 12);
829 } // namespace test12
832 // test13: FP. Synchronization via Mutex, then via LockWhen. {{{1
835 // This test is essentially the same as test12, but uses LockWhen
839 // 1. MU.Lock() a. MU.Lock()
840 // 2. write(GLOB) <---------- MU ----------> b. write(GLOB)
841 // 3. MU.Unlock() c. MU.Unlock()
844 // 6. ANNOTATE_CONDVAR_SIGNAL -------\ .
845 // 7. MU.Unlock() \ .
846 // \----> d. MU.LockWhen(COND == 1)
858 ANNOTATE_CONDVAR_SIGNAL(&MU
);
867 MU
.LockWhen(Condition
<int>(&ArgIsOne
, &COND
));
873 // ANNOTATE_EXPECT_RACE(&GLOB, "test13. FP. Fixed by MSMProp1.");
874 printf("test13: negative\n");
877 MyThreadArray
t(Waker
, Waiter
);
881 printf("\tGLOB=%d\n", GLOB
);
883 REGISTER_TEST2(Run
, 13, FEATURE
|NEEDS_ANNOTATIONS
);
884 } // namespace test13
887 // test14: FP. Synchronization via PCQ, reads, 2 workers. {{{1
890 // This test is properly synchronized, but currently (Dec 2007)
891 // helgrind reports a false positive.
893 // This test is similar to test11, but uses PCQ (semaphore).
895 // Putter2: Putter1: Getter:
896 // 1. read(GLOB) a. read(GLOB)
897 // 2. Q2.Put() ----\ b. Q1.Put() -----\ .
898 // \ \--------> A. Q1.Get()
899 // \----------------------------------> B. Q2.Get()
901 ProducerConsumerQueue
Q1(INT_MAX
), Q2(INT_MAX
);
917 // ANNOTATE_EXPECT_RACE(&GLOB, "test14. FP. Fixed by MSMProp1.");
918 printf("test14: negative\n");
919 MyThreadArray
t(Getter
, Putter1
, Putter2
);
922 printf("\tGLOB=%d\n", GLOB
);
924 REGISTER_TEST(Run
, 14);
925 } // namespace test14
928 // test15: TN. Synchronization via LockWhen. One waker and 2 waiters. {{{1
930 // Waker: Waiter1, Waiter2:
934 // 4. ANNOTATE_CONDVAR_SIGNAL ------------> a. MU.LockWhen(COND == 1)
935 // 5. MU.Unlock() b. MU.Unlock()
946 ANNOTATE_CONDVAR_SIGNAL(&MU
);
951 MU
.LockWhen(Condition
<int>(&ArgIsOne
, &COND
));
959 printf("test15: negative\n");
960 MyThreadArray
t(Waker
, Waiter
, Waiter
);
963 printf("\tGLOB=%d\n", GLOB
);
965 REGISTER_TEST(Run
, 15);
966 } // namespace test15
969 // test16: FP. Barrier (emulated by CV), 2 threads. {{{1
972 // 1. MU.Lock() a. MU.Lock()
973 // 2. write(GLOB) <------------ MU ----------> b. write(GLOB)
974 // 3. MU.Unlock() c. MU.Unlock()
975 // 4. MU2.Lock() d. MU2.Lock()
976 // 5. COND-- e. COND--
977 // 6. ANNOTATE_CONDVAR_SIGNAL(MU2) ---->V .
978 // 7. MU2.Await(COND == 0) <------------+------ f. ANNOTATE_CONDVAR_SIGNAL(MU2)
979 // 8. MU2.Unlock() V-----> g. MU2.Await(COND == 0)
980 // 9. read(GLOB) h. MU2.Unlock()
// TODO: This way we may create too many edges in the happens-before graph.
// Arndt Mühlenfeld in his PhD (TODO: link) suggests creating special nodes in
// the happens-before graph to reduce the total number of edges.
1001 ANNOTATE_CONDVAR_SIGNAL(&MU2
);
1002 MU2
.Await(Condition
<int>(&ArgIsZero
, &COND
));
1009 // ANNOTATE_EXPECT_RACE(&GLOB, "test16. FP. Fixed by MSMProp1 + Barrier support.");
1011 printf("test16: negative\n");
1012 MyThreadArray
t(Worker
, Worker
);
1015 printf("\tGLOB=%d\n", GLOB
);
1017 REGISTER_TEST2(Run
, 16, FEATURE
|NEEDS_ANNOTATIONS
);
1018 } // namespace test16
1021 // test17: FP. Barrier (emulated by CV), 3 threads. {{{1
1023 // Same as test16, but with 3 threads.
1035 ANNOTATE_CONDVAR_SIGNAL(&MU2
);
1036 MU2
.Await(Condition
<int>(&ArgIsZero
, &COND
));
1043 // ANNOTATE_EXPECT_RACE(&GLOB, "test17. FP. Fixed by MSMProp1 + Barrier support.");
1045 printf("test17: negative\n");
1046 MyThreadArray
t(Worker
, Worker
, Worker
);
1049 printf("\tGLOB=%d\n", GLOB
);
1051 REGISTER_TEST2(Run
, 17, FEATURE
|NEEDS_ANNOTATIONS
);
1052 } // namespace test17
1055 // test18: TN. Synchronization via Await(), signaller gets there first. {{{1
1059 // Same as test03, but uses Mutex::Await() instead of Mutex::LockWhen().
1062 usleep(100000); // Make sure the waiter blocks.
1066 COND
= 1; // We are done! Tell the Waiter.
1067 MU
.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
1071 pool
.StartWorkers();
1073 pool
.Add(NewCallback(Waker
));
1076 MU
.Await(Condition
<int>(&ArgIsOne
, &COND
)); // calls ANNOTATE_CONDVAR_WAIT
1077 MU
.Unlock(); // Waker is done!
1082 printf("test18: negative\n");
1084 printf("\tGLOB=%d\n", GLOB
);
1086 REGISTER_TEST2(Run
, 18, FEATURE
|NEEDS_ANNOTATIONS
);
1087 } // namespace test18
1089 // test19: TN. Synchronization via AwaitWithTimeout(). {{{1
1092 // Same as test18, but with AwaitWithTimeout. Do not timeout.
1095 usleep(100000); // Make sure the waiter blocks.
1099 COND
= 1; // We are done! Tell the Waiter.
1100 MU
.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
1104 pool
.StartWorkers();
1106 pool
.Add(NewCallback(Waker
));
1109 CHECK(MU
.AwaitWithTimeout(Condition
<int>(&ArgIsOne
, &COND
), INT_MAX
));
1115 printf("test19: negative\n");
1117 printf("\tGLOB=%d\n", GLOB
);
1119 REGISTER_TEST2(Run
, 19, FEATURE
|NEEDS_ANNOTATIONS
);
1120 } // namespace test19
1122 // test20: TP. Incorrect synchronization via AwaitWhen(), timeout. {{{1
1126 // True race. We timeout in AwaitWhen.
1133 pool
.StartWorkers();
1135 pool
.Add(NewCallback(Waker
));
1138 CHECK(!MU
.AwaitWithTimeout(Condition
<int>(&ArgIsOne
, &COND
), 100));
1144 FAST_MODE_INIT(&GLOB
);
1145 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB
, "test20. TP.");
1146 printf("test20: positive\n");
1148 printf("\tGLOB=%d\n", GLOB
);
1150 REGISTER_TEST2(Run
, 20, FEATURE
|NEEDS_ANNOTATIONS
);
1151 } // namespace test20
1153 // test21: TP. Incorrect synchronization via LockWhenWithTimeout(). {{{1
1156 // True race. We timeout in LockWhenWithTimeout().
1164 pool
.StartWorkers();
1166 pool
.Add(NewCallback(Waker
));
1168 CHECK(!MU
.LockWhenWithTimeout(Condition
<int>(&ArgIsOne
, &COND
), 100));
1174 FAST_MODE_INIT(&GLOB
);
1175 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB
, "test21. TP.");
1176 printf("test21: positive\n");
1178 printf("\tGLOB=%d\n", GLOB
);
1180 REGISTER_TEST2(Run
, 21, FEATURE
|NEEDS_ANNOTATIONS
);
1181 } // namespace test21
1183 // test22: TP. Incorrect synchronization via CondVar::WaitWithTimeout(). {{{1
1187 // True race. We timeout in CondVar::WaitWithTimeout().
1194 pool
.StartWorkers();
1196 pool
.Add(NewCallback(Waker
));
1198 int64_t ms_left_to_wait
= 100;
1199 int64_t deadline_ms
= GetCurrentTimeMillis() + ms_left_to_wait
;
1201 while(COND
!= 1 && ms_left_to_wait
> 0) {
1202 CV
.WaitWithTimeout(&MU
, ms_left_to_wait
);
1203 ms_left_to_wait
= deadline_ms
- GetCurrentTimeMillis();
1210 FAST_MODE_INIT(&GLOB
);
1211 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB
, "test22. TP.");
1212 printf("test22: positive\n");
1214 printf("\tGLOB=%d\n", GLOB
);
1216 REGISTER_TEST(Run
, 22);
1217 } // namespace test22
// test23: TN. TryLock, ReaderLock, ReaderTryLock. {{{1
//
// Correct synchronization with TryLock, Lock, ReaderTryLock, ReaderLock.
void Worker_TryLock() {
  for (int i = 0; i < 20; i++) {
void Worker_ReaderTryLock() {
  for (int i = 0; i < 20; i++) {
    if (MU.ReaderTryLock()) {
void Worker_ReaderLock() {
  for (int i = 0; i < 20; i++) {
void Worker_Lock() {
  for (int i = 0; i < 20; i++) {
  printf("test23: negative\n");
  MyThreadArray t(Worker_TryLock,
                  Worker_ReaderTryLock,
  printf("\tGLOB=%d\n", GLOB);
REGISTER_TEST(Run, 23);
}  // namespace test23
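
// Illustrative sketch (ours): the TryLock pattern exercised by test23. The
// write happens only if the lock was actually acquired. ReaderUnlock() is an
// assumption about the release call matching ReaderTryLock() in the wrappers.
namespace trylock_sketch {
int   GLOB = 0;
Mutex MU;

void TryWriter() {
  if (MU.TryLock()) {
    GLOB++;
    MU.Unlock();
  }
}

void TryReader() {
  if (MU.ReaderTryLock()) {
    CHECK(GLOB >= 0);
    MU.ReaderUnlock();
  }
}
}  // namespace trylock_sketch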
1282 // test24: TN. Synchronization via ReaderLockWhen(). {{{1
1286 // Same as test03, but uses ReaderLockWhen().
1289 usleep(100000); // Make sure the waiter blocks.
1293 COND
= 1; // We are done! Tell the Waiter.
1294 MU
.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
1298 pool
.StartWorkers();
1300 pool
.Add(NewCallback(Waker
));
1301 MU
.ReaderLockWhen(Condition
<int>(&ArgIsOne
, &COND
));
1307 printf("test24: negative\n");
1309 printf("\tGLOB=%d\n", GLOB
);
1311 REGISTER_TEST2(Run
, 24, FEATURE
|NEEDS_ANNOTATIONS
);
1312 } // namespace test24
1314 // test25: TN. Synchronization via ReaderLockWhenWithTimeout(). {{{1
1318 // Same as test24, but uses ReaderLockWhenWithTimeout().
1319 // We do not timeout.
1322 usleep(100000); // Make sure the waiter blocks.
1326 COND
= 1; // We are done! Tell the Waiter.
1327 MU
.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
1331 pool
.StartWorkers();
1333 pool
.Add(NewCallback(Waker
));
1334 CHECK(MU
.ReaderLockWhenWithTimeout(Condition
<int>(&ArgIsOne
, &COND
), INT_MAX
));
1340 printf("test25: negative\n");
1342 printf("\tGLOB=%d\n", GLOB
);
1344 REGISTER_TEST2(Run
, 25, FEATURE
|NEEDS_ANNOTATIONS
);
1345 } // namespace test25
1347 // test26: TP. Incorrect synchronization via ReaderLockWhenWithTimeout(). {{{1
1351 // Same as test25, but we timeout and incorrectly assume happens-before.
1359 pool
.StartWorkers();
1361 pool
.Add(NewCallback(Waker
));
1362 CHECK(!MU
.ReaderLockWhenWithTimeout(Condition
<int>(&ArgIsOne
, &COND
), 100));
1368 FAST_MODE_INIT(&GLOB
);
1369 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB
, "test26. TP");
1370 printf("test26: positive\n");
1372 printf("\tGLOB=%d\n", GLOB
);
1374 REGISTER_TEST2(Run
, 26, FEATURE
|NEEDS_ANNOTATIONS
);
1375 } // namespace test26
1378 // test27: TN. Simple synchronization via SpinLock. {{{1
1391 printf("test27: negative\n");
1392 MyThreadArray
t(Worker
, Worker
, Worker
, Worker
);
1395 printf("\tGLOB=%d\n", GLOB
);
1397 REGISTER_TEST2(Run
, 27, FEATURE
|NEEDS_ANNOTATIONS
);
1398 #endif // NO_SPINLOCK
1399 } // namespace test27
1402 // test28: TN. Synchronization via Mutex, then PCQ. 3 threads {{{1
1404 // Putter1: Getter: Putter2:
1405 // 1. MU.Lock() A. MU.Lock()
1406 // 2. write(GLOB) B. write(GLOB)
1407 // 3. MU.Unlock() C. MU.Unlock()
1408 // 4. Q.Put() ---------\ /------- D. Q.Put()
1409 // 5. MU.Lock() \-------> a. Q.Get() / E. MU.Lock()
1410 // 6. read(GLOB) b. Q.Get() <---------/ F. read(GLOB)
1411 // 7. MU.Unlock() (sleep) G. MU.Unlock()
1413 ProducerConsumerQueue
Q(INT_MAX
);
1437 printf("test28: negative\n");
1438 MyThreadArray
t(Getter
, Putter
, Putter
);
1441 printf("\tGLOB=%d\n", GLOB
);
1443 REGISTER_TEST(Run
, 28);
1444 } // namespace test28
1447 // test29: TN. Synchronization via Mutex, then PCQ. 4 threads. {{{1
1449 // Similar to test28, but has two Getters and two PCQs.
1450 ProducerConsumerQueue
*Q1
, *Q2
;
1454 void Putter(ProducerConsumerQueue
*q
) {
1468 void Putter1() { Putter(Q1
); }
1469 void Putter2() { Putter(Q2
); }
1476 usleep(48000); // TODO: remove this when FP in test32 is fixed.
1480 printf("test29: negative\n");
1481 Q1
= new ProducerConsumerQueue(INT_MAX
);
1482 Q2
= new ProducerConsumerQueue(INT_MAX
);
1483 MyThreadArray
t(Getter
, Getter
, Putter1
, Putter2
);
1486 printf("\tGLOB=%d\n", GLOB
);
1490 REGISTER_TEST(Run
, 29);
1491 } // namespace test29
1494 // test30: TN. Synchronization via 'safe' race. Writer vs multiple Readers. {{{1
1496 // This test shows a very risky kind of synchronization which is very easy
1497 // to get wrong. Actually, I am not sure I've got it right.
1499 // Writer: Reader1, Reader2, ..., ReaderN:
1500 // 1. write(GLOB[i]: i >= BOUNDARY) a. n = BOUNDARY
1501 // 2. HAPPENS_BEFORE(BOUNDARY+1) -------> b. HAPPENS_AFTER(n)
1502 // 3. BOUNDARY++; c. read(GLOB[i]: i < n)
1504 // Here we have a 'safe' race on accesses to BOUNDARY and
1505 // no actual races on accesses to GLOB[]:
1506 // Writer writes to GLOB[i] where i>=BOUNDARY and then increments BOUNDARY.
1507 // Readers read BOUNDARY and read GLOB[i] where i<BOUNDARY.
// I am not completely sure that this scheme guarantees no race between
// accesses to GLOB, since compilers and CPUs
// are free to rearrange memory operations.
// I am actually sure that this scheme is wrong unless we use
// some smart memory fencing...
1518 volatile int BOUNDARY
= 0;
1521 for (int i
= 0; i
< N
; i
++) {
1522 CHECK(BOUNDARY
== i
);
1523 for (int j
= i
; j
< N
; j
++) {
1526 ANNOTATE_HAPPENS_BEFORE(reinterpret_cast<void*>(BOUNDARY
+1));
1536 if (n
== 0) continue;
1537 ANNOTATE_HAPPENS_AFTER(reinterpret_cast<void*>(n
));
1538 for (int i
= 0; i
< n
; i
++) {
1539 CHECK(GLOB
[i
] == i
);
1546 FAST_MODE_INIT(&BOUNDARY
);
1547 ANNOTATE_EXPECT_RACE((void*)(&BOUNDARY
), "test30. Sync via 'safe' race.");
1548 printf("test30: negative\n");
1549 MyThreadArray
t(Writer
, Reader
, Reader
, Reader
);
1552 printf("\tGLOB=%d\n", GLOB
[N
-1]);
1554 REGISTER_TEST2(Run
, 30, FEATURE
|NEEDS_ANNOTATIONS
);
1555 } // namespace test30
1558 // test31: TN. Synchronization via 'safe' race. Writer vs Writer. {{{1
// This test is similar to test30, but
// it has one Writer instead of multiple Readers.
1564 // 1. write(GLOB[i]: i >= BOUNDARY) a. n = BOUNDARY
1565 // 2. HAPPENS_BEFORE(BOUNDARY+1) -------> b. HAPPENS_AFTER(n)
1566 // 3. BOUNDARY++; c. write(GLOB[i]: i < n)
1571 volatile int BOUNDARY
= 0;
1574 for (int i
= 0; i
< N
; i
++) {
1575 CHECK(BOUNDARY
== i
);
1576 for (int j
= i
; j
< N
; j
++) {
1579 ANNOTATE_HAPPENS_BEFORE(reinterpret_cast<void*>(BOUNDARY
+1));
1589 if (n
== 0) continue;
1590 ANNOTATE_HAPPENS_AFTER(reinterpret_cast<void*>(n
));
1591 for (int i
= 0; i
< n
; i
++) {
1601 FAST_MODE_INIT(&BOUNDARY
);
1602 ANNOTATE_EXPECT_RACE((void*)(&BOUNDARY
), "test31. Sync via 'safe' race.");
1603 printf("test31: negative\n");
1604 MyThreadArray
t(Writer1
, Writer2
);
1607 printf("\tGLOB=%d\n", GLOB
[N
-1]);
1609 REGISTER_TEST2(Run
, 31, FEATURE
|NEEDS_ANNOTATIONS
);
1610 } // namespace test31
1613 // test32: FP. Synchronization via thread create/join. W/R. {{{1
1615 // This test is well synchronized but helgrind 3.3.0 reports a race.
1617 // Parent: Writer: Reader:
1618 // 1. Start(Reader) -----------------------\ .
1620 // 2. Start(Writer) ---\ \ .
1621 // \---> a. MU.Lock() \--> A. sleep(long enough)
1623 // /---- c. MU.Unlock()
1624 // 3. Join(Writer) <---/
1627 // /------------ D. MU.Unlock()
1628 // 4. Join(Reader) <----------------/
// The call to sleep() in Reader is not part of the synchronization;
// it is required to trigger the false positive in helgrind 3.3.0.
1657 w
.Join(); // 'w' joins first.
1664 // ANNOTATE_EXPECT_RACE(&GLOB, "test32. FP. Fixed by MSMProp1.");
1665 printf("test32: negative\n");
1667 printf("\tGLOB=%d\n", GLOB
);
1670 REGISTER_TEST(Run
, 32);
1671 } // namespace test32
1674 // test33: STAB. Stress test for the number of thread sets (TSETs). {{{1
1677 // Here we access N memory locations from within log(N) threads.
1678 // We do it in such a way that helgrind creates nearly all possible TSETs.
1679 // Then we join all threads and start again (N_iter times).
1680 const int N_iter
= 48;
1681 const int Nlog
= 15;
1682 const int N
= 1 << Nlog
;
1692 for (int i
= 0; i
< N
; i
++) {
1693 // ARR[i] is accessed by threads from i-th subset
1701 printf("test33:\n");
1703 std::vector
<MyThread
*> vec(Nlog
);
1705 for (int j
= 0; j
< N_iter
; j
++) {
1706 // Create and start Nlog threads
1707 for (int i
= 0; i
< Nlog
; i
++) {
1708 vec
[i
] = new MyThread(Worker
);
1710 for (int i
= 0; i
< Nlog
; i
++) {
1713 // Join all threads.
1714 for (int i
= 0; i
< Nlog
; i
++) {
1718 printf("------------------\n");
1721 printf("\tGLOB=%d; ARR[1]=%d; ARR[7]=%d; ARR[N-1]=%d\n",
1722 GLOB
, ARR
[1], ARR
[7], ARR
[N
-1]);
1724 REGISTER_TEST2(Run
, 33, STABILITY
|EXCLUDE_FROM_ALL
);
1725 } // namespace test33
// test34: STAB. Stress test for the number of lock sets (LSETs). {{{1
1730 // Similar to test33, but for lock sets.
1732 const int N_iter
= 48;
1733 const int Nlog
= 10;
1734 const int N
= 1 << Nlog
;
1736 static Mutex
*MUs
[Nlog
];
1739 for (int i
= 0; i
< N
; i
++) {
1740 // ARR[i] is protected by MUs from i-th subset of all MUs
1741 for (int j
= 0; j
< Nlog
; j
++) if (i
& (1 << j
)) MUs
[j
]->Lock();
1743 for (int j
= 0; j
< Nlog
; j
++) if (i
& (1 << j
)) MUs
[j
]->Unlock();
1748 printf("test34:\n");
1749 for (int iter
= 0; iter
< N_iter
; iter
++) {
1750 for (int i
= 0; i
< Nlog
; i
++) {
1753 MyThreadArray
t(Worker
, Worker
);
1756 for (int i
= 0; i
< Nlog
; i
++) {
1759 printf("------------------\n");
1761 printf("\tGLOB=%d\n", GLOB
);
1763 REGISTER_TEST2(Run
, 34, STABILITY
|EXCLUDE_FROM_ALL
);
1764 } // namespace test34
// test35: PERF. Lots of mutexes and lots of calls to free(). {{{1
//
// Helgrind 3.3.0 is very slow in shadow_mem_make_NoAccess(). Fixed locally.
// With the fix helgrind runs this test in about a minute.
// Without the fix -- about 5 minutes (on c2d 2.4GHz).
1773 // TODO: need to figure out the best way for performance testing.
1775 const int N_mu
= 25000;
1776 const int N_free
= 48000;
1779 for (int i
= 0; i
< N_free
; i
++)
1780 CHECK(777 == *ARR
[i
]);
1784 printf("test35:\n");
1785 std::vector
<Mutex
*> mus
;
1787 ARR
= new int *[N_free
];
1788 for (int i
= 0; i
< N_free
; i
++) {
1789 const int c
= N_free
/ N_mu
;
1791 mus
.push_back(new Mutex
);
1793 mus
.back()->Unlock();
1795 ARR
[i
] = new int(777);
1798 // Need to put all ARR[i] into shared state in order
1799 // to trigger the performance bug.
1800 MyThreadArray
t(Worker
, Worker
);
1804 for (int i
= 0; i
< N_free
; i
++) delete ARR
[i
];
1807 for (size_t i
= 0; i
< mus
.size(); i
++) {
1811 REGISTER_TEST2(Run
, 35, PERFORMANCE
|EXCLUDE_FROM_ALL
);
1812 } // namespace test35
1815 // test36: TN. Synchronization via Mutex, then PCQ. 3 threads. W/W {{{1
1817 // variation of test28 (W/W instead of W/R)
1819 // Putter1: Getter: Putter2:
1820 // 1. MU.Lock(); A. MU.Lock()
1821 // 2. write(GLOB) B. write(GLOB)
1822 // 3. MU.Unlock() C. MU.Unlock()
1823 // 4. Q.Put() ---------\ /------- D. Q.Put()
1824 // 5. MU1.Lock() \-------> a. Q.Get() / E. MU1.Lock()
1825 // 6. MU.Lock() b. Q.Get() <---------/ F. MU.Lock()
1826 // 7. write(GLOB) G. write(GLOB)
1827 // 8. MU.Unlock() H. MU.Unlock()
1828 // 9. MU1.Unlock() (sleep) I. MU1.Unlock()
1832 ProducerConsumerQueue
Q(INT_MAX
);
1860 printf("test36: negative \n");
1861 MyThreadArray
t(Getter
, Putter
, Putter
);
1864 printf("\tGLOB=%d\n", GLOB
);
1866 REGISTER_TEST(Run
, 36);
1867 } // namespace test36
1870 // test37: TN. Simple synchronization (write vs read). {{{1
1874 // Similar to test10, but properly locked.
1891 CHECK(GLOB
!= -777);
1896 printf("test37: negative\n");
1897 MyThreadArray
t(Writer
, Reader
);
1900 printf("\tGLOB=%d\n", GLOB
);
1902 REGISTER_TEST(Run
, 37);
1903 } // namespace test37
1906 // test38: TN. Synchronization via Mutexes and PCQ. 4 threads. W/W {{{1
1908 // Fusion of test29 and test36.
1910 // Putter1: Putter2: Getter1: Getter2:
1911 // MU1.Lock() MU1.Lock()
1912 // write(GLOB) write(GLOB)
1913 // MU1.Unlock() MU1.Unlock()
1914 // Q1.Put() Q2.Put()
1915 // Q1.Put() Q2.Put()
1916 // MU1.Lock() MU1.Lock()
1917 // MU2.Lock() MU2.Lock()
1918 // write(GLOB) write(GLOB)
1919 // MU2.Unlock() MU2.Unlock()
1920 // MU1.Unlock() MU1.Unlock() sleep sleep
1921 // Q1.Get() Q1.Get()
1922 // Q2.Get() Q2.Get()
1923 // MU2.Lock() MU2.Lock()
1924 // write(GLOB) write(GLOB)
1925 // MU2.Unlock() MU2.Unlock()
1929 ProducerConsumerQueue
*Q1
, *Q2
;
1933 void Putter(ProducerConsumerQueue
*q
) {
1949 void Putter1() { Putter(Q1
); }
1950 void Putter2() { Putter(Q2
); }
1961 usleep(48000); // TODO: remove this when FP in test32 is fixed.
1965 printf("test38: negative\n");
1966 Q1
= new ProducerConsumerQueue(INT_MAX
);
1967 Q2
= new ProducerConsumerQueue(INT_MAX
);
1968 MyThreadArray
t(Getter
, Getter
, Putter1
, Putter2
);
1971 printf("\tGLOB=%d\n", GLOB
);
1975 REGISTER_TEST(Run
, 38);
1976 } // namespace test38
// test39: FP. Barrier. {{{1
//
// Same as test17 but uses Barrier class (pthread_barrier_t).
const int N_threads = 3;
Barrier barrier(N_threads);
  CHECK(GLOB == N_threads);
  ANNOTATE_TRACE_MEMORY(&GLOB);
  // ANNOTATE_EXPECT_RACE(&GLOB, "test39. FP. Fixed by MSMProp1. Barrier.");
  printf("test39: negative\n");
  ThreadPool pool(N_threads);
  pool.StartWorkers();
  for (int i = 0; i < N_threads; i++) {
    pool.Add(NewCallback(Worker));
  } // all folks are joined here.
  printf("\tGLOB=%d\n", GLOB);
REGISTER_TEST(Run, 39);
#endif // NO_BARRIER
}  // namespace test39
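
// Illustrative sketch (ours): the pthread_barrier_t-based Barrier used by
// test39. Block() is the call used by test72 below; every worker increments
// GLOB under MU and then meets the others at the barrier before checking the
// total.
namespace barrier_sketch {
#ifndef NO_BARRIER
const int kThreads = 3;
int     GLOB = 0;
Mutex   MU;
Barrier barrier(kThreads);

void Worker() {
  MU.Lock();
  GLOB++;
  MU.Unlock();
  barrier.Block();          // all kThreads workers meet here
  CHECK(GLOB == kThreads);  // safe after the barrier
}
#endif  // NO_BARRIER
}  // namespace barrier_sketch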
2012 // test40: FP. Synchronization via Mutexes and PCQ. 4 threads. W/W {{{1
2014 // Similar to test38 but with different order of events (due to sleep).
2016 // Putter1: Putter2: Getter1: Getter2:
2017 // MU1.Lock() MU1.Lock()
2018 // write(GLOB) write(GLOB)
2019 // MU1.Unlock() MU1.Unlock()
2020 // Q1.Put() Q2.Put()
2021 // Q1.Put() Q2.Put()
2022 // Q1.Get() Q1.Get()
2023 // Q2.Get() Q2.Get()
2024 // MU2.Lock() MU2.Lock()
2025 // write(GLOB) write(GLOB)
2026 // MU2.Unlock() MU2.Unlock()
2028 // MU1.Lock() MU1.Lock()
2029 // MU2.Lock() MU2.Lock()
2030 // write(GLOB) write(GLOB)
2031 // MU2.Unlock() MU2.Unlock()
2032 // MU1.Unlock() MU1.Unlock()
2035 ProducerConsumerQueue
*Q1
, *Q2
;
2039 void Putter(ProducerConsumerQueue
*q
) {
2056 void Putter1() { Putter(Q1
); }
2057 void Putter2() { Putter(Q2
); }
2067 usleep(48000); // TODO: remove this when FP in test32 is fixed.
2071 // ANNOTATE_EXPECT_RACE(&GLOB, "test40. FP. Fixed by MSMProp1. Complex Stuff.");
2072 printf("test40: negative\n");
2073 Q1
= new ProducerConsumerQueue(INT_MAX
);
2074 Q2
= new ProducerConsumerQueue(INT_MAX
);
2075 MyThreadArray
t(Getter
, Getter
, Putter1
, Putter2
);
2078 printf("\tGLOB=%d\n", GLOB
);
2082 REGISTER_TEST(Run
, 40);
2083 } // namespace test40
2085 // test41: TN. Test for race that appears when loading a dynamic symbol. {{{1
2088 ANNOTATE_NO_OP(NULL
); // An empty function, loaded from dll.
2091 printf("test41: negative\n");
2092 MyThreadArray
t(Worker
, Worker
, Worker
);
2096 REGISTER_TEST2(Run
, 41, FEATURE
|NEEDS_ANNOTATIONS
);
2097 } // namespace test41
2100 // test42: TN. Using the same cond var several times. {{{1
2118 ANNOTATE_CONDVAR_LOCK_WAIT(&CV
, &MU
);
2130 ANNOTATE_CONDVAR_LOCK_WAIT(&CV
, &MU
);
2143 // ANNOTATE_EXPECT_RACE(&GLOB, "test42. TN. debugging.");
2144 printf("test42: negative\n");
2145 MyThreadArray
t(Worker1
, Worker2
);
2148 printf("\tGLOB=%d\n", GLOB
);
2150 REGISTER_TEST2(Run
, 42, FEATURE
|NEEDS_ANNOTATIONS
);
2151 } // namespace test42
2161 // 3. read \--> a. Q.Get()
2164 ProducerConsumerQueue
Q(INT_MAX
);
2176 printf("test43: negative\n");
2177 MyThreadArray
t(Putter
, Getter
);
2180 printf("\tGLOB=%d\n", GLOB
);
2182 REGISTER_TEST(Run
, 43)
2183 } // namespace test43
2192 // 3. MU.Lock() \--> a. Q.Get()
2200 ProducerConsumerQueue
Q(INT_MAX
);
2216 // ANNOTATE_EXPECT_RACE(&GLOB, "test44. FP. Fixed by MSMProp1.");
2217 printf("test44: negative\n");
2218 MyThreadArray
t(Putter
, Getter
);
2221 printf("\tGLOB=%d\n", GLOB
);
2223 REGISTER_TEST(Run
, 44)
2224 } // namespace test44
2233 // 3. MU.Lock() \--> a. Q.Get()
2241 ProducerConsumerQueue
Q(INT_MAX
);
2257 printf("test45: negative\n");
2258 MyThreadArray
t(Putter
, Getter
);
2261 printf("\tGLOB=%d\n", GLOB
);
2263 REGISTER_TEST(Run
, 45)
2264 } // namespace test45
2274 // 4. MU.Unlock() (sleep)
2293 // If we move it to Run() we will get report in MSMHelgrind
2294 // due to its false positive (test32).
2296 printf("\tGLOB=%d\n", GLOB
);
2300 ANNOTATE_TRACE_MEMORY(&GLOB
);
2301 MyThreadArray
t(First
, Second
);
2305 REGISTER_TEST(Run
, 46)
2306 } // namespace test46
2309 // test47: TP. Not detected by pure happens-before detectors. {{{1
// A true race that cannot be detected by a pure happens-before detector.
2317 // 3. MU.Unlock() (sleep)
2335 FAST_MODE_INIT(&GLOB
);
2336 if (!Tsan_PureHappensBefore())
2337 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB
, "test47. TP. Not detected by pure HB.");
2338 printf("test47: positive\n");
2339 MyThreadArray
t(First
, Second
);
2342 printf("\tGLOB=%d\n", GLOB
);
2344 REGISTER_TEST(Run
, 47)
2345 } // namespace test47
2348 // test48: FN. Simple race (single write vs multiple reads). {{{1
2351 // same as test10 but with single writer and multiple readers
2352 // A simple data race between single writer and multiple readers.
2353 // Write happens before Reads (enforced by sleep(1)),
2357 // 1. write(GLOB) a. sleep(long enough so that GLOB
2358 // is most likely initialized by Writer)
2362 // Eraser algorithm does not detect the race here,
2363 // see Section 2.2 of http://citeseer.ist.psu.edu/savage97eraser.html.
2370 CHECK(GLOB
!= -777);
2374 FAST_MODE_INIT(&GLOB
);
2375 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB
, "test48. TP. FN in MSMHelgrind.");
2376 printf("test48: positive\n");
2377 MyThreadArray
t(Writer
, Reader
,Reader
,Reader
);
2380 printf("\tGLOB=%d\n", GLOB
);
2382 REGISTER_TEST(Run
, 48)
2383 } // namespace test48
2386 // test49: FN. Simple race (single write vs multiple reads). {{{1
2389 // same as test10 but with multiple read operations done by a single reader
2390 // A simple data race between writer and readers.
2391 // Write happens before Read (enforced by sleep(1)),
2394 // 1. write(GLOB) a. sleep(long enough so that GLOB
2395 // is most likely initialized by Writer)
2402 // Eraser algorithm does not detect the race here,
2403 // see Section 2.2 of http://citeseer.ist.psu.edu/savage97eraser.html.
2410 CHECK(GLOB
!= -777);
2411 CHECK(GLOB
!= -777);
2412 CHECK(GLOB
!= -777);
2413 CHECK(GLOB
!= -777);
2417 FAST_MODE_INIT(&GLOB
);
2418 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB
, "test49. TP. FN in MSMHelgrind.");
2419 printf("test49: positive\n");
2420 MyThreadArray
t(Writer
, Reader
);
2423 printf("\tGLOB=%d\n", GLOB
);
2425 REGISTER_TEST(Run
, 49);
2426 } // namespace test49
2429 // test50: TP. Synchronization via CondVar. {{{1
2433 // Two last write accesses to GLOB are not synchronized
2438 // 3. MU.Lock() a. write(GLOB)
2441 // /--- d. CV.Signal()
2442 // 4. while(COND != 1) / e. MU.Unlock()
2443 // CV.Wait(MU) <---/
2445 // 6. write(GLOB) f. MU.Lock()
2451 usleep(100000); // Make sure the waiter blocks.
2468 pool
.StartWorkers();
2470 pool
.Add(NewCallback(Waker
));
2475 ANNOTATE_CONDVAR_LOCK_WAIT(&CV
, &MU
);
2481 FAST_MODE_INIT(&GLOB
);
2482 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB
, "test50. TP.");
2483 printf("test50: positive\n");
2485 printf("\tGLOB=%d\n", GLOB
);
2487 REGISTER_TEST2(Run
, 50, FEATURE
|NEEDS_ANNOTATIONS
);
2488 } // namespace test50
2491 // test51: TP. Synchronization via CondVar: problem with several signals. {{{1
// Scheduler-dependent results because of several signals:
// the second signal will be lost.
2506 // 4. MU.Unlock() \ .
2507 // 5. write(GLOB) \ a. write(GLOB)
2510 // \--- d. CV.Signal()
2517 // LOST<---- i. CV.Signal()
2522 usleep(10000); // Make sure the waiter blocks.
2531 usleep(10000); // Make sure the waiter is signalled.
2537 CV
.Signal(); //Lost Signal
2544 pool
.StartWorkers();
2545 pool
.Add(NewCallback(Waker
));
2556 FAST_MODE_INIT(&GLOB
);
2557 ANNOTATE_EXPECT_RACE(&GLOB
, "test51. TP.");
2558 printf("test51: positive\n");
2560 printf("\tGLOB=%d\n", GLOB
);
2562 REGISTER_TEST(Run
, 51);
2563 } // namespace test51
2566 // test52: TP. Synchronization via CondVar: problem with several signals. {{{1
// Same as test51, but the first signal will be lost.
// Scheduler-dependent results because of several signals.
2580 // LOST<---- d. CV.Signal()
2586 // 4. MU.Unlock() \ f. write(GLOB)
2587 // 5. write(GLOB) \ .
2590 // \--- i. CV.Signal()
2599 CV
.Signal(); //lost signal
2602 usleep(20000); // Make sure the waiter blocks
2614 pool
.StartWorkers();
2615 pool
.Add(NewCallback(Waker
));
2617 usleep(10000); // Make sure the first signal will be lost
2627 FAST_MODE_INIT(&GLOB
);
2628 ANNOTATE_EXPECT_RACE(&GLOB
, "test52. TP.");
2629 printf("test52: positive\n");
2631 printf("\tGLOB=%d\n", GLOB
);
2633 REGISTER_TEST(Run
, 52);
2634 } // namespace test52
2637 // test53: FP. Synchronization via implicit semaphore. {{{1
2639 // Correctly synchronized test, but the common lockset is empty.
2640 // The variable FLAG works as an implicit semaphore.
2641 // MSMHelgrind still does not complain since it does not maintain the lockset
// in the exclusive state. But MSMProp1 does complain.
2646 // Initializer: Users
2654 // d. if (!f) goto a.
2664 void Initializer() {
2669 usleep(100000); // just in case
2680 // at this point Initializer will not access GLOB again
2682 CHECK(GLOB
>= 1000);
2688 FAST_MODE_INIT(&GLOB
);
2689 if (!Tsan_PureHappensBefore())
2690 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB
, "test53. FP. Implicit semaphore");
2691 printf("test53: FP. false positive, Implicit semaphore\n");
2692 MyThreadArray
t(Initializer
, User
, User
);
2695 printf("\tGLOB=%d\n", GLOB
);
2697 REGISTER_TEST(Run
, 53)
2698 } // namespace test53
2701 // test54: TN. Synchronization via implicit semaphore. Annotated {{{1
2703 // Same as test53, but annotated.
2708 void Initializer() {
2712 ANNOTATE_CONDVAR_SIGNAL(&GLOB
);
2714 usleep(100000); // just in case
2725 // at this point Initializer will not access GLOB again
2726 ANNOTATE_CONDVAR_WAIT(&GLOB
);
2728 CHECK(GLOB
>= 1000);
2734 printf("test54: negative\n");
2735 MyThreadArray
t(Initializer
, User
, User
);
2738 printf("\tGLOB=%d\n", GLOB
);
2740 REGISTER_TEST2(Run
, 54, FEATURE
|NEEDS_ANNOTATIONS
)
2741 } // namespace test54
2744 // test55: FP. Synchronization with TryLock. Not easy for race detectors {{{1
2746 // "Correct" synchronization with TryLock and Lock.
2748 // This scheme is actually very risky.
2749 // It is covered in detail in this video:
2750 // http://youtube.com/watch?v=mrvAqvtWYb4 (slide 36, near 50-th minute).
2754 void Worker_Lock() {
2759 void Worker_TryLock() {
2761 if (!MU
.TryLock()) {
2773 printf("test55:\n");
2774 MyThreadArray
t(Worker_Lock
, Worker_TryLock
);
2777 printf("\tGLOB=%d\n", GLOB
);
2779 REGISTER_TEST2(Run
, 55, FEATURE
|EXCLUDE_FROM_ALL
);
2780 } // namespace test55
2784 // test56: TP. Use of ANNOTATE_BENIGN_RACE. {{{1
2786 // For whatever reason the user wants to treat
2787 // a race on GLOB as a benign race.
2796 ANNOTATE_BENIGN_RACE(&GLOB
, "test56. Use of ANNOTATE_BENIGN_RACE.");
2797 ANNOTATE_BENIGN_RACE(&GLOB2
, "No race. The tool should be silent");
2798 printf("test56: positive\n");
2799 MyThreadArray
t(Worker
, Worker
, Worker
, Worker
);
2802 printf("\tGLOB=%d\n", GLOB
);
2804 REGISTER_TEST2(Run
, 56, FEATURE
|NEEDS_ANNOTATIONS
)
2805 } // namespace test56
2808 // test57: TN: Correct use of atomics. {{{1
2812 for (int i
= 0; i
< 10; i
++) {
2813 AtomicIncrement(&GLOB
, 1);
2818 while (GLOB
< 20) usleep(1000);
2821 printf("test57: negative\n");
2822 MyThreadArray
t(Writer
, Writer
, Reader
, Reader
);
2826 printf("\tGLOB=%d\n", GLOB
);
2828 REGISTER_TEST(Run
, 57)
2829 } // namespace test57
2832 // test58: TN. User defined synchronization. {{{1
2839 // Correctly synchronized test, but the common lockset is empty.
2840 // The variables FLAG1 and FLAG2 used for synchronization and as
2841 // temporary variables for swapping two global values.
// This kind of synchronization is rarely used (excluded from all tests??).
2861 printf("test58:\n");
2862 MyThreadArray
t(Worker1
, Worker2
);
2865 printf("\tGLOB1=%d\n", GLOB1
);
2866 printf("\tGLOB2=%d\n", GLOB2
);
2868 REGISTER_TEST2(Run
, 58, FEATURE
|EXCLUDE_FROM_ALL
)
2869 } // namespace test58
2873 // test59: TN. User defined synchronization. Annotated {{{1
2881 // same as test 58 but annotated
2885 ANNOTATE_CONDVAR_SIGNAL(&COND2
);
2886 while(!FLAG2
) usleep(1);
2887 ANNOTATE_CONDVAR_WAIT(&COND1
);
2893 ANNOTATE_CONDVAR_SIGNAL(&COND1
);
2894 while(!FLAG1
) usleep(1);
2895 ANNOTATE_CONDVAR_WAIT(&COND2
);
2900 printf("test59: negative\n");
2901 ANNOTATE_BENIGN_RACE(&FLAG1
, "synchronization via 'safe' race");
2902 ANNOTATE_BENIGN_RACE(&FLAG2
, "synchronization via 'safe' race");
2903 MyThreadArray
t(Worker1
, Worker2
);
2906 printf("\tGLOB1=%d\n", GLOB1
);
2907 printf("\tGLOB2=%d\n", GLOB2
);
2909 REGISTER_TEST2(Run
, 59, FEATURE
|NEEDS_ANNOTATIONS
)
2910 } // namespace test59
2913 // test60: TN. Correct synchronization using signal-wait {{{1
2922 // same as test 59 but synchronized with signal-wait.
2935 ANNOTATE_CONDVAR_LOCK_WAIT(&CV
, &MU
);
2952 ANNOTATE_CONDVAR_LOCK_WAIT(&CV
, &MU
);
2959 printf("test60: negative\n");
2960 MyThreadArray
t(Worker1
, Worker2
);
2963 printf("\tGLOB1=%d\n", GLOB1
);
2964 printf("\tGLOB2=%d\n", GLOB2
);
2966 REGISTER_TEST2(Run
, 60, FEATURE
|NEEDS_ANNOTATIONS
)
2967 } // namespace test60
2970 // test61: TN. Synchronization via Mutex as in happens-before, annotated. {{{1
2974 int *P1
= NULL
, *P2
= NULL
;
// In this test, Mutex lock/unlock operations introduce a happens-before relation.
// We annotate the code so that MU is treated as in a pure happens-before detector.
2981 ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&MU
);
3006 printf("test61: negative\n");
3007 MyThreadArray
t(Putter
, Getter
);
3010 printf("\tGLOB=%d\n", GLOB
);
3012 REGISTER_TEST2(Run
, 61, FEATURE
|NEEDS_ANNOTATIONS
)
3013 } // namespace test61
3016 // test62: STAB. Create as many segments as possible. {{{1
3018 // Helgrind 3.3.0 will fail as it has a hard limit of < 2^24 segments.
3019 // A better scheme is to implement garbage collection for segments.
3020 ProducerConsumerQueue
Q(INT_MAX
);
3021 const int N
= 1 << 22;
3024 for (int i
= 0; i
< N
; i
++){
3025 if ((i
% (N
/ 8)) == 0) {
3026 printf("i=%d\n", i
);
3033 for (int i
= 0; i
< N
; i
++)
3038 printf("test62:\n");
3039 MyThreadArray
t(Putter
, Getter
);
3043 REGISTER_TEST2(Run
, 62, STABILITY
|EXCLUDE_FROM_ALL
)
3044 } // namespace test62
3047 // test63: STAB. Create as many segments as possible and do it fast. {{{1
3049 // Helgrind 3.3.0 will fail as it has a hard limit of < 2^24 segments.
3050 // A better scheme is to implement garbage collection for segments.
3051 const int N
= 1 << 24;
3055 for (int i
= 0; i
< N
; i
++){
3056 if ((i
% (N
/ 8)) == 0) {
3057 printf("i=%d\n", i
);
3059 ANNOTATE_CONDVAR_SIGNAL(&C
);
3067 printf("test63:\n");
3068 MyThreadArray
t(Putter
, Getter
);
3072 REGISTER_TEST2(Run
, 63, STABILITY
|EXCLUDE_FROM_ALL
)
3073 } // namespace test63
3076 // test64: TP. T2 happens-before T3, but T1 is independent. Reads in T1/T2. {{{1
3078 // True race between T1 and T3:
3081 // 1. read(GLOB) (sleep)
3083 // b. Q.Put() -----> A. Q.Get()
3089 ProducerConsumerQueue
Q(INT_MAX
);
3108 FAST_MODE_INIT(&GLOB
);
3109 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB
, "test64: TP.");
3110 printf("test64: positive\n");
3111 MyThreadArray
t(T1
, T2
, T3
);
3114 printf("\tGLOB=%d\n", GLOB
);
3116 REGISTER_TEST(Run
, 64)
3117 } // namespace test64
3120 // test65: TP. T2 happens-before T3, but T1 is independent. Writes in T1/T2. {{{1
3122 // Similar to test64.
3123 // True race between T1 and T3:
3128 // 3. MU.Unlock() (sleep)
3132 // d. Q.Put() -----> A. Q.Get()
3139 ProducerConsumerQueue
Q(INT_MAX
);
3162 FAST_MODE_INIT(&GLOB
);
3163 if (!Tsan_PureHappensBefore())
3164 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB
, "test65. TP.");
3165 printf("test65: positive\n");
3166 MyThreadArray
t(T1
, T2
, T3
);
3169 printf("\tGLOB=%d\n", GLOB
);
3171 REGISTER_TEST(Run
, 65)
3172 } // namespace test65
3175 // test66: TN. Two separate pairs of signaller/waiter using the same CV. {{{1
3202 while (C1
!= 1) CV
.Wait(&MU
);
3203 ANNOTATE_CONDVAR_WAIT(&CV
);
3210 while (C2
!= 1) CV
.Wait(&MU
);
3211 ANNOTATE_CONDVAR_WAIT(&CV
);
3217 printf("test66: negative\n");
3218 MyThreadArray
t(Signaller1
, Signaller2
, Waiter1
, Waiter2
);
3221 printf("\tGLOB=%d/%d\n", GLOB1
, GLOB2
);
3223 REGISTER_TEST2(Run
, 66, FEATURE
|NEEDS_ANNOTATIONS
)
3224 } // namespace test66
3227 // test67: FN. Race between Signaller1 and Waiter2 {{{1
3229 // Similar to test66, but there is a real race here.
3231 // Here we create a happens-before arc between Signaller1 and Waiter2
3232 // even though there should be no such arc.
// However, it's probably impossible (or just very hard) to avoid it.
3257 while (C1
!= 1) CV
.Wait(&MU
);
3258 ANNOTATE_CONDVAR_WAIT(&CV
);
3264 while (C2
!= 1) CV
.Wait(&MU
);
3265 ANNOTATE_CONDVAR_WAIT(&CV
);
3271 FAST_MODE_INIT(&GLOB
);
3272 ANNOTATE_EXPECT_RACE(&GLOB
, "test67. FN. Race between Signaller1 and Waiter2");
3273 printf("test67: positive\n");
3274 MyThreadArray
t(Signaller1
, Signaller2
, Waiter1
, Waiter2
);
3277 printf("\tGLOB=%d\n", GLOB
);
3279 REGISTER_TEST2(Run
, 67, FEATURE
|NEEDS_ANNOTATIONS
|EXCLUDE_FROM_ALL
)
3280 } // namespace test67
3283 // test68: TP. Writes are protected by MU, reads are not. {{{1
3285 // In this test, all writes to GLOB are protected by a mutex
3286 // but some reads go unprotected.
3287 // This is certainly a race, but in some cases such code could occur in
3288 // a correct program. For example, the unprotected reads may be used
3289 // for showing statistics and are not required to be precise.
3292 const int N_writers
= 3;
3296 for (int i
= 0; i
< 100; i
++) {
3315 if (COND
== N_writers
)
3323 FAST_MODE_INIT(&GLOB
);
3324 ANNOTATE_EXPECT_RACE(&GLOB
, "TP. Writes are protected, reads are not.");
3325 printf("test68: positive\n");
3326 MyThreadArray
t(Reader
, Writer
, Writer
, Writer
);
3329 printf("\tGLOB=%d\n", GLOB
);
3331 REGISTER_TEST(Run
, 68)
3332 } // namespace test68
3337 // This is the same as test68, but annotated.
3338 // We do not want to annotate GLOB as a benign race
3339 // because we want to allow racy reads only in certain places.
3344 const int N_writers
= 3;
3349 for (int i
= 0; i
< 10; i
++) {
3364 ANNOTATE_IGNORE_READS_BEGIN();
3366 ANNOTATE_IGNORE_READS_END();
3370 if (COND
== N_writers
)
3378 printf("test69: negative\n");
3379 MyThreadArray
t(Reader
, Writer
, Writer
, Writer
);
3382 printf("\tGLOB=%d\n", GLOB
);
3384 REGISTER_TEST(Run
, 69)
3385 } // namespace test69
3387 // test70: STAB. Check that TRACE_MEMORY works. {{{1
3391 printf("test70: negative\n");
3392 ANNOTATE_TRACE_MEMORY(&GLOB
);
3394 printf("\tGLOB=%d\n", GLOB
);
3396 REGISTER_TEST(Run
, 70)
3397 } // namespace test70
// test71: TN. strlen, index. {{{1
//
// This test is a reproducer for a benign race in strlen (as well as index, etc.).
// Some implementations of strlen may read up to 7 bytes past the end of the string,
// thus touching memory which may not belong to this string.
// Such a race is benign because the data read past the end of the string is not used.
//
// Here, we allocate an 8-byte aligned string str and initialize the first 5 bytes.
// Then one thread calls strlen(str) (as well as index & rindex)
// and another thread initializes str[5]..str[7].
//
// This can be fixed in Helgrind by intercepting strlen and replacing it
// with a simpler implementation.
  CHECK(strlen(str) == 4);
  CHECK(index(str, 'X') == str);
  CHECK(index(str, 'x') == str+1);
  CHECK(index(str, 'Y') == NULL);
  CHECK(rindex(str, 'X') == str+2);
  CHECK(rindex(str, 'x') == str+3);
  CHECK(rindex(str, 'Y') == NULL);
  printf("test71: negative (strlen & index)\n");
  MyThread t1(WorkerY);
  MyThread t2(WorkerX);
  printf("\tstrX=%s; strY=%s\n", str, str+5);
REGISTER_TEST(Run, 71)
}  // namespace test71
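
// Illustrative sketch (ours; the original initialization code is truncated
// above): a buffer laid out to satisfy the CHECKs in WorkerX -- "XxXx" plus a
// terminating zero in the first five bytes, with str[5]..str[7] left for
// WorkerY to fill in concurrently. (The real test wants this 8-byte aligned;
// a plain new[] is used here for brevity.)
namespace strlen_sketch {
char *MakeStr() {
  char *s = new char[8];                          // 8 bytes, heap allocated
  memset(s, 'Y', 8);
  s[0] = 'X'; s[1] = 'x'; s[2] = 'X'; s[3] = 'x';
  s[4] = '\0';                                    // strlen(s) == 4
  return s;
}
}  // namespace strlen_sketch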
3453 // test72: STAB. Stress test for the number of segment sets (SSETs). {{{1
3456 // Variation of test33.
3457 // Instead of creating Nlog*N_iter threads,
3458 // we create Nlog threads and do N_iter barriers.
3460 const int N_iter
= 30;
3461 const int Nlog
= 16;
3462 const int N
= 1 << Nlog
;
3463 static int64_t ARR1
[N
];
3464 static int64_t ARR2
[N
];
3465 Barrier
*barriers
[N_iter
];
3476 long t
__attribute__((unused
)) = t0
;
3478 for (int it
= 0; it
< N_iter
; it
++) {
3480 //printf("Iter: %d; %ld %ld\n", it, clock() - t, clock() - t0);
3483 // Iterate N_iter times, block on barrier after each iteration.
3484 // This way Helgrind will create new segments after each barrier.
3486 for (int x
= 0; x
< 2; x
++) {
3487 // run the inner loop twice.
3488 // When a memory location is accessed second time it is likely
3489 // that the state (SVal) will be unchanged.
3490 // The memory machine may optimize this case.
3491 for (int i
= 0; i
< N
; i
++) {
3492 // ARR1[i] and ARR2[N-1-i] are accessed by threads from i-th subset
3494 CHECK(ARR1
[i
] == 0);
3495 CHECK(ARR2
[N
-1-i
] == 0);
3499 barriers
[it
]->Block();
3505 printf("test72:\n");
3507 std::vector
<MyThread
*> vec(Nlog
);
3509 for (int i
= 0; i
< N_iter
; i
++)
3510 barriers
[i
] = new Barrier(Nlog
);
3512 // Create and start Nlog threads
3513 for (int i
= 0; i
< Nlog
; i
++) {
3514 vec
[i
] = new MyThread(Worker
);
3518 // Join all threads.
3519 for (int i
= 0; i
< Nlog
; i
++) {
3523 for (int i
= 0; i
< N_iter
; i
++)
3526 /*printf("\tGLOB=%d; ARR[1]=%d; ARR[7]=%d; ARR[N-1]=%d\n",
3527 GLOB, (int)ARR1[1], (int)ARR1[7], (int)ARR1[N-1]);*/
3529 REGISTER_TEST2(Run
, 72, STABILITY
|PERFORMANCE
|EXCLUDE_FROM_ALL
);
3530 #endif // NO_BARRIER
3531 } // namespace test72
3534 // test73: STAB. Stress test for the number of (SSETs), different access sizes. {{{1
3537 // Variation of test72.
3538 // We perform accesses of different sizes to the same location.
3540 const int N_iter
= 2;
3541 const int Nlog
= 16;
3542 const int N
= 1 << Nlog
;
3543 union uint64_union
{
3549 static uint64_union ARR1
[N
];
3550 union uint32_union
{
3555 static uint32_union ARR2
[N
];
3556 Barrier
*barriers
[N_iter
];
3566 for (int it
= 0; it
< N_iter
; it
++) {
3567 // Iterate N_iter times, block on barrier after each iteration.
3568 // This way Helgrind will create new segments after each barrier.
3570 for (int x
= 0; x
< 4; x
++) {
3571 for (int i
= 0; i
< N
; i
++) {
3572 // ARR1[i] are accessed by threads from i-th subset
3574 for (int off
= 0; off
< (1 << x
); off
++) {
3576 case 0: CHECK(ARR1
[i
].u64
[off
] == 0); break;
3577 case 1: CHECK(ARR1
[i
].u32
[off
] == 0); break;
3578 case 2: CHECK(ARR1
[i
].u16
[off
] == 0); break;
3579 case 3: CHECK(ARR1
[i
].u8
[off
] == 0); break;
3582 case 1: CHECK(ARR2
[i
].u32
[off
] == 0); break;
3583 case 2: CHECK(ARR2
[i
].u16
[off
] == 0); break;
3584 case 3: CHECK(ARR2
[i
].u8
[off
] == 0); break;
3590 barriers
[it
]->Block();
3597 printf("test73:\n");
3599 std::vector
<MyThread
*> vec(Nlog
);
3601 for (int i
= 0; i
< N_iter
; i
++)
3602 barriers
[i
] = new Barrier(Nlog
);
3604 // Create and start Nlog threads
3605 for (int i
= 0; i
< Nlog
; i
++) {
3606 vec
[i
] = new MyThread(Worker
);
3610 // Join all threads.
3611 for (int i
= 0; i
< Nlog
; i
++) {
3615 for (int i
= 0; i
< N_iter
; i
++)
3618 /*printf("\tGLOB=%d; ARR[1]=%d; ARR[7]=%d; ARR[N-1]=%d\n",
3619 GLOB, (int)ARR1[1], (int)ARR1[7], (int)ARR1[N-1]);*/
3621 REGISTER_TEST2(Run
, 73, STABILITY
|PERFORMANCE
|EXCLUDE_FROM_ALL
);
3622 #endif // NO_BARRIER
3623 } // namespace test73
3626 // test74: PERF. A lot of lock/unlock calls. {{{1
3628 const int N
= 100000;
3631 printf("test74: perf\n");
3632 for (int i
= 0; i
< N
; i
++ ) {
3637 REGISTER_TEST(Run
, 74)
3638 } // namespace test74
3641 // test75: TN. Test for sem_post, sem_wait, sem_trywait. {{{1
3658 sem_trywait(&sem
[1]);
3664 sem_init(&sem
[0], 0, 0);
3665 sem_init(&sem
[1], 0, 0);
3667 printf("test75: negative\n");
3669 MyThreadArray
t(Poster
, Waiter
);
3675 MyThreadArray
t(Poster
, TryWaiter
);
3679 printf("\tGLOB=%d\n", GLOB
);
3681 sem_destroy(&sem
[0]);
3682 sem_destroy(&sem
[1]);
3685 REGISTER_TEST(Run
, 75)
3686 } // namespace test75
3688 // RefCountedClass {{{1
3689 struct RefCountedClass
{
3692 annotate_unref_
= false;
3697 ~RefCountedClass() {
3698 CHECK(ref_
== 0); // race may be reported here
3699 int data_val
= data_
; // and here
3700 // if MU is not annotated
3703 printf("\tRefCountedClass::data_ = %d\n", data_val
);
3723 bool do_delete
= ref_
== 0;
3724 if (annotate_unref_
) {
3725 ANNOTATE_CONDVAR_SIGNAL(this);
3729 if (annotate_unref_
) {
3730 ANNOTATE_CONDVAR_WAIT(this);
3736 static void Annotate_MU() {
3737 ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&MU
);
3739 void AnnotateUnref() {
3740 annotate_unref_
= true;
3742 void Annotate_Race() {
3743 ANNOTATE_BENIGN_RACE(&this->data_
, "needs annotation");
3744 ANNOTATE_BENIGN_RACE(&this->ref_
, "needs annotation");
3747 bool annotate_unref_
;
3750 Mutex mu_
; // protects data_
3753 static Mutex MU
; // protects ref_
3756 Mutex
RefCountedClass::MU
;
3758 // test76: FP. Ref counting, no annotations. {{{1
3763 RefCountedClass
*object
= NULL
;
3767 object
->AccessData();
3771 printf("test76: false positive (ref counting)\n");
3772 object
= new RefCountedClass
;
3773 object
->Annotate_Race();
3774 MyThreadArray
t(Worker
, Worker
, Worker
, Worker
);
3778 REGISTER_TEST2(Run
, 76, FEATURE
)
3779 #endif // NO_BARRIER
3780 } // namespace test76
3784 // test77: TN. Ref counting, MU is annotated. {{{1
3787 // same as test76, but RefCountedClass::MU is annotated.
3790 RefCountedClass
*object
= NULL
;
3794 object
->AccessData();
3798 printf("test77: true negative (ref counting), mutex is annotated\n");
3799 RefCountedClass::Annotate_MU();
3800 object
= new RefCountedClass
;
3801 MyThreadArray
t(Worker
, Worker
, Worker
, Worker
);
3805 REGISTER_TEST(Run
, 77)
3806 #endif // NO_BARRIER
3807 } // namespace test77
3811 // test78: TN. Ref counting, Unref is annotated. {{{1
3814 // same as test76, but RefCountedClass::Unref is annotated.
3817 RefCountedClass
*object
= NULL
;
3821 object
->AccessData();
3825 printf("test78: true negative (ref counting), Unref is annotated\n");
3826 RefCountedClass::Annotate_MU();
3827 object
= new RefCountedClass
;
3828 MyThreadArray
t(Worker
, Worker
, Worker
, Worker
);
3832 REGISTER_TEST(Run
, 78)
3833 #endif // NO_BARRIER
3834 } // namespace test78
3838 // test79 TN. Swap. {{{1
3841 typedef __gnu_cxx::hash_map
<int, int> map_t
;
3843 typedef std::map
<int, int> map_t
;
3848 // Here we use swap to pass MAP between threads.
3849 // The synchronization is correct, but w/o ANNOTATE_MUTEX_IS_USED_AS_CONDVAR
3850 // Helgrind will complain.
3855 // We swap the new empty map 'tmp' with 'MAP'.
3858 // tmp (which is the old version of MAP) is destroyed here.
3863 MAP
[1]++; // Just update MAP under MU.
3867 void Worker3() { Worker1(); }
3868 void Worker4() { Worker2(); }
3871 ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&MU
);
3872 printf("test79: negative\n");
3873 MyThreadArray
t(Worker1
, Worker2
, Worker3
, Worker4
);
3877 REGISTER_TEST(Run
, 79)
3878 } // namespace test79
3881 // AtomicRefCountedClass. {{{1
3882 // Same as RefCountedClass, but using atomic ops instead of mutex.
3883 struct AtomicRefCountedClass
{
3885 AtomicRefCountedClass() {
3886 annotate_unref_
= false;
3891 ~AtomicRefCountedClass() {
3892 CHECK(ref_
== 0); // race may be reported here
3893 int data_val
= data_
; // and here
3896 printf("\tRefCountedClass::data_ = %d\n", data_val
);
3906 AtomicIncrement(&ref_
, 1);
3910 // DISCLAIMER: I am not sure I've implemented this correctly
3911 // (might require some memory barrier, etc).
3912 // But this implementation of reference counting is enough for
3913 // the purpose of Helgrind demonstration.
3914 AtomicIncrement(&ref_
, -1);
3915 if (annotate_unref_
) { ANNOTATE_CONDVAR_SIGNAL(this); }
3917 if (annotate_unref_
) { ANNOTATE_CONDVAR_WAIT(this); }
3922 void AnnotateUnref() {
3923 annotate_unref_
= true;
3925 void Annotate_Race() {
3926 ANNOTATE_BENIGN_RACE(&this->data_
, "needs annotation");
3929 bool annotate_unref_
;
3932 int data_
; // under mu_
3934 int ref_
; // used in atomic ops.
3937 // test80: FP. Ref counting with atomics, no annotations. {{{1
3942 AtomicRefCountedClass
*object
= NULL
;
3946 object
->AccessData();
3947 object
->Unref(); // All the tricky stuff is here.
3950 printf("test80: false positive (ref counting)\n");
3951 object
= new AtomicRefCountedClass
;
3952 object
->Annotate_Race();
3953 MyThreadArray
t(Worker
, Worker
, Worker
, Worker
);
3957 REGISTER_TEST2(Run
, 80, FEATURE
|EXCLUDE_FROM_ALL
)
3958 #endif // NO_BARRIER
3959 } // namespace test80
3962 // test81: TN. Ref counting with atomics, Unref is annotated. {{{1
3965 // same as test80, but Unref is annotated.
3968 AtomicRefCountedClass
*object
= NULL
;
3972 object
->AccessData();
3973 object
->Unref(); // All the tricky stuff is here.
3976 printf("test81: negative (annotated ref counting)\n");
3977 object
= new AtomicRefCountedClass
;
3978 object
->AnnotateUnref();
3979 MyThreadArray
t(Worker
, Worker
, Worker
, Worker
);
3983 REGISTER_TEST2(Run
, 81, FEATURE
|EXCLUDE_FROM_ALL
)
3984 #endif // NO_BARRIER
3985 } // namespace test81
3988 // test82: Object published w/o synchronization. {{{1
3991 // Writer creates a new object and makes the pointer visible to the Reader.
3992 // Reader waits until the object pointer is non-null and reads the object.
3994 // On Core 2 Duo this test will sometimes (quite rarely) fail in
3995 // the CHECK below, at least if compiled with -O2.
3997 // The sequence of events:
3998 // Thread1: Thread2:
3999 // a. arr_[...] = ...
4001 // A. ... = foo[i]; // non NULL
4002 // B. ... = arr_[...];
4004 // Since there is no proper synchronization, during the event (B)
4005 // Thread2 may not see the result of the event (a).
4006 // On x86 and x86_64 this happens due to compiler reordering instructions.
4007 // On other architectures it may also happen due to cache inconsistency.
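// An illustrative sketch of how this publication could be made safe,
// assuming GCC's __sync_synchronize() builtin (the test deliberately uses
// no barrier at all; this sketch is not part of the test):
//
//   // Writer:
//   obj->arr_[obj->idx_] = 77777;
//   __sync_synchronize();        // order the writes to *obj before the publish
//   foo[i] = obj;
//
//   // Reader:
//   while (foo[i] == NULL) { }   // spin until published
//   __sync_synchronize();        // order the read of foo[i] before reads of *obj
//   check(foo[i]);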
4012 idx_ = rand() % 1024;
4014 // __asm__ __volatile__("" : : : "memory"); // this fixes!
4016 static void check(volatile FOO *foo) {
4017 CHECK(foo->arr_[foo->idx_] == 77777);
4024 const int N = 100000;
4025 static volatile FOO *foo[N];
4029 for (int i = 0; i < N; i++) {
4036 for (int i = 0; i < N; i++) {
4038 MU.Lock(); // this is NOT a synchronization,
4039 MU.Unlock(); // it just helps foo[i] to become visible in Reader.
4041 if ((i % 100) == 0) {
4042 printf("rd %d\n", i);
4044 // At this point Reader() sees the new value of foo[i]
4045 // but in very rare cases will not see the new value of foo[i]->arr_.
4046 // Thus this CHECK will sometimes fail.
4052 printf("test82: positive\n");
4053 MyThreadArray t(Writer, Reader);
4057 REGISTER_TEST2(Run, 82, FEATURE|EXCLUDE_FROM_ALL)
4058 } // namespace test82
4061 // test83: Object published w/o synchronization (simple version){{{1
4063 // A simplified version of test82 (an example of wrong code).
4064 // This test, though incorrect, will almost never fail.
4065 volatile static int *ptr = NULL;
4075 MU.Lock(); // Not a synchronization!
4082 // printf("test83: positive\n");
4083 MyThreadArray t(Writer, Reader);
4087 REGISTER_TEST2(Run, 83, FEATURE|EXCLUDE_FROM_ALL)
4088 } // namespace test83
4091 // test84: TP. True race (regression test for a bug related to atomics){{{1
4093 // Helgrind should not create HB arcs for the bus lock even when
4094 // --pure-happens-before=yes is used.
4095 // Bug found by Bart Van Assche, the test is taken from
4096 // valgrind file drd/tests/atomic_var.c.
4098 /* s_dummy[] ensures that s_x and s_y are not in the same cache line. */
4099 static char s_dummy[512] = {0};
4102 void thread_func_1()
4105 AtomicIncrement(&s_x, 1);
4108 void thread_func_2()
4110 while (AtomicIncrement(&s_x, 0) == 0)
4112 printf("y = %d\n", s_y);
4117 CHECK(s_dummy[0] == 0); // Avoid compiler warning about 's_dummy unused'.
4118 printf("test84: positive\n");
4119 FAST_MODE_INIT(&s_y);
4120 ANNOTATE_EXPECT_RACE_FOR_TSAN(&s_y, "test84: TP. true race.");
4121 MyThreadArray t(thread_func_1, thread_func_2);
4125 REGISTER_TEST(Run, 84)
4126 } // namespace test84
4129 // test85: Test for RunningOnValgrind(). {{{1
4133 printf("test85: RunningOnValgrind() = %d\n", RunningOnValgrind());
4135 REGISTER_TEST(Run, 85)
4136 } // namespace test85
4139 // test86: Test for race inside DTOR: racey write to vptr. Benign. {{{1
4141 // This test shows a racey access to vptr (the pointer to vtbl).
4142 // We have class A and class B derived from A.
4143 // Both classes have a virtual function f() and a virtual DTOR.
4144 // We create an object 'A *a = new B'
4145 // and pass this object from Thread1 to Thread2.
4146 // Thread2 calls a->f(). This call reads a->vptr.
4147 // Thread1 deletes the object. B::~B waits until the object can be destroyed
4148 // (flag_stopped == true) but at the very beginning of B::~B
4149 // a->vptr is written to.
4150 // So, we have a race on a->vptr.
4151 // In this particular test this race is benign, but test87 shows
4152 // how such a race could be harmful.
4158 // 2. Q.Put(a); ------------\ .
4159 // \--------------------> a. a = Q.Get();
4161 // /--------- c. flag_stopped = true;
4163 // waits until flag_stopped <------/
4167 bool flag_stopped = false;
4170 ProducerConsumerQueue Q(INT_MAX); // Used to pass A* between threads.
4173 A() { printf("A::A()\n"); }
4174 virtual ~A() { printf("A::~A()\n"); }
4175 virtual void f() { }
4177 uintptr_t padding[15];
4178 } __attribute__ ((aligned (64)));
4181 B() { printf("B::B()\n"); }
4183 // The race is here. <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
4184 printf("B::~B()\n");
4185 // wait until flag_stopped is true.
4186 mu.LockWhen(Condition<bool>(&ArgIsTrue, &flag_stopped));
4188 printf("B::~B() done\n");
4190 virtual void f() { }
4195 if (!Tsan_FastMode())
4196 ANNOTATE_EXPECT_RACE(a, "test86: expected race on a->vptr");
4197 printf("Waiter: B created\n");
4199 usleep(100000); // so that Worker calls a->f() first.
4200 printf("Waiter: deleting B\n");
4202 printf("Waiter: B deleted\n");
4204 printf("Waiter: done\n");
4208 A *a = reinterpret_cast<A*>(Q.Get());
4209 printf("Worker: got A\n");
4213 flag_stopped = true;
4216 printf("Worker: done\n");
4220 printf("test86: positive, race inside DTOR\n");
4221 MyThreadArray t(Waiter, Worker);
4225 REGISTER_TEST(Run, 86)
4226 } // namespace test86
4229 // test87: Test for race inside DTOR: racey write to vptr. Harmful.{{{1
4231 // A variation of test86 where the race is harmful.
4232 // Here we have class C derived from B.
4233 // We create an object 'A *a = new C' in Thread1 and pass it to Thread2.
4234 // Thread2 calls a->f().
4235 // Thread1 calls 'delete a'.
4236 // It first calls C::~C, then B::~B where it rewrites the vptr to point
4237 // to B::vtbl. This is a problem because Thread2 might not have called a->f()
4238 // and now it will call B::f instead of C::f.
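// One way to avoid this race (an illustrative sketch only, not what this
// test does): make the consumer hand the object back before the producer
// runs the destructors, e.g. via a second, hypothetical queue:
//
//   // Worker (consumer):
//   a->f();
//   Q_done.Put(a);            // "I am finished with 'a'"
//
//   // Waiter (producer):
//   CHECK(Q_done.Get() == a); // no concurrent a->f() is possible any more
//   delete a;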
4240 bool flag_stopped = false;
4243 ProducerConsumerQueue Q(INT_MAX); // Used to pass A* between threads.
4246 A() { printf("A::A()\n"); }
4247 virtual ~A() { printf("A::~A()\n"); }
4248 virtual void f() = 0; // pure virtual.
4252 B() { printf("B::B()\n"); }
4254 // The race is here. <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
4255 printf("B::~B()\n");
4256 // wait until flag_stopped is true.
4257 mu.LockWhen(Condition<bool>(&ArgIsTrue, &flag_stopped));
4259 printf("B::~B() done\n");
4261 virtual void f() = 0; // pure virtual.
4265 C() { printf("C::C()\n"); }
4266 virtual ~C() { printf("C::~C()\n"); }
4267 virtual void f() { }
4277 A *a = reinterpret_cast<A*>(Q.Get());
4281 flag_stopped = true;
4282 ANNOTATE_CONDVAR_SIGNAL(&mu);
4287 printf("test87: positive, race inside DTOR\n");
4288 MyThreadArray t(Waiter, Worker);
4292 REGISTER_TEST2(Run, 87, FEATURE|EXCLUDE_FROM_ALL)
4293 } // namespace test87
4296 // test88: Test for ANNOTATE_IGNORE_WRITES_*{{{1
4298 // a racey write annotated with ANNOTATE_IGNORE_WRITES_BEGIN/END.
4301 ANNOTATE_IGNORE_WRITES_BEGIN();
4303 ANNOTATE_IGNORE_WRITES_END();
4306 printf("test88: negative, test for ANNOTATE_IGNORE_WRITES_*\n");
4311 printf("\tGLOB=%d\n", GLOB);
4313 REGISTER_TEST(Run, 88)
4314 } // namespace test88
4317 // test89: Test for debug info. {{{1
4319 // Simple races with different objects (stack, heap globals; scalars, structs).
4320 // Also, if run with --trace-level=2 this test will show a sequence of
4321 // CTOR and DTOR calls.
4329 ANNOTATE_TRACE_MEMORY(&a);
4338 B() { CHECK(a == 1); }
4339 virtual ~B() { CHECK(a == 3); }
4343 virtual ~C() { a = 3; }
4349 STRUCT *STACK_STRUCT;
4350 STRUCT *HEAP_STRUCT;
4356 STACK_STRUCT->b = 1;
4364 STRUCT stack_struct;
4365 STACK_STRUCT = &stack_struct;
4367 HEAP_STRUCT = new STRUCT;
4369 printf("test89: negative\n");
4370 MyThreadArray t(Worker, Worker);
4377 printf("Using 'a->a': %d\n", a->a);
4380 REGISTER_TEST2(Run, 89, FEATURE|EXCLUDE_FROM_ALL)
4381 } // namespace test89
4384 // test90: FP. Test for a safely-published pointer (read-only). {{{1
4386 // The Publisher creates an object and safely publishes it under a mutex.
4387 // Readers access the object read-only.
4390 // Without annotations Helgrind will issue a false positive in Reader().
4392 // Choices for annotations:
4393 // -- ANNOTATE_CONDVAR_SIGNAL/ANNOTATE_CONDVAR_WAIT
4394 // -- ANNOTATE_MUTEX_IS_USED_AS_CONDVAR
4395 // -- ANNOTATE_PUBLISH_MEMORY_RANGE.
4402 GLOB = (int*)memalign(64, sizeof(int));
4404 if (!Tsan_PureHappensBefore() && !Tsan_FastMode())
4405 ANNOTATE_EXPECT_RACE_FOR_TSAN(GLOB, "test90. FP. This is a false positive");
4417 CHECK(*p == 777); // Race is reported here.
4424 printf("test90: false positive (safely published pointer).\n");
4425 MyThreadArray t(Publisher, Reader, Reader, Reader);
4428 printf("\t*GLOB=%d\n", *GLOB);
4431 REGISTER_TEST(Run, 90)
4432 } // namespace test90
4435 // test91: FP. Test for a safely-published pointer (read-write). {{{1
4437 // Similar to test90.
4438 // The Publisher creates an object and safely publishes it under a mutex MU1.
4439 // Accessors get the object under MU1 and access it (read/write) under MU2.
4441 // Without annotations Helgrind will issue a false positive in Accessor().
4449 GLOB = (int*)memalign(64, sizeof(int));
4451 if (!Tsan_PureHappensBefore() && !Tsan_FastMode())
4452 ANNOTATE_EXPECT_RACE_FOR_TSAN(GLOB, "test91. FP. This is a false positive");
4464 (*p)++; // Race is reported here.
4473 printf("test91: false positive (safely published pointer, read/write).\n");
4474 MyThreadArray t(Publisher, Accessor, Accessor, Accessor);
4477 printf("\t*GLOB=%d\n", *GLOB);
4480 REGISTER_TEST(Run, 91)
4481 } // namespace test91
4484 // test92: TN. Test for a safely-published pointer (read-write), annotated. {{{1
4486 // Similar to test91, but annotated with ANNOTATE_PUBLISH_MEMORY_RANGE.
4489 // Publisher: Accessors:
4493 // 3. ANNOTATE_PUBLISH_...(GLOB) -------\ .
4494 // 4. MU1.Unlock() \ .
4497 // \ c. MU1.Unlock()
4498 // \--> d. Access GLOB
4500 // A happens-before arc is created between ANNOTATE_PUBLISH_MEMORY_RANGE and
4501 // accesses to GLOB.
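// The general shape of the pattern is (sketch; 'ObjType' is a placeholder,
// the real code of test92 below follows this shape):
//
//   // Publisher:
//   ObjType *obj = new ObjType;
//   ... initialize *obj ...
//   ANNOTATE_PUBLISH_MEMORY_RANGE(obj, sizeof(*obj)); // right before publishing
//   MU1.Lock(); GLOB = obj; MU1.Unlock();
//
//   // Accessor:
//   MU1.Lock(); ObjType *p = GLOB; MU1.Unlock();
//   if (p) { ... reads/writes of *p are now ordered after the publish ... }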
4513 for (int i = 0; i < 10; i++) {
4516 // This annotation should go right before the object is published.
4517 ANNOTATE_PUBLISH_MEMORY_RANGE(GLOB, sizeof(*GLOB));
4521 void Accessor(int index) {
4528 p->arr[index]++; // W/o the annotations the race will be reported here.
4529 CHECK(p->arr[index] == 778);
4536 void Accessor0() { Accessor(0); }
4537 void Accessor5() { Accessor(5); }
4538 void Accessor9() { Accessor(9); }
4541 printf("test92: safely published pointer, read/write, annotated.\n");
4542 MyThreadArray t(Publisher, Accessor0, Accessor5, Accessor9);
4545 printf("\t*GLOB=%d\n", GLOB->arr[0]);
4547 REGISTER_TEST(Run, 92)
4548 } // namespace test92
4551 // test93: TP. Test for incorrect usage of ANNOTATE_PUBLISH_MEMORY_RANGE. {{{1
4561 // Incorrect, used after the memory has been accessed in another thread.
4562 ANNOTATE_PUBLISH_MEMORY_RANGE(&GLOB, sizeof(GLOB));
4566 printf("test93: positive, misuse of ANNOTATE_PUBLISH_MEMORY_RANGE\n");
4567 MyThreadArray t(Reader, Publisher);
4570 printf("\tGLOB=%d\n", GLOB);
4572 REGISTER_TEST2(Run, 93, FEATURE|EXCLUDE_FROM_ALL)
4573 } // namespace test93
4576 // test94: TP. Check do_cv_signal/fake segment logic {{{1
4586 usleep(10000); // Make sure the waiter blocks.
4596 usleep(1000*1000); // Make sure CV2.Signal() "happens after" CV.Signal()
4597 usleep(10000); // Make sure the waiter blocks.
4615 GLOB = 2; // READ: no HB-relation between CV.Signal and CV2.Wait !
4618 FAST_MODE_INIT(&GLOB);
4619 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test94: TP.");
4620 printf("test94: TP. Check do_cv_signal/fake segment logic\n");
4621 MyThreadArray mta(Thr1, Thr2, Thr3, Thr4);
4624 printf("\tGLOB=%d\n", GLOB);
4626 REGISTER_TEST(Run, 94);
4627 } // namespace test94
4629 // test95: TP. Check do_cv_signal/fake segment logic {{{1
4639 usleep(1000*1000); // Make sure CV2.Signal() "happens before" CV.Signal()
4640 usleep(10000); // Make sure the waiter blocks.
4650 usleep(10000); // Make sure the waiter blocks.
4668 GLOB = 2; // READ: no HB-relation between CV.Signal and CV2.Wait !
4671 FAST_MODE_INIT(&GLOB);
4672 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test95: TP.");
4673 printf("test95: TP. Check do_cv_signal/fake segment logic\n");
4674 MyThreadArray mta(Thr1, Thr2, Thr3, Thr4);
4677 printf("\tGLOB=%d\n", GLOB);
4679 REGISTER_TEST(Run, 95);
4680 } // namespace test95
4682 // test96: TN. tricky LockSet behaviour {{{1
4683 // 3 threads access the same memory with three different
4684 // locksets: {A, B}, {B, C}, {C, A}.
4685 // These locksets have empty intersection.
4710 printf("test96: FP. tricky LockSet behaviour\n");
4711 ANNOTATE_TRACE_MEMORY(&GLOB);
4712 MyThreadArray mta(Thread1, Thread2, Thread3);
4716 printf("\tGLOB=%d\n", GLOB);
4718 REGISTER_TEST(Run, 96);
4719 } // namespace test96
4721 // test97: This test shows false negative with --fast-mode=yes {{{1
4723 const int HG_CACHELINE_SIZE = 64;
4727 const int ARRAY_SIZE = HG_CACHELINE_SIZE * 4 / sizeof(int);
4728 int array[ARRAY_SIZE];
4729 int * GLOB = &array[ARRAY_SIZE/2];
4731 We use sizeof(array) == 4 * HG_CACHELINE_SIZE to be sure that GLOB points
4732 to memory inside a CacheLineZ which is inside array's memory range
4737 CHECK(777 == *GLOB);
4741 MyThreadArray t(Reader);
4742 if (!Tsan_FastMode())
4743 ANNOTATE_EXPECT_RACE_FOR_TSAN(GLOB, "test97: TP. FN with --fast-mode=yes");
4744 printf("test97: This test shows false negative with --fast-mode=yes\n");
4751 REGISTER_TEST2(Run, 97, FEATURE)
4752 } // namespace test97
4754 // test98: Synchronization via read/write (or send/recv). {{{1
4756 // The synchronization here is done by a pair of read/write calls
4757 // that create a happens-before arc. Same may be done with send/recv.
4758 // Such synchronization is quite unusual in real programs
4759 // (why would one synchronize via a file or socket?), but
4760 // quite possible in unittests where one thread runs as a producer
4761 // and another as a consumer.
4763 // A race detector has to create happens-before arcs for
4764 // {read,send}->{write,recv} even if the file descriptors are different.
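// Minimal sketch of the pattern (the test below does this through a
// symlinked file in /tmp; the shared-data usage and buffer name here are
// placeholders, not the test's actual code):
//
//   // Writer thread:
//   GLOB = 1;                              // touch shared data first
//   write(fd_out, "x", 1);                 // then signal via the file
//
//   // Reader thread:
//   while (read(fd_in, buf, 1) == 0) { }   // returns only after the write()
//   CHECK(GLOB == 1);                      // must be race-free for the detector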
4773 const char *str = "Hey there!\n";
4774 IGNORE_RETURN_VALUE(write(fd_out, str, strlen(str) + 1));
4779 while (read(fd_in, buff, 100) == 0)
4781 printf("read: %s\n", buff);
4786 printf("test98: negative, synchronization via I/O\n");
4789 // we open two files, one for reading and one for writing,
4790 // but the files are actually the same (symlinked).
4791 sprintf(out_name, "/tmp/racecheck_unittest_out.%ld", (long) getpid());
4792 fd_out = creat(out_name, O_WRONLY | S_IRWXU);
4794 // symlink() is not supported on Darwin. Copy the output file name.
4795 strcpy(in_name, out_name);
4797 sprintf(in_name, "/tmp/racecheck_unittest_in.%ld", (long) getpid());
4798 IGNORE_RETURN_VALUE(symlink(out_name, in_name));
4800 fd_in = open(in_name, 0, O_RDONLY);
4803 MyThreadArray t(Writer, Reader);
4806 printf("\tGLOB=%d\n", GLOB);
4813 REGISTER_TEST(Run, 98)
4814 } // namespace test98
4817 // test99: TP. Unit test for a bug in LockWhen*. {{{1
4824 static void Thread1() {
4825 for (int i = 0; i < 100; i++) {
4826 mu.LockWhenWithTimeout(Condition<bool>(&ArgIsTrue, &GLOB), 5);
4833 static void Thread2() {
4834 for (int i = 0; i < 100; i++) {
4842 printf("test99: regression test for LockWhen*\n");
4843 MyThreadArray t(Thread1, Thread2);
4847 REGISTER_TEST(Run, 99);
4848 } // namespace test99
4851 // test100: Test for initialization bit. {{{1
4879 printf("test100: test for initialization bit. \n");
4880 MyThreadArray t(Creator, Worker1, Worker2);
4881 ANNOTATE_TRACE_MEMORY(&G1);
4882 ANNOTATE_TRACE_MEMORY(&G2);
4883 ANNOTATE_TRACE_MEMORY(&G3);
4884 ANNOTATE_TRACE_MEMORY(&G4);
4888 REGISTER_TEST2(Run, 100, FEATURE|EXCLUDE_FROM_ALL)
4889 } // namespace test100
4892 // test101: TN. Two signals and two waits. {{{1
4936 printf("test101: negative\n");
4937 MyThreadArray t(Waiter, Signaller);
4940 printf("\tGLOB=%d\n", GLOB);
4942 REGISTER_TEST(Run, 101)
4943 } // namespace test101
4945 // test102: --fast-mode=yes vs. --initialization-bit=yes {{{1
4947 const int HG_CACHELINE_SIZE = 64;
4951 const int ARRAY_SIZE = HG_CACHELINE_SIZE * 4 / sizeof(int);
4952 int array[ARRAY_SIZE + 1];
4953 int * GLOB = &array[ARRAY_SIZE/2];
4955 We use sizeof(array) == 4 * HG_CACHELINE_SIZE to be sure that GLOB points
4956 to memory inside a CacheLineZ which is inside array's memory range
4961 CHECK(777 == GLOB[0]);
4963 CHECK(777 == GLOB[1]);
4967 MyThreadArray t(Reader);
4968 if (!Tsan_FastMode())
4969 ANNOTATE_EXPECT_RACE_FOR_TSAN(GLOB+0, "test102: TP. FN with --fast-mode=yes");
4970 ANNOTATE_EXPECT_RACE_FOR_TSAN(GLOB+1, "test102: TP");
4971 printf("test102: --fast-mode=yes vs. --initialization-bit=yes\n");
4980 REGISTER_TEST2(Run, 102, FEATURE)
4981 } // namespace test102
4983 // test103: Access different memory locations with different LockSets {{{1
4985 const int N_MUTEXES = 6;
4986 const int LOCKSET_INTERSECTION_SIZE = 3;
4988 int data[1 << LOCKSET_INTERSECTION_SIZE] = {0};
4989 Mutex MU[N_MUTEXES];
4991 inline int LS_to_idx (int ls) {
4992 return (ls >> (N_MUTEXES - LOCKSET_INTERSECTION_SIZE))
4993 & ((1 << LOCKSET_INTERSECTION_SIZE) - 1);
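// Worked example: with N_MUTEXES == 6 and LOCKSET_INTERSECTION_SIZE == 3,
// LS_to_idx(ls) == (ls >> 3) & 7, i.e. only the three high-order bits of the
// 6-bit lockset mask select a data element:
//   LS_to_idx(0b101100 /* 44 */) == 0b101 == 5;
//   LS_to_idx(ls) == 0 for every ls < 8 (all three high bits are zero).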
4997 for (int ls = 0; ls < (1 << N_MUTEXES); ls++) {
4998 if (LS_to_idx(ls) == 0)
5000 for (int m = 0; m < N_MUTEXES; m++)
5004 data[LS_to_idx(ls)]++;
5006 for (int m = N_MUTEXES - 1; m >= 0; m--)
5013 printf("test103: Access different memory locations with different LockSets\n");
5014 MyThreadArray t(Worker, Worker, Worker, Worker);
5018 REGISTER_TEST2(Run, 103, FEATURE)
5019 } // namespace test103
5021 // test104: TP. Simple race (write vs write). Heap mem. {{{1
5036 GLOB = (int*)memalign(64, sizeof(int));
5038 ANNOTATE_EXPECT_RACE(GLOB, "test104. TP.");
5039 ANNOTATE_TRACE_MEMORY(GLOB);
5040 printf("test104: positive\n");
5042 printf("\tGLOB=%d\n", *GLOB);
5045 REGISTER_TEST(Run, 104);
5046 } // namespace test104
5049 // test105: Checks how stack grows. {{{1
5054 int ar[32] __attribute__((unused));
5055 // ANNOTATE_TRACE_MEMORY(&ar[0]);
5056 // ANNOTATE_TRACE_MEMORY(&ar[31]);
5062 int ar[32] __attribute__((unused));
5063 // ANNOTATE_TRACE_MEMORY(&ar[0]);
5064 // ANNOTATE_TRACE_MEMORY(&ar[31]);
5071 printf("test105: negative\n");
5076 printf("\tGLOB=%d\n", GLOB);
5078 REGISTER_TEST(Run, 105)
5079 } // namespace test105
5082 // test106: TN. pthread_once. {{{1
5085 static pthread_once_t once = PTHREAD_ONCE_INIT;
5088 ANNOTATE_TRACE_MEMORY(GLOB);
5093 pthread_once(&once, Init);
5097 pthread_once(&once, Init);
5098 CHECK(*GLOB == 777);
5103 printf("test106: negative\n");
5104 MyThreadArray t(Worker0, Worker1, Worker1, Worker1);
5107 printf("\tGLOB=%d\n", *GLOB);
5109 REGISTER_TEST2(Run, 106, FEATURE)
5110 } // namespace test106
5113 // test107: Test for ANNOTATE_EXPECT_RACE {{{1
5117 printf("test107: negative\n");
5118 ANNOTATE_EXPECT_RACE(&GLOB, "No race in fact. Just checking the tool.");
5119 printf("\tGLOB=%d\n", GLOB);
5121 REGISTER_TEST2(Run, 107, FEATURE|EXCLUDE_FROM_ALL)
5122 } // namespace test107
5125 // test108: TN. initialization of static object. {{{1
5127 // Here we have a function-level static object.
5128 // Starting from gcc 4 this is thread safe,
5129 // but it is not thread safe with many other compilers.
5131 // Helgrind supports this kind of initialization by
5132 // intercepting __cxa_guard_acquire/__cxa_guard_release
5133 // and ignoring all accesses between them.
5134 // Helgrind also intercepts pthread_once in the same manner.
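// Roughly, gcc compiles 'static const Foo *foo = new Foo();' into something
// like the following guarded initialization (a simplified sketch, not literal
// compiler output), which is why intercepting the two guard calls is enough:
//
//   if (!guard.initialized) {
//     if (__cxa_guard_acquire(&guard)) {   // only one thread gets 'true'
//       foo = new Foo();                   // the guarded initialization
//       __cxa_guard_release(&guard);       // other threads now see the value
//     }
//   }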
5138 ANNOTATE_TRACE_MEMORY(&a_);
5141 void Check() const { CHECK(a_ == 42); }
5146 const Foo *GetFoo() {
5147 static const Foo *foo = new Foo();
5156 const Foo *foo = GetFoo();
5162 printf("test108: negative, initialization of static object\n");
5163 MyThreadArray t(Worker0, Worker, Worker);
5167 REGISTER_TEST2(Run, 108, FEATURE)
5168 } // namespace test108
5171 // test109: TN. Checking happens before between parent and child threads. {{{1
5173 // Check that the detector correctly connects
5174 // pthread_create with the new thread
5176 // thread exit with pthread_join
5180 void Worker(void *a) {
5182 // printf("--Worker : %ld %p\n", (int*)a - GLOB, (void*)pthread_self());
5188 printf("test109: negative\n");
5190 for (int i = 0; i < N; i++) {
5191 t[i] = new MyThread(Worker, &GLOB[i]);
5193 for (int i = 0; i < N; i++) {
5194 ANNOTATE_TRACE_MEMORY(&GLOB[i]);
5197 // printf("--Started: %p\n", (void*)t[i]->tid());
5199 for (int i = 0; i < N; i++) {
5200 // printf("--Joining: %p\n", (void*)t[i]->tid());
5202 // printf("--Joined : %p\n", (void*)t[i]->tid());
5205 for (int i = 0; i < N; i++) delete t[i];
5207 printf("\tGLOB=%d\n", GLOB[13]);
5209 REGISTER_TEST(Run, 109)
5210 } // namespace test109
5213 // test110: TP. Simple races with stack, global and heap objects. {{{1
5226 union pi_pv_union { int* pi; void* pv; } POSIX_MEMALIGN;
5244 (*(POSIX_MEMALIGN.pi))++;
5254 MALLOC = (int*)malloc(sizeof(int));
5255 CALLOC = (int*)calloc(1, sizeof(int));
5256 REALLOC = (int*)realloc(NULL, sizeof(int));
5257 VALLOC = (int*)valloc(sizeof(int));
5258 PVALLOC = (int*)valloc(sizeof(int)); // TODO: pvalloc breaks helgrind.
5259 MEMALIGN = (int*)memalign(64, sizeof(int));
5260 CHECK(0 == posix_memalign(&POSIX_MEMALIGN.pv, 64, sizeof(int)));
5261 MMAP = (int*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE,
5262 MAP_PRIVATE | MAP_ANON, -1, 0);
5265 NEW_ARR = new int[10];
5268 FAST_MODE_INIT(STACK);
5269 ANNOTATE_EXPECT_RACE(STACK, "real race on stack object");
5270 FAST_MODE_INIT(&GLOB);
5271 ANNOTATE_EXPECT_RACE(&GLOB, "real race on global object");
5272 FAST_MODE_INIT(&STATIC);
5273 ANNOTATE_EXPECT_RACE(&STATIC, "real race on a static global object");
5274 FAST_MODE_INIT(MALLOC);
5275 ANNOTATE_EXPECT_RACE(MALLOC, "real race on a malloc-ed object");
5276 FAST_MODE_INIT(CALLOC);
5277 ANNOTATE_EXPECT_RACE(CALLOC, "real race on a calloc-ed object");
5278 FAST_MODE_INIT(REALLOC);
5279 ANNOTATE_EXPECT_RACE(REALLOC, "real race on a realloc-ed object");
5280 FAST_MODE_INIT(VALLOC);
5281 ANNOTATE_EXPECT_RACE(VALLOC, "real race on a valloc-ed object");
5282 FAST_MODE_INIT(PVALLOC);
5283 ANNOTATE_EXPECT_RACE(PVALLOC, "real race on a pvalloc-ed object");
5284 FAST_MODE_INIT(MEMALIGN);
5285 ANNOTATE_EXPECT_RACE(MEMALIGN, "real race on a memalign-ed object");
5286 FAST_MODE_INIT(POSIX_MEMALIGN.pi);
5287 ANNOTATE_EXPECT_RACE(POSIX_MEMALIGN.pi, "real race on a posix_memalign-ed object");
5288 FAST_MODE_INIT(MMAP);
5289 ANNOTATE_EXPECT_RACE(MMAP, "real race on a mmap-ed object");
5291 FAST_MODE_INIT(NEW);
5292 ANNOTATE_EXPECT_RACE(NEW, "real race on a new-ed object");
5293 FAST_MODE_INIT(NEW_ARR);
5294 ANNOTATE_EXPECT_RACE(NEW_ARR, "real race on a new[]-ed object");
5296 MyThreadArray t(Worker, Worker, Worker);
5299 printf("test110: positive (race on a stack object)\n");
5300 printf("\tSTACK=%d\n", *STACK);
5310 free(POSIX_MEMALIGN.pv);
5311 munmap(MMAP, sizeof(int));
5315 REGISTER_TEST(Run, 110)
5316 } // namespace test110
5319 // test111: TN. Unit test for a bug related to stack handling. {{{1
5326 void write_to_p(char *p, int val) {
5327 for (int i = 0; i < N; i++)
5331 static bool ArgIsTrue(bool *arg) {
5332 // printf("ArgIsTrue: %d tid=%d\n", *arg, (int)pthread_self());
5333 return *arg == true;
5338 write_to_p(some_stack, 1);
5339 mu.LockWhen(Condition<bool>(&ArgIsTrue, &COND));
5345 char some_more_stack[N];
5346 write_to_p(some_stack, 2);
5347 write_to_p(some_more_stack, 2);
5366 printf("test111: regression test\n");
5367 MyThreadArray t(Worker1, Worker1, Worker2);
5368 // AnnotateSetVerbosity(__FILE__, __LINE__, 3);
5371 // AnnotateSetVerbosity(__FILE__, __LINE__, 1);
5373 REGISTER_TEST2(Run, 111, FEATURE)
5374 } // namespace test111
5376 // test112: STAB. Test for ANNOTATE_PUBLISH_MEMORY_RANGE{{{1
5379 const int N = 64 * 5;
5381 bool ready = false; // under mu
5382 int beg, end; // under mu
5388 bool is_ready = false;
5400 for (int i = b; i < e; i++) {
5406 void PublishRange(int b, int e) {
5407 MyThreadArray t(Worker, Worker);
5408 ready = false; // runs before other threads
5411 ANNOTATE_NEW_MEMORY(GLOB + b, e - b);
5412 ANNOTATE_TRACE_MEMORY(GLOB + b);
5413 for (int j = b; j < e; j++) {
5416 ANNOTATE_PUBLISH_MEMORY_RANGE(GLOB + b, e - b);
5429 printf("test112: stability (ANNOTATE_PUBLISH_MEMORY_RANGE)\n");
5430 GLOB = new char [N];
5432 PublishRange(0, 10);
5435 PublishRange(12, 13);
5436 PublishRange(10, 14);
5438 PublishRange(15, 17);
5439 PublishRange(16, 18);
5441 // do a few more random publishes.
5442 for (int i = 0; i < 20; i++) {
5443 const int begin = rand() % N;
5444 const int size = (rand() % (N - begin)) + 1;
5446 CHECK(begin + size <= N);
5447 PublishRange(begin, begin + size);
5450 printf("GLOB = %d\n", (int)GLOB[0]);
5452 REGISTER_TEST2(Run, 112, STABILITY)
5453 } // namespace test112
5456 // test113: PERF. A lot of lock/unlock calls. Many locks {{{1
5458 const int kNumIter = 100000;
5459 const int kNumLocks = 7;
5460 Mutex MU[kNumLocks];
5462 printf("test113: perf\n");
5463 for (int i = 0; i < kNumIter; i++ ) {
5464 for (int j = 0; j < kNumLocks; j++) {
5465 if (i & (1 << j)) MU[j].Lock();
5467 for (int j = kNumLocks - 1; j >= 0; j--) {
5468 if (i & (1 << j)) MU[j].Unlock();
5472 REGISTER_TEST(Run, 113)
5473 } // namespace test113
5476 // test114: STAB. Recursive lock. {{{1
5483 static int foo = Bar();
5487 static int x = Foo();
5491 printf("test114: stab\n");
5492 MyThreadArray t(Worker, Worker);
5496 REGISTER_TEST(Run, 114)
5497 } // namespace test114
5500 // test115: TN. sem_open. {{{1
5504 const char *kSemName = "drt-test-sem";
5508 sem_t *DoSemOpen() {
5509 // TODO: there is some race report inside sem_open
5510 // for which suppressions do not work... (???)
5511 ANNOTATE_IGNORE_WRITES_BEGIN();
5512 sem_t *sem = sem_open(kSemName, O_CREAT, 0600, 3);
5513 ANNOTATE_IGNORE_WRITES_END();
5526 // if the detector observes a happens-before arc between
5527 // sem_open and sem_wait, it will be silent.
5528 sem_t *sem = DoSemOpen();
5530 CHECK(sem != SEM_FAILED);
5531 CHECK(sem_wait(sem) == 0);
5539 printf("test115: stab (sem_open())\n");
5541 // just check that sem_open is not completely broken
5542 sem_unlink(kSemName);
5543 sem_t * sem = DoSemOpen();
5544 CHECK(sem != SEM_FAILED);
5545 CHECK(sem_wait(sem) == 0);
5546 sem_unlink(kSemName);
5548 // check that sem_open and sem_wait create a happens-before arc.
5549 MyThreadArray t(Worker, Worker, Worker);
5553 sem_unlink(kSemName);
5555 REGISTER_TEST(Run, 115)
5556 } // namespace test115
5559 // test116: TN. some operations with string<> objects. {{{1
5563 string A[10], B[10], C[10];
5564 for (int i = 0; i < 1000; i++) {
5565 for (int j = 0; j < 10; j++) {
5569 a = "sdl;fkjhasdflksj df";
5570 b = "sdf sdf;ljsd ";
5578 for (int j = 0; j < 10; j++) {
5590 printf("test116: negative (strings)\n");
5591 MyThreadArray t(Worker, Worker, Worker);
5595 REGISTER_TEST2(Run, 116, FEATURE|EXCLUDE_FROM_ALL)
5596 } // namespace test116
5598 // test117: TN. Many calls to function-scope static init. {{{1
5607 void Worker(void *a) {
5608 static int foo = Foo();
5613 printf("test117: negative\n");
5615 for (int i = 0; i < N; i++) {
5616 t[i] = new MyThread(Worker);
5618 for (int i = 0; i < N; i++) {
5621 for (int i = 0; i < N; i++) {
5624 for (int i = 0; i < N; i++) delete t[i];
5626 REGISTER_TEST(Run, 117)
5627 } // namespace test117
5631 // test118 PERF: One signal, multiple waits. {{{1
5634 const int kNumIter = 2000000;
5637 ANNOTATE_CONDVAR_SIGNAL(&GLOB);
5640 for (int i = 0; i < kNumIter; i++) {
5641 ANNOTATE_CONDVAR_WAIT(&GLOB);
5642 if (i == kNumIter / 2)
5647 printf("test118: perf\n");
5648 MyThreadArray t(Signaller, Waiter, Signaller, Waiter);
5651 printf("\tGLOB=%d\n", GLOB);
5653 REGISTER_TEST(Run, 118)
5654 } // namespace test118
5657 // test119: TP. Testing that malloc does not introduce any HB arc. {{{1
5670 printf("test119: positive (checking if malloc creates HB arcs)\n");
5671 FAST_MODE_INIT(&GLOB);
5672 if (!(Tsan_PureHappensBefore() && kMallocUsesMutex))
5673 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "true race");
5674 MyThreadArray t(Worker1, Worker2);
5677 printf("\tGLOB=%d\n", GLOB);
5679 REGISTER_TEST(Run, 119)
5680 } // namespace test119
5683 // test120: TP. Thread1: write then read. Thread2: read. {{{1
5689 CHECK(GLOB); // read
5694 CHECK(GLOB >= 0); // read
5698 FAST_MODE_INIT(&GLOB);
5699 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "TP (T1: write then read, T2: read)");
5700 printf("test120: positive\n");
5701 MyThreadArray t(Thread1, Thread2);
5705 printf("\tGLOB=%d\n", GLOB);
5707 REGISTER_TEST(Run, 120)
5708 } // namespace test120
5711 // test121: TP. Example of double-checked-locking {{{1
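// The classic broken pattern exercised by this test looks like this
// (simplified sketch of the code below):
//
//   if (foo == NULL) {                 // unsynchronized read of 'foo'
//     MutexLock lock(&mu);
//     if (foo == NULL)
//       foo = new Foo;                 // published without a release barrier
//   }
//   CHECK(foo->a == 42);               // may observe a half-constructed object
//
// A fixed variant would store 'foo' with release semantics and load it with
// acquire semantics (e.g. GCC __atomic builtins or C++11 std::atomic), which
// this pre-C++11 test intentionally does not do.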
5715 } __attribute__ ((aligned (64)));
5722 MutexLock lock(&mu);
5724 ANNOTATE_EXPECT_RACE_FOR_TSAN(&foo, "test121. Double-checked locking (ptr)");
5726 if (!Tsan_FastMode())
5727 ANNOTATE_EXPECT_RACE_FOR_TSAN(&foo->a, "test121. Double-checked locking (obj)");
5735 CHECK(foo && foo->a == 42);
5738 void Worker1() { UseMe(); }
5739 void Worker2() { UseMe(); }
5740 void Worker3() { UseMe(); }
5744 FAST_MODE_INIT(&foo);
5745 printf("test121: TP. Example of double-checked-locking\n");
5746 MyThreadArray t1(Worker1, Worker2, Worker3);
5751 REGISTER_TEST(Run, 121)
5752 } // namespace test121
5754 // test122 TP: Simple test with RWLock {{{1
5760 void WriteWhileHoldingReaderLock(int *p) {
5762 ReaderLockScoped lock(&mu); // Reader lock for writing. -- bug.
5766 void CorrectWrite(int *p) {
5767 WriterLockScoped lock(&mu);
5771 void Thread1() { WriteWhileHoldingReaderLock(&VAR1); }
5772 void Thread2() { CorrectWrite(&VAR1); }
5773 void Thread3() { CorrectWrite(&VAR2); }
5774 void Thread4() { WriteWhileHoldingReaderLock(&VAR2); }
5778 printf("test122: positive (rw-lock)\n");
5781 ANNOTATE_TRACE_MEMORY(&VAR1);
5782 ANNOTATE_TRACE_MEMORY(&VAR2);
5783 if (!Tsan_PureHappensBefore()) {
5784 ANNOTATE_EXPECT_RACE_FOR_TSAN(&VAR1, "test122. TP. ReaderLock-ed while writing");
5785 ANNOTATE_EXPECT_RACE_FOR_TSAN(&VAR2, "test122. TP. ReaderLock-ed while writing");
5787 MyThreadArray t(Thread1, Thread2, Thread3, Thread4);
5791 REGISTER_TEST(Run, 122)
5792 } // namespace test122
5795 // test123 TP: accesses of different sizes. {{{1
5807 // Q. Hey dude, why so many functions?
5808 // A. I need different stack traces for different accesses.
5810 void Wr64_0() { MEM[0].u64[0] = 1; }
5811 void Wr64_1() { MEM[1].u64[0] = 1; }
5812 void Wr64_2() { MEM[2].u64[0] = 1; }
5813 void Wr64_3() { MEM[3].u64[0] = 1; }
5814 void Wr64_4() { MEM[4].u64[0] = 1; }
5815 void Wr64_5() { MEM[5].u64[0] = 1; }
5816 void Wr64_6() { MEM[6].u64[0] = 1; }
5817 void Wr64_7() { MEM[7].u64[0] = 1; }
5819 void Wr32_0() { MEM[0].u32[0] = 1; }
5820 void Wr32_1() { MEM[1].u32[1] = 1; }
5821 void Wr32_2() { MEM[2].u32[0] = 1; }
5822 void Wr32_3() { MEM[3].u32[1] = 1; }
5823 void Wr32_4() { MEM[4].u32[0] = 1; }
5824 void Wr32_5() { MEM[5].u32[1] = 1; }
5825 void Wr32_6() { MEM[6].u32[0] = 1; }
5826 void Wr32_7() { MEM[7].u32[1] = 1; }
5828 void Wr16_0() { MEM[0].u16[0] = 1; }
5829 void Wr16_1() { MEM[1].u16[1] = 1; }
5830 void Wr16_2() { MEM[2].u16[2] = 1; }
5831 void Wr16_3() { MEM[3].u16[3] = 1; }
5832 void Wr16_4() { MEM[4].u16[0] = 1; }
5833 void Wr16_5() { MEM[5].u16[1] = 1; }
5834 void Wr16_6() { MEM[6].u16[2] = 1; }
5835 void Wr16_7() { MEM[7].u16[3] = 1; }
5837 void Wr8_0() { MEM[0].u8[0] = 1; }
5838 void Wr8_1() { MEM[1].u8[1] = 1; }
5839 void Wr8_2() { MEM[2].u8[2] = 1; }
5840 void Wr8_3() { MEM[3].u8[3] = 1; }
5841 void Wr8_4() { MEM[4].u8[4] = 1; }
5842 void Wr8_5() { MEM[5].u8[5] = 1; }
5843 void Wr8_6() { MEM[6].u8[6] = 1; }
5844 void Wr8_7() { MEM[7].u8[7] = 1; }
5890 void W00() { WriteAll64(); }
5891 void W01() { WriteAll64(); }
5892 void W02() { WriteAll64(); }
5894 void W10() { WriteAll32(); }
5895 void W11() { WriteAll32(); }
5896 void W12() { WriteAll32(); }
5898 void W20() { WriteAll16(); }
5899 void W21() { WriteAll16(); }
5900 void W22() { WriteAll16(); }
5902 void W30() { WriteAll8(); }
5903 void W31() { WriteAll8(); }
5904 void W32() { WriteAll8(); }
5906 typedef void (*F)(void);
5908 void TestTwoSizes(F f1, F f2) {
5909 // first f1, then f2
5910 ANNOTATE_NEW_MEMORY(&MEM, sizeof(MEM));
5911 memset(&MEM, 0, sizeof(MEM));
5912 MyThreadArray t1(f1, f2);
5916 ANNOTATE_NEW_MEMORY(&MEM, sizeof(MEM));
5917 memset(&MEM, 0, sizeof(MEM));
5918 MyThreadArray t2(f2, f1);
5924 printf("test123: positive (different sizes)\n");
5925 TestTwoSizes(W00, W10);
5926 // TestTwoSizes(W01, W20);
5927 // TestTwoSizes(W02, W30);
5928 // TestTwoSizes(W11, W21);
5929 // TestTwoSizes(W12, W31);
5930 // TestTwoSizes(W22, W32);
5933 REGISTER_TEST2(Run, 123, FEATURE|EXCLUDE_FROM_ALL)
5934 } // namespace test123
5937 // test124: What happens if we delete an unlocked lock? {{{1
5939 // This test does not work with pthreads (you can't call
5940 // pthread_mutex_destroy on a locked lock).
5944 Mutex *a_large_local_array_of_mutexes;
5945 a_large_local_array_of_mutexes = new Mutex[N];
5946 for (int i = 0; i < N; i++) {
5947 a_large_local_array_of_mutexes[i].Lock();
5949 delete []a_large_local_array_of_mutexes;
5954 printf("test124: negative\n");
5955 MyThreadArray t(Worker, Worker, Worker);
5958 printf("\tGLOB=%d\n", GLOB);
5960 REGISTER_TEST2(Run, 124, FEATURE|EXCLUDE_FROM_ALL)
5961 } // namespace test124
5964 // test125 TN: Backwards lock (annotated). {{{1
5966 // This test uses the "Backwards mutex" locking protocol.
5967 // We take a *reader* lock when writing to per-thread data
5968 // (GLOB[thread_num]) and we take a *writer* lock when we
5969 // are reading from the entire array at once.
5971 // Such a locking protocol is not understood by ThreadSanitizer's
5972 // hybrid state machine. So, you either have to use a pure-happens-before
5973 // detector ("tsan --pure-happens-before") or apply pure happens-before mode
5974 // to this particular lock by using ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&mu).
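// In other words, the protocol is (illustrative sketch of the code that
// follows; the exact Adder body is an assumption):
//
//   // Adder (one slot per thread):
//   int my_num = AtomicIncrement(&adder_num, 1);
//   ReaderLockScoped lock(&mu);          // "read" lock, but writes GLOB[my_num]
//   GLOB[my_num]++;                      // private slot, so no write-write race
//
//   // Aggregator:
//   WriterLockScoped lock(&mu);          // "write" lock, but only reads
//   for (int i = 0; i < n_threads; i++) sum += GLOB[i];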
5976 const int n_threads = 3;
5978 int GLOB[n_threads];
5980 int adder_num; // updated atomically.
5983 int my_num = AtomicIncrement(&adder_num, 1);
5985 ReaderLockScoped lock(&mu);
5992 WriterLockScoped lock(&mu);
5993 for (int i = 0; i < n_threads; i++) {
5997 printf("sum=%d\n", sum);
6001 printf("test125: negative\n");
6003 ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&mu);
6005 // run Adders, then Aggregator
6007 MyThreadArray t(Adder, Adder, Adder, Aggregator);
6012 // Run Aggregator first.
6015 MyThreadArray t(Aggregator, Adder, Adder, Adder);
6021 REGISTER_TEST(Run, 125)
6022 } // namespace test125
6024 // test126 TN: test for BlockingCounter {{{1
6026 BlockingCounter *blocking_counter;
6029 CHECK(blocking_counter);
6031 blocking_counter->DecrementCount();
6034 printf("test126: negative\n");
6035 MyThreadArray t(Worker, Worker, Worker);
6036 blocking_counter = new BlockingCounter(3);
6038 blocking_counter->Wait();
6041 printf("\tGLOB=%d\n", GLOB);
6043 REGISTER_TEST(Run, 126)
6044 } // namespace test126
6047 // test127. Bad code: unlocking a mutex locked by another thread. {{{1
6058 printf("test127: unlocking a mutex locked by another thread.\n");
6059 MyThreadArray t(Thread1, Thread2);
6063 REGISTER_TEST(Run, 127)
6064 } // namespace test127
6066 // test128. Suppressed code in concurrent accesses {{{1
6067 // Please use --suppressions=unittest.supp flag when running this test.
6077 void ThisFunctionShouldBeSuppressed() {
6081 printf("test128: Suppressed code in concurrent accesses.\n");
6082 MyThreadArray t(Worker, ThisFunctionShouldBeSuppressed);
6086 REGISTER_TEST2(Run, 128, FEATURE | EXCLUDE_FROM_ALL)
6087 } // namespace test128
6089 // test129: TN. Synchronization via ReaderLockWhen(). {{{1
6093 bool WeirdCondition(int* param) {
6094 *param = GLOB; // a write into Waiter's memory
6099 MU.ReaderLockWhen(Condition<int>(WeirdCondition, &param));
6105 usleep(100000); // Make sure the waiter blocks.
6108 MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
6111 printf("test129: Synchronization via ReaderLockWhen()\n");
6112 MyThread mt(Waiter, NULL, "Waiter Thread");
6116 printf("\tGLOB=%d\n", GLOB);
6118 REGISTER_TEST2(Run, 129, FEATURE);
6119 } // namespace test129
6121 // test130: TN. Per-thread. {{{1
6124 // This test verifies that the race detector handles
6125 // thread-local storage (TLS) correctly.
6126 // As of 09-03-30 ThreadSanitizer has a bug:
6128 // - Thread1 touches per_thread_global
6130 // - Thread2 starts (and there is no happens-before relation between it and
6132 // - Thread2 touches per_thread_global
6133 // It may happen that Thread2 will have per_thread_global at the same address
6134 // as Thread1. Since there is no happens-before relation between threads,
6135 // ThreadSanitizer reports a race.
6137 // test131 does the same for stack.
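// Sketch of the scenario (thread ids and the address are made up):
//
//   Thread1: &per_thread_global[1] == 0x7f0012340044; writes it; exits.
//   Thread2: starts later, is given the same TLS block, so
//            &per_thread_global[1] == 0x7f0012340044; writes it.
//
// The two writes never overlap in time, but a detector that keys its state
// by address alone sees two unordered writes to the same address and
// reports a race.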
6139 static __thread int per_thread_global[10] = {0};
6141 void RealWorker() { // Touch per_thread_global.
6142 per_thread_global[1]++;
6146 void Worker() { // Spawn a few threads that touch per_thread_global.
6147 MyThreadArray t(RealWorker, RealWorker);
6151 void Worker0() { sleep(0); Worker(); }
6152 void Worker1() { sleep(1); Worker(); }
6153 void Worker2() { sleep(2); Worker(); }
6154 void Worker3() { sleep(3); Worker(); }
6157 printf("test130: Per-thread\n");
6158 MyThreadArray t1(Worker0, Worker1, Worker2, Worker3);
6161 printf("\tper_thread_global=%d\n", per_thread_global[1]);
6163 REGISTER_TEST(Run, 130)
6165 } // namespace test130
6168 // test131: TN. Stack. {{{1
6170 // Same as test130, but for stack.
6172 void RealWorker() { // Touch stack.
6177 void Worker() { // Spawn a few threads that touch the stack.
6178 MyThreadArray t(RealWorker, RealWorker);
6182 void Worker0() { sleep(0); Worker(); }
6183 void Worker1() { sleep(1); Worker(); }
6184 void Worker2() { sleep(2); Worker(); }
6185 void Worker3() { sleep(3); Worker(); }
6188 printf("test131: stack\n");
6189 MyThreadArray t(Worker0, Worker1, Worker2, Worker3);
6193 REGISTER_TEST(Run, 131)
6194 } // namespace test131
6197 // test132: TP. Simple race (write vs write). Works in fast-mode. {{{1
6200 void Worker() { GLOB = 1; }
6203 FAST_MODE_INIT(&GLOB);
6204 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test132");
6205 printf("test132: positive; &GLOB=%p\n", &GLOB);
6206 ANNOTATE_TRACE_MEMORY(&GLOB);
6208 MyThreadArray t(Worker, Worker);
6216 REGISTER_TEST(Run, 132);
6217 } // namespace test132
6220 // test133: TP. Simple race (write vs write). Works in fast mode. {{{1
6222 // Same as test132, but everything is run from a separate thread spawned from
6225 void Worker() { GLOB = 1; }
6228 FAST_MODE_INIT(&GLOB);
6229 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test133");
6230 printf("test133: positive; &GLOB=%p\n", &GLOB);
6231 ANNOTATE_TRACE_MEMORY(&GLOB);
6233 MyThreadArray t(Worker, Worker);
6242 REGISTER_TEST(Run, 133);
6243 } // namespace test133
6246 // test134 TN. Swap. Variant of test79. {{{1
6249 typedef __gnu_cxx::hash_map<int, int> map_t;
6251 typedef std::map<int, int> map_t;
6255 // Here we use swap to pass map between threads.
6256 // The synchronization is correct, but w/o the annotation
6257 // any hybrid detector will complain.
6259 // Swap is very unfriendly to the lock-set (and hybrid) race detectors.
6260 // Since tmp is destructed outside the mutex, we need to have a happens-before
6261 // arc between any prior access to map and here.
6262 // Since the internals of tmp are created outside the mutex and are passed to
6263 // another thread, we need to have a h-b arc between here and any future access.
6264 // These arcs can be created by HAPPENS_{BEFORE,AFTER} annotations, but it is
6265 // much simpler to apply pure-happens-before mode to the mutex mu.
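// The resulting Swapper is (sketch; the real code follows below):
//
//   void Swapper() {
//     map_t tmp;
//     MutexLock lock(&mu);
//     ANNOTATE_HAPPENS_AFTER(&map);    // pairs with accesses made before the swap
//     map.swap(tmp);
//     ANNOTATE_HAPPENS_BEFORE(&map);   // pairs with accesses made after the swap
//   }                                  // 'tmp' (the old map) dies outside 'mu'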
6268 MutexLock lock(&mu);
6269 ANNOTATE_HAPPENS_AFTER(&map);
6270 // We swap the new empty map 'tmp' with 'map'.
6272 ANNOTATE_HAPPENS_BEFORE(&map);
6273 // tmp (which is the old version of map) is destroyed here.
6277 MutexLock lock(&mu);
6278 ANNOTATE_HAPPENS_AFTER(&map);
6280 ANNOTATE_HAPPENS_BEFORE(&map);
6284 printf("test134: negative (swap)\n");
6285 // ********************** Shorter way: ***********************
6286 // ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&mu);
6287 MyThreadArray t(Worker, Worker, Swapper, Worker, Worker);
6291 REGISTER_TEST(Run, 134)
6292 } // namespace test134
6294 // test135 TN. Swap. Variant of test79. {{{1
6298 const long SIZE = 65536;
6299 for (int i = 0; i < 32; i++) {
6300 int *ptr = (int*)mmap(NULL, SIZE, PROT_READ | PROT_WRITE,
6301 MAP_PRIVATE | MAP_ANON, -1, 0);
6308 MyThreadArray t(SubWorker, SubWorker, SubWorker, SubWorker);
6314 printf("test135: negative (mmap)\n");
6315 MyThreadArray t(Worker, Worker, Worker, Worker);
6319 REGISTER_TEST(Run, 135)
6320 } // namespace test135
6322 // test136. Unlock twice. {{{1
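// Note: with PTHREAD_MUTEX_ERRORCHECK the second unlock is expected to fail
// (POSIX specifies EPERM when the calling thread does not own the mutex)
// instead of silently corrupting the mutex; the printfs below just show the
// raw return codes.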
6325 printf("test136: unlock twice\n");
6326 pthread_mutexattr_t attr;
6327 CHECK(0 == pthread_mutexattr_init(&attr));
6328 CHECK(0 == pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK));
6331 CHECK(0 == pthread_mutex_init(&mu, &attr));
6332 CHECK(0 == pthread_mutex_lock(&mu));
6333 CHECK(0 == pthread_mutex_unlock(&mu));
6334 int ret_unlock = pthread_mutex_unlock(&mu); // unlocking twice.
6335 int ret_destroy = pthread_mutex_destroy(&mu);
6336 printf(" pthread_mutex_unlock returned %d\n", ret_unlock);
6337 printf(" pthread_mutex_destroy returned %d\n", ret_destroy);
6341 REGISTER_TEST(Run, 136)
6342 } // namespace test136
6344 // test137 TP. Races on stack variables. {{{1
6347 ProducerConsumerQueue q(10);
6351 int *tmp = (int*)q.Get();
6353 int *racey = &stack;
6357 // We may miss the races if we sleep less due to die_memory events...
6362 printf("test137: TP. Races on stack variables.\n");
6364 MyThreadArray t(Worker, Worker, Worker, Worker);
6370 REGISTER_TEST2(Run, 137, FEATURE | EXCLUDE_FROM_ALL)
6371 } // namespace test137
6373 // test138 FN. Two closures hit the same thread in ThreadPool. {{{1
6383 FAST_MODE_INIT(&GLOB);
6384 printf("test138: FN. Two closures hit the same thread in ThreadPool.\n");
6386 // When using thread pools, two concurrent callbacks might be scheduled
6387 // onto the same executor thread. As a result, an unnecessary happens-before
6388 // relation may be introduced between the callbacks.
6389 // If we set the number of executor threads to 1, any known data
6390 // race detector will be silent. However, the same situation may happen
6391 // with any number of executor threads (with some probability).
6394 tp.Add(NewCallback(Worker));
6395 tp.Add(NewCallback(Worker));
6398 REGISTER_TEST2(Run, 138, FEATURE)
6399 } // namespace test138
6401 // test139: FN. A true race hidden by reference counting annotation. {{{1
6404 RefCountedClass *obj;
6407 GLOB++; // First access.
6414 GLOB++; // Second access.
6418 FAST_MODE_INIT(&GLOB);
6419 printf("test139: FN. A true race hidden by reference counting annotation.\n");
6421 obj = new RefCountedClass;
6422 obj->AnnotateUnref();
6425 MyThreadArray mt(Worker1, Worker2);
6430 REGISTER_TEST2(Run, 139, FEATURE)
6431 } // namespace test139
6433 // test140 TN. Swap. Variant of test79 and test134. {{{1
6436 typedef __gnu_cxx::hash_map<int, int> Container;
6438 typedef std::map<int,int> Container;
6441 static Container container;
6443 // Here we use swap to pass a Container between threads.
6444 // The synchronization is correct, but w/o the annotation
6445 // any hybrid detector will complain.
6447 // Unlike test134, we try to have a minimal set of annotations
6448 // so that extra h-b arcs do not hide other races.
6450 // Swap is very unfriendly to the lock-set (and hybrid) race detectors.
6451 // Since tmp is destructed outside the mutex, we need to have a happens-before
6452 // arc between any prior access to map and here.
6453 // Since the internals of tmp are created outside the mutex and are passed to
6454 // another thread, we need to have a h-b arc between here and any future access.
6456 // We want to be able to annotate swapper so that we don't need to annotate
6460 tmp[1] = tmp[2] = tmp[3] = 0;
6462 MutexLock lock(&mu);
6463 container.swap(tmp);
6464 // we are unpublishing the old container.
6465 ANNOTATE_UNPUBLISH_MEMORY_RANGE(&container, sizeof(container));
6466 // we are publishing the new container.
6467 ANNOTATE_PUBLISH_MEMORY_RANGE(&container, sizeof(container));
6471 // tmp (which is the old version of container) is destroyed here.
6475 MutexLock lock(&mu);
6477 int *v = &container[2];
6478 for (int i = 0; i < 10; i++) {
6479 // if uncommented, this will break ANNOTATE_UNPUBLISH_MEMORY_RANGE():
6480 // ANNOTATE_HAPPENS_BEFORE(v);
6488 printf("test140: negative (swap) %p\n", &container);
6489 MyThreadArray t(Worker, Worker, Swapper, Worker, Worker);
6493 REGISTER_TEST(Run, 140)
6494 } // namespace test140
6496 // test141 FP. unlink/fopen, rmdir/opendir. {{{1
6500 char *dir_name = NULL,
6506 // unlink deletes a file 'filename'
6507 // which exits the spin-loop in Waiter1().
6508 printf(" Deleting file...\n");
6509 CHECK(unlink(filename) == 0);
6514 while ((tmp = fopen(filename, "r")) != NULL) {
6518 printf(" ...file has been deleted\n");
6525 // rmdir deletes a directory 'dir_name'
6526 // which exits the spin-loop in Waker().
6527 printf(" Deleting directory...\n");
6528 CHECK(rmdir(dir_name) == 0);
6533 while ((tmp = opendir(dir_name)) != NULL) {
6537 printf(" ...directory has been deleted\n");
6542 FAST_MODE_INIT(&GLOB1);
6543 FAST_MODE_INIT(&GLOB2);
6544 printf("test141: FP. unlink/fopen, rmdir/opendir.\n");
6546 dir_name = strdup("/tmp/tsan-XXXXXX");
6547 IGNORE_RETURN_VALUE(mkdtemp(dir_name));
6549 filename = strdup((std::string() + dir_name + "/XXXXXX").c_str());
6550 const int fd = mkstemp(filename);
6554 MyThreadArray mta1(Waker1, Waiter1);
6558 MyThreadArray mta2(Waker2, Waiter2);
6566 REGISTER_TEST(Run, 141)
6567 } // namespace test141
6570 // Simple FIFO queue annotated with PCQ annotations. {{{1
6571 class FifoMessageQueue {
6573 FifoMessageQueue() { ANNOTATE_PCQ_CREATE(this); }
6574 ~FifoMessageQueue() { ANNOTATE_PCQ_DESTROY(this); }
6575 // Send a message. 'message' should be positive.
6576 void Put(int message) {
6578 MutexLock lock(&mu_);
6579 ANNOTATE_PCQ_PUT(this);
6582 // Return the message from the queue and pop it
6583 // or return 0 if there are no messages.
6585 MutexLock lock(&mu_);
6586 if (q_.empty()) return 0;
6587 int res = q_.front();
6589 ANNOTATE_PCQ_GET(this);
6598 // test142: TN. Check PCQ_* annotations. {{{1
6600 // Putter writes to array[i] and sends a message 'i'.
6601 // Getters receive messages and read array[message].
6602 // PCQ_* annotations calm down the hybrid detectors.
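// Sketch of the pattern being exercised (the queue object name here is a
// placeholder; the real Putter/Getter code follows below):
//
//   // Putter:
//   array[i] = i * i;      // write the payload first
//   q.Put(i);              // ANNOTATE_PCQ_PUT(this) runs inside Put()
//
//   // Getter:
//   int res = q.Get();     // ANNOTATE_PCQ_GET(this) runs inside Get()
//   if (res) CHECK(array[res] == res * res);   // payload must be visible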
6610 for (int i = 1; i <= N; i++) {
6618 int non_zero_received = 0;
6619 for (int i = 1; i <= N; i++) {
6622 CHECK(array[res] == res * res);
6623 non_zero_received++;
6627 printf("T=%zd: non_zero_received=%d\n",
6628 (size_t)pthread_self(), non_zero_received);
6632 printf("test142: tests PCQ annotations\n");
6633 MyThreadArray t(Putter, Getter, Getter);
6637 REGISTER_TEST(Run, 142)
6638 } // namespace test142
6641 // test143: TP. Check PCQ_* annotations. {{{1
6644 // We have a race on GLOB between Putter and one of the Getters.
6645 // Pure h-b will not see it.
6646 // If FifoMessageQueue was annotated using HAPPENS_BEFORE/AFTER, the race would
6648 // PCQ_* annotations do not hide this race.
6661 CHECK(GLOB == 1); // Race here
6666 if (!Tsan_PureHappensBefore()) {
6667 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "true races");
6669 printf("test143: tests PCQ annotations (true positive)\n");
6670 MyThreadArray t(Putter, Getter, Getter);
6674 REGISTER_TEST(Run, 143);
6675 } // namespace test143
6685 REGISTER_TEST2(Run, 300, RACE_DEMO)
6686 } // namespace test300
6688 // test301: Simple race. {{{1
6690 Mutex mu1; // This Mutex guards var.
6691 Mutex mu2; // This Mutex is not related to var.
6692 int var; // GUARDED_BY(mu1)
6694 void Thread1() { // Runs in thread named 'test-thread-1'.
6695 MutexLock lock(&mu1); // Correct Mutex.
6699 void Thread2() { // Runs in thread named 'test-thread-2'.
6700 MutexLock lock(&mu2); // Wrong Mutex.
6706 printf("test301: simple race.\n");
6707 MyThread t1(Thread1, NULL, "test-thread-1");
6708 MyThread t2(Thread2, NULL, "test-thread-2");
6714 REGISTER_TEST2(Run, 301, RACE_DEMO)
6715 } // namespace test301
6717 // test302: Complex race which happens at least twice. {{{1
6719 // In this test we have many different accesses to GLOB and only one access
6720 // is not synchronized properly.
6726 for(int i = 0; i < 100; i++) {
6729 // This read is protected correctly.
6730 MU1.Lock(); CHECK(GLOB >= 0); MU1.Unlock();
6733 // Here we used the wrong lock! The cause of the race is here.
6734 MU2.Lock(); CHECK(GLOB >= 0); MU2.Unlock();
6737 // This read is protected correctly.
6738 MU1.Lock(); CHECK(GLOB >= 0); MU1.Unlock();
6741 // This write is protected correctly.
6742 MU1.Lock(); GLOB++; MU1.Unlock();
6745 // sleep a bit so that the threads interleave
6746 // and the race happens at least twice.
6752 printf("test302: Complex race that happens twice.\n");
6753 MyThread t1(Worker), t2(Worker);
6756 t1.Join(); t2.Join();
6758 REGISTER_TEST2(Run, 302, RACE_DEMO)
6759 } // namespace test302
6762 // test303: Need to trace the memory to understand the report. {{{1
6767 void Worker1() { CHECK(GLOB >= 0); }
6768 void Worker2() { MU.Lock(); GLOB=1; MU.Unlock();}
6771 printf("test303: a race that needs annotations.\n");
6772 ANNOTATE_TRACE_MEMORY(&GLOB);
6773 MyThreadArray t(Worker1, Worker2);
6777 REGISTER_TEST2(Run, 303, RACE_DEMO)
6778 } // namespace test303
6782 // test304: Can not trace the memory, since it is a library object. {{{1
6789 ANNOTATE_CONDVAR_SIGNAL((void*)0xDEADBEAF);
6790 MU.Lock(); CHECK(STR->length() >= 4); MU.Unlock();
6794 ANNOTATE_CONDVAR_SIGNAL((void*)0xDEADBEAF);
6795 CHECK(STR->length() >= 4); // Unprotected!
6799 ANNOTATE_CONDVAR_SIGNAL((void*)0xDEADBEAF);
6800 MU.Lock(); CHECK(STR->length() >= 4); MU.Unlock();
6804 ANNOTATE_CONDVAR_SIGNAL((void*)0xDEADBEAF);
6805 MU.Lock(); *STR += " + a very very long string"; MU.Unlock();
6809 STR = new string ("The String");
6810 printf("test304: a race where memory tracing does not work.\n");
6811 MyThreadArray t(Worker1, Worker2, Worker3, Worker4);
6815 printf("%s\n", STR->c_str());
6818 REGISTER_TEST2(Run, 304, RACE_DEMO)
6819 } // namespace test304
// test305: A bit more tricky: two locks used inconsistently. {{{1
namespace test305 {
int GLOB = 0;

// In this test GLOB is protected by MU1 and MU2, but inconsistently.
// The TRACES observed by helgrind are:
// TRACE[1]: Access{T2/S2 wr} -> new State{Mod; #LS=2; #SS=1; T2/S2}
// TRACE[2]: Access{T4/S9 wr} -> new State{Mod; #LS=1; #SS=2; T2/S2, T4/S9}
// TRACE[3]: Access{T5/S13 wr} -> new State{Mod; #LS=1; #SS=3; T2/S2, T4/S9, T5/S13}
// TRACE[4]: Access{T6/S19 wr} -> new State{Mod; #LS=0; #SS=4; T2/S2, T4/S9, T5/S13, T6/S19}
//
// The guilty access is either Worker2() or Worker4(), depending on
// which mutex is supposed to protect GLOB.
Mutex MU1;
Mutex MU2;
void Worker1() { MU1.Lock(); MU2.Lock(); GLOB=1; MU2.Unlock(); MU1.Unlock(); }
void Worker2() { MU1.Lock(); GLOB=2; MU1.Unlock(); }
void Worker3() { MU1.Lock(); MU2.Lock(); GLOB=3; MU2.Unlock(); MU1.Unlock(); }
void Worker4() { MU2.Lock(); GLOB=4; MU2.Unlock(); }
void Run() {
  ANNOTATE_TRACE_MEMORY(&GLOB);
  printf("test305: simple race.\n");
  MyThread t1(Worker1), t2(Worker2), t3(Worker3), t4(Worker4);
  t1.Start(); usleep(100);
  t2.Start(); usleep(100);
  t3.Start(); usleep(100);
  t4.Start(); usleep(100);
  t1.Join(); t2.Join(); t3.Join(); t4.Join();
}
REGISTER_TEST2(Run, 305, RACE_DEMO)
}  // namespace test305
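// Illustrative sketch (compiled out): the ambiguity described above disappears
// once a single lock discipline is chosen. If MU1 is declared to be the mutex
// that protects GLOB, the odd one out is Worker4(), and a hypothetical fix is:
#if 0
void Worker4_Fixed() { MU1.Lock(); MU2.Lock(); GLOB=4; MU2.Unlock(); MU1.Unlock(); }
#endif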
// test306: Two locks are used to protect a var. {{{1
namespace test306 {
int GLOB = 0;
// Thread1 and Thread2 access the var under two locks.
// Thread3 uses no locks.
Mutex MU1;
Mutex MU2;
void Worker1() { MU1.Lock(); MU2.Lock(); GLOB=1; MU2.Unlock(); MU1.Unlock(); }
void Worker2() { MU1.Lock(); MU2.Lock(); GLOB=3; MU2.Unlock(); MU1.Unlock(); }
void Worker3() { GLOB=4; }
void Run() {
  ANNOTATE_TRACE_MEMORY(&GLOB);
  printf("test306: simple race.\n");
  MyThread t1(Worker1), t2(Worker2), t3(Worker3);
  t1.Start(); usleep(100);
  t2.Start(); usleep(100);
  t3.Start(); usleep(100);
  t1.Join(); t2.Join(); t3.Join();
}
REGISTER_TEST2(Run, 306, RACE_DEMO)
}  // namespace test306
// test307: Simple race, code with control flow {{{1
namespace test307 {
int *GLOB = NULL;
volatile /* to fool the compiler */ bool some_condition = true;

void SomeFunc() { }

int FunctionWithControlFlow() {
  int unrelated_stuff = 0;
  SomeFunc();                // "--keep-history=1" will point somewhere here.
  if (some_condition) {      // Or here
    if (some_condition) {
      unrelated_stuff++;     // Or here.
    }
    (*GLOB)++;               // "--keep-history=2" will point here (experimental).
  }
  return unrelated_stuff;
}

void Worker1() { FunctionWithControlFlow(); }
void Worker2() { Worker1(); }
void Worker3() { Worker2(); }
void Worker4() { Worker3(); }
void Run() {
  GLOB = new int;
  *GLOB = 0;
  printf("test307: simple race, code with control flow\n");
  MyThreadArray t1(Worker1, Worker2, Worker3, Worker4);
  t1.Start();
  t1.Join();
}
REGISTER_TEST2(Run, 307, RACE_DEMO)
}  // namespace test307
// test308: Example of double-checked-locking {{{1
namespace test308 {
static int is_inited = 0;

  CHECK(foo && foo->a == 42);

void Worker1() { UseMe(); }
void Worker2() { UseMe(); }
void Worker3() { UseMe(); }

void Run() {
  ANNOTATE_TRACE_MEMORY(&is_inited);
  printf("test308: Example of double-checked-locking\n");
  MyThreadArray t1(Worker1, Worker2, Worker3);
  t1.Start();
  t1.Join();
}
REGISTER_TEST2(Run, 308, RACE_DEMO)
}  // namespace test308
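// Illustrative sketch (compiled out): a detector-friendly variant of
// double-checked locking. All names below are hypothetical; only the
// Mutex/MutexLock wrappers from THREAD_WRAPPERS and the ANNOTATE_HAPPENS_*
// macros are assumed. The explicit happens-before edge on the flag lets a
// pure happens-before detector see the initialization as ordered before use.
#if 0
struct Bar { int a; };
static Bar  *bar = NULL;
static int   bar_inited = 0;
static Mutex bar_mu;

void InitBarOnce() {
  if (!bar_inited) {                      // First (unlocked) check: fast path.
    MutexLock lock(&bar_mu);
    if (!bar_inited) {                    // Second check, under the lock.
      bar = new Bar;
      bar->a = 42;
      ANNOTATE_HAPPENS_BEFORE(&bar_inited);
      bar_inited = 1;
    }
  }
  ANNOTATE_HAPPENS_AFTER(&bar_inited);    // Pairs with the edge published above.
  CHECK(bar && bar->a == 42);
}
// (Production code would additionally make bar_inited an atomic flag.)
#endif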
// test309: Simple race on an STL object. {{{1
namespace test309 {
  GLOB = "Booooooooooo";

void Run() {
  printf("test309: simple race on an STL object.\n");
  MyThread t1(Worker1), t2(Worker2);
  t1.Start();
  t2.Start();
  t1.Join();  t2.Join();
}
REGISTER_TEST2(Run, 309, RACE_DEMO)
}  // namespace test309
// test310: One more simple race. {{{1
namespace test310 {
int *PTR = NULL;  // GUARDED_BY(mu1)

Mutex mu1;  // Protects PTR.
Mutex mu2;  // Unrelated to PTR.
Mutex mu3;  // Unrelated to PTR.

void Writer1() {
  MutexLock lock3(&mu3);  // This lock is unrelated to PTR.
  MutexLock lock1(&mu1);  // Protect PTR.
}

void Writer2() {
  MutexLock lock2(&mu2);  // This lock is unrelated to PTR.
  MutexLock lock1(&mu1);  // Protect PTR.
  int some_unrelated_stuff = 0;
  if (some_unrelated_stuff == 0)
    some_unrelated_stuff++;
}

void Reader() {
  MutexLock lock2(&mu2);  // Oh, gosh, this is the wrong mutex!
}

// Some functions to make the stack trace non-trivial.
void DoWrite1() { Writer1(); }
void Thread1()  { DoWrite1(); }

void DoWrite2() { Writer2(); }
void Thread2()  { DoWrite2(); }

void DoRead()   { Reader(); }
void Thread3()  { DoRead(); }

void Run() {
  printf("test310: simple race.\n");
  ANNOTATE_TRACE_MEMORY(PTR);
  MyThread t1(Thread1, NULL, "writer1"),
           t2(Thread2, NULL, "writer2"),
           t3(Thread3, NULL, "buggy reader");
  usleep(100000);  // Let the writers go first.
}
REGISTER_TEST2(Run, 310, RACE_DEMO)
}  // namespace test310
// test311: Yet another simple race. {{{1
namespace test311 {
int *PTR = NULL;  // GUARDED_BY(mu1)

Mutex mu1;  // Protects PTR.
Mutex mu2;  // Unrelated to PTR.
Mutex mu3;  // Unrelated to PTR.

void GoodWriter1() {
  MutexLock lock3(&mu3);  // This lock is unrelated to PTR.
  MutexLock lock1(&mu1);  // Protect PTR.
}

void GoodWriter2() {
  MutexLock lock2(&mu2);  // This lock is unrelated to PTR.
  MutexLock lock1(&mu1);  // Protect PTR.
}

void GoodReader() {
  MutexLock lock1(&mu1);  // Protect PTR.
}

void BuggyWriter() {
  MutexLock lock2(&mu2);  // Wrong mutex!
}

// Some functions to make the stack trace non-trivial.
void DoWrite1()   { GoodWriter1(); }
void Thread1()    { DoWrite1(); }

void DoWrite2()   { GoodWriter2(); }
void Thread2()    { DoWrite2(); }

void DoGoodRead() { GoodReader(); }
void Thread3()    { DoGoodRead(); }

void DoBadWrite() { BuggyWriter(); }
void Thread4()    { DoBadWrite(); }

void Run() {
  printf("test311: simple race.\n");
  ANNOTATE_TRACE_MEMORY(PTR);
  MyThread t1(Thread1, NULL, "good writer1"),
           t2(Thread2, NULL, "good writer2"),
           t3(Thread3, NULL, "good reader"),
           t4(Thread4, NULL, "buggy writer");
  // t2 goes after t3. This way a pure happens-before detector has no chance.
  usleep(100000);  // Let the good folks go first.
}
REGISTER_TEST2(Run, 311, RACE_DEMO)
}  // namespace test311
// test312: A test with a very deep stack. {{{1
namespace test312 {
int GLOB = 0;
void RaceyWrite() { GLOB++; }
void Func1() { RaceyWrite(); }
void Func2() { Func1(); }
void Func3() { Func2(); }
void Func4() { Func3(); }
void Func5() { Func4(); }
void Func6() { Func5(); }
void Func7() { Func6(); }
void Func8() { Func7(); }
void Func9() { Func8(); }
void Func10() { Func9(); }
void Func11() { Func10(); }
void Func12() { Func11(); }
void Func13() { Func12(); }
void Func14() { Func13(); }
void Func15() { Func14(); }
void Func16() { Func15(); }
void Func17() { Func16(); }
void Func18() { Func17(); }
void Func19() { Func18(); }
void Worker() { Func19(); }
void Run() {
  printf("test312: simple race with deep stack.\n");
  MyThreadArray t(Worker, Worker, Worker);
  t.Start();
  t.Join();
}
REGISTER_TEST2(Run, 312, RACE_DEMO)
}  // namespace test312
// test313 TP: test for thread graph output {{{1
namespace test313 {
BlockingCounter *blocking_counter;
int GLOB = 0;

// Worker(N) will do 2^N increments of GLOB, each increment in a separate thread.
void Worker(long depth) {
  if (depth > 0) {
    ThreadPool pool(2);
    pool.StartWorkers();
    pool.Add(NewCallback(Worker, depth-1));
    pool.Add(NewCallback(Worker, depth-1));
  } else {
    GLOB++;  // Race here
  }
}
void Run() {
  printf("test313: positive\n");
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 313, RACE_DEMO)
}  // namespace test313
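// Illustrative sketch (compiled out): why the comment above says 2^N. Each
// Worker(depth) with depth > 0 schedules two Worker(depth-1) calls, so the
// recursion has 2^depth leaves and GLOB is incremented 2^depth times, each
// time from its own worker thread (ExpectedIncrements is hypothetical).
#if 0
int ExpectedIncrements(long depth) { return 1 << depth; }  // 2^depth leaves.
#endif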
// test400: Demo of a simple false positive. {{{1
namespace test400 {
Mutex mu;
static vector<int> *vec;  // GUARDED_BY(mu);

void InitAllBeforeStartingThreads() {
  vec = new vector<int>;
  vec->push_back(1);
  vec->push_back(2);
}

void Thread1() {
  MutexLock lock(&mu);
  vec->pop_back();
}

void Thread2() {
  MutexLock lock(&mu);
  vec->pop_back();
}

//---- Sub-optimal code ---------
size_t NumberOfElementsLeft() {
  MutexLock lock(&mu);
  return vec->size();
}

void WaitForAllThreadsToFinish_InefficientAndTsanUnfriendly() {
  while(NumberOfElementsLeft()) {
    ;  // sleep or print or do nothing.
  }
  // It is now safe to access vec w/o lock.
  // But a hybrid detector (like ThreadSanitizer) can't see it.
  // Solutions:
  //   1. Use a pure happens-before detector (e.g. "tsan --pure-happens-before").
  //   2. Call ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&mu)
  //      in InitAllBeforeStartingThreads().
  //   3. (preferred) Use WaitForAllThreadsToFinish_Good() (see below).
  CHECK(vec->empty());
}

//----- Better code -----------
bool NoElementsLeft(vector<int> *v) {
  return v->empty();
}

void WaitForAllThreadsToFinish_Good() {
  mu.LockWhen(Condition<vector<int> >(NoElementsLeft, vec));
  mu.Unlock();
  // It is now safe to access vec w/o lock.
  CHECK(vec->empty());
}

void Run() {
  MyThreadArray t(Thread1, Thread2);
  InitAllBeforeStartingThreads();
  t.Start();
  WaitForAllThreadsToFinish_InefficientAndTsanUnfriendly();
  // WaitForAllThreadsToFinish_Good();
  t.Join();
}
REGISTER_TEST2(Run, 400, RACE_DEMO)
}  // namespace test400
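// Illustrative sketch (compiled out): option 2 from the list above. A single
// annotation on mu tells a hybrid detector that the mutex is also used for
// signalling, so the busy-wait loop no longer produces the false report.
#if 0
// In InitAllBeforeStartingThreads(), after creating vec:
ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&mu);
#endif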
// test401: Demo of a false positive caused by reference counting. {{{1
namespace test401 {
// A simplified example of reference counting.
// DecRef() decrements the ref count in a way that is unfriendly to race detectors.
// DecRefAnnotated() does the same in a friendly way.

static vector<int> *vec;
static int ref_count;

void InitAllBeforeStartingThreads(int number_of_threads) {
  vec = new vector<int>;
  vec->push_back(42);
  ref_count = number_of_threads;
}

// Correct, but unfriendly to race detectors.
int DecRef() {
  return AtomicIncrement(&ref_count, -1);
}

// Correct and friendly to race detectors.
int DecRefAnnotated() {
  ANNOTATE_CONDVAR_SIGNAL(&ref_count);
  int res = AtomicIncrement(&ref_count, -1);
  if (res == 0) {
    ANNOTATE_CONDVAR_WAIT(&ref_count);
  }
  return res;
}

void ThreadWorker() {
  CHECK(ref_count > 0);
  CHECK(vec->size() == 1);
  if (DecRef() == 0) {  // Use DecRefAnnotated() instead!
    // No one uses vec now ==> delete it.
    delete vec;  // A false race may be reported here.
  }
}

void Run() {
  MyThreadArray t(ThreadWorker, ThreadWorker, ThreadWorker);
  InitAllBeforeStartingThreads(3 /*number of threads*/);
  t.Start();
  t.Join();
}
REGISTER_TEST2(Run, 401, RACE_DEMO)
}  // namespace test401
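// Illustrative sketch (compiled out): the same idea as DecRefAnnotated() above,
// spelled with the HAPPENS_BEFORE/AFTER macros (assumed to be provided by
// dynamic_annotations.h alongside the CONDVAR_* ones).
#if 0
int DecRefAnnotated2() {
  ANNOTATE_HAPPENS_BEFORE(&ref_count);   // Publish everything done so far.
  int res = AtomicIncrement(&ref_count, -1);
  if (res == 0) {
    ANNOTATE_HAPPENS_AFTER(&ref_count);  // Acquire all other decrements.
  }
  return res;
}
#endif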
// test501: Manually call PRINT_* annotations {{{1
namespace test501 {
int COUNTER = 0;
Mutex muCounter, muGlob[65];

void Worker() {
  muCounter.Lock();
  int myId = ++COUNTER;
  muCounter.Unlock();

  muGlob[myId].Lock();
  muGlob[myId].Unlock();
}

void Worker_1() {
  MyThreadArray ta(Worker, Worker, Worker, Worker);
  ta.Start();
  ta.Join();
}

void Worker_2() {
  MyThreadArray ta(Worker_1, Worker_1, Worker_1, Worker_1);
  ta.Start();
  ta.Join();
}

void Run() {
  ANNOTATE_RESET_STATS();
  printf("test501: Manually call PRINT_* annotations.\n");
  MyThreadArray ta(Worker_2, Worker_2, Worker_2, Worker_2);
  ta.Start();
  ta.Join();
  ANNOTATE_PRINT_MEMORY_USAGE(0);
  ANNOTATE_PRINT_STATS();
}
REGISTER_TEST2(Run, 501, FEATURE | EXCLUDE_FROM_ALL)
}  // namespace test501
// test502: produce lots of segments without cross-thread relations {{{1
namespace test502 {
/*
 * This test produces ~1Gb of memory usage when run with the following options:
 *
 * --trace-after-race=0
 */
  for (int i = 0; i < 750000; i++) {

void Run() {
  MyThreadArray t(TP, TP);
  printf("test502: produce lots of segments without cross-thread relations\n");
  t.Start();
  t.Join();
}
REGISTER_TEST2(Run, 502, MEMORY_USAGE | PRINT_STATS | EXCLUDE_FROM_ALL)
}  // namespace test502
// test503: produce lots of segments with simple HB-relations {{{1
// HB cache-miss rate is ~55%
namespace test503 {
const int N_threads = 32;
const int ARRAY_SIZE = 128;
int GLOB[ARRAY_SIZE];
ProducerConsumerQueue *Q[N_threads];
int GLOB_limit = 100000;

  int myId = AtomicIncrement(&count, 1);

  ProducerConsumerQueue &myQ = *Q[myId], &nextQ = *Q[(myId+1) % N_threads];

  // this code produces a new SS with each new segment
  while (myQ.Get() != NULL) {
    for (int i = 0; i < ARRAY_SIZE; i++)

    if (myId == 0 && GLOB[0] > GLOB_limit) {
      for (int i = 0; i < N_threads; i++)

  printf("test503: produce lots of segments with simple HB-relations\n");
  for (int i = 0; i < N_threads; i++)
    Q[i] = new ProducerConsumerQueue(1);
  {
    ThreadPool pool(N_threads);
    pool.StartWorkers();
    for (int i = 0; i < N_threads; i++) {
      pool.Add(NewCallback(Worker));
    }
  }  // all folks are joined here.
  for (int i = 0; i < N_threads; i++)

REGISTER_TEST2(Run, 503, MEMORY_USAGE | PRINT_STATS
                       | PERFORMANCE | EXCLUDE_FROM_ALL)
}  // namespace test503
// test504: force massive cache fetch-wback (50% misses, mostly CacheLineZ) {{{1
namespace test504 {
const int N_THREADS          = 2,
          HG_CACHELINE_COUNT = 1 << 16,
          HG_CACHELINE_SIZE  = 1 << 6,
          HG_CACHE_SIZE      = HG_CACHELINE_COUNT * HG_CACHELINE_SIZE;

// int gives us ~4x speed of the byte test
// 4x array size gives us
// total multiplier of 16x over the cachesize
// so we can neglect the cached-at-the-end memory
const int ARRAY_SIZE = 4 * HG_CACHE_SIZE,

int array[ARRAY_SIZE];

  // all threads write to different memory locations,
  // so no synchronization mechanisms are needed
  int lower_bound = ARRAY_SIZE * (myId-1) / N_THREADS,
      upper_bound = ARRAY_SIZE * ( myId ) / N_THREADS;
  for (int j = 0; j < ITERATIONS; j++)
    for (int i = lower_bound; i < upper_bound;
         i += HG_CACHELINE_SIZE / sizeof(array[0])) {
      array[i] = i;  // each array-write generates a cache miss
    }

void Run() {
  printf("test504: force massive CacheLineZ fetch-wback\n");
  MyThreadArray t(Worker, Worker);
  t.Start();
  t.Join();
}
REGISTER_TEST2(Run, 504, PERFORMANCE | PRINT_STATS | EXCLUDE_FROM_ALL)
}  // namespace test504
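// Illustrative sketch (compiled out): the stride arithmetic used above. With
// HG_CACHELINE_SIZE = 1 << 6 = 64 and sizeof(array[0]) typically 4, the loop
// advances by 16 elements, i.e. exactly one write per 64-byte cache line,
// which is what turns every write into a miss.
#if 0
const int STRIDE = HG_CACHELINE_SIZE / sizeof(array[0]);  // 64 / 4 == 16
#endif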
// test505: force massive cache fetch-wback (60% misses) {{{1
// modification of test504 - more threads, byte accesses and lots of mutexes
// so it produces lots of CacheLineF misses (30-50% of CacheLineZ misses)
namespace test505 {
const int N_THREADS          = 2,
          HG_CACHELINE_COUNT = 1 << 16,
          HG_CACHELINE_SIZE  = 1 << 6,
          HG_CACHE_SIZE      = HG_CACHELINE_COUNT * HG_CACHELINE_SIZE;

const int ARRAY_SIZE = 4 * HG_CACHE_SIZE,

int64_t array[ARRAY_SIZE];

const int N_MUTEXES = 5;
Mutex mu[N_MUTEXES];

  // all threads write to different memory locations,
  // so no synchronization mechanisms are needed
  int lower_bound = ARRAY_SIZE * (myId-1) / N_THREADS,
      upper_bound = ARRAY_SIZE * ( myId ) / N_THREADS;
  for (int j = 0; j < ITERATIONS; j++)
    for (int mutex_id = 0; mutex_id < N_MUTEXES; mutex_id++) {
      Mutex *m = &mu[mutex_id];
      for (int i = lower_bound + mutex_id, cnt = 0;
           i += HG_CACHELINE_SIZE / sizeof(array[0]), cnt++) {
        array[i] = i;  // each array-write generates a cache miss
      }
    }

void Run() {
  printf("test505: force massive CacheLineF fetch-wback\n");
  MyThreadArray t(Worker, Worker);
  t.Start();
  t.Join();
}
REGISTER_TEST2(Run, 505, PERFORMANCE | PRINT_STATS | EXCLUDE_FROM_ALL)
}  // namespace test505
// test506: massive HB's using Barriers {{{1
// HB cache miss is ~40%
// segments consume 10x more memory than SSs
// modification of test39
namespace test506 {
#ifndef NO_BARRIER
// Same as test17 but uses Barrier class (pthread_barrier_t).
const int N_threads = 64,

Barrier *barrier[ITERATIONS];

  for (int i = 0; i < ITERATIONS; i++) {
    barrier[i]->Block();

  printf("test506: massive HB's using Barriers\n");
  for (int i = 0; i < ITERATIONS; i++) {
    barrier[i] = new Barrier(N_threads);
  }
  {
    ThreadPool pool(N_threads);
    pool.StartWorkers();
    for (int i = 0; i < N_threads; i++) {
      pool.Add(NewCallback(Worker));
    }
  }  // all folks are joined here.
  CHECK(GLOB == N_threads * ITERATIONS);
  for (int i = 0; i < ITERATIONS; i++) {

REGISTER_TEST2(Run, 506, PERFORMANCE | PRINT_STATS | EXCLUDE_FROM_ALL);
#endif // NO_BARRIER
}  // namespace test506
// test507: vgHelgrind_initIterAtFM/stackClear benchmark {{{1
// vgHelgrind_initIterAtFM/stackClear consume ~8.5%/5.5% CPU
namespace test507 {
const int N_THREADS  = 1,
          ITERATIONS = 1 << 20;

    ANNOTATE_RWLOCK_CREATE(&temp);
    ANNOTATE_RWLOCK_DESTROY(&temp);

  for (int j = 0; j < ITERATIONS; j++) {

void Run() {
  printf("test507: vgHelgrind_initIterAtFM/stackClear benchmark\n");
  {
    ThreadPool pool(N_THREADS);
    pool.StartWorkers();
    for (int i = 0; i < N_THREADS; i++) {
      pool.Add(NewCallback(Worker));
    }
  }  // all folks are joined here.
}
REGISTER_TEST2(Run, 507, EXCLUDE_FROM_ALL);
}  // namespace test507
// test508: cmp_WordVecs_for_FM benchmark {{{1
// 50+% of CPU consumption by cmp_WordVecs_for_FM
namespace test508 {
const int N_THREADS   = 1,
          BUFFER_SIZE = 1 << 10,
          ITERATIONS  = 1 << 9;

    ANNOTATE_RWLOCK_CREATE(&temp);
    ANNOTATE_RWLOCK_DESTROY(&temp);

  for (int j = 0; j < ITERATIONS; j++) {

void Run() {
  printf("test508: cmp_WordVecs_for_FM benchmark\n");
  {
    ThreadPool pool(N_THREADS);
    pool.StartWorkers();
    for (int i = 0; i < N_THREADS; i++) {
      pool.Add(NewCallback(Worker));
    }
  }  // all folks are joined here.
}
REGISTER_TEST2(Run, 508, EXCLUDE_FROM_ALL);
}  // namespace test508
// test509: avl_find_node benchmark {{{1
// 10+% of CPU consumption by avl_find_node
namespace test509 {
const int N_THREADS  = 16,
          ITERATIONS = 1 << 8;

void Worker() {
  std::vector<Mutex*> mu_list;
  for (int i = 0; i < ITERATIONS; i++) {
    Mutex *mu = new Mutex();
    mu_list.push_back(mu);
    mu->Lock();
  }
  for (int i = ITERATIONS - 1; i >= 0; i--) {
    Mutex *mu = mu_list[i];
    mu->Unlock();
    delete mu;
  }
}
void Run() {
  printf("test509: avl_find_node benchmark\n");
  {
    ThreadPool pool(N_THREADS);
    pool.StartWorkers();
    for (int i = 0; i < N_THREADS; i++) {
      pool.Add(NewCallback(Worker));
    }
  }  // all folks are joined here.
}
REGISTER_TEST2(Run, 509, EXCLUDE_FROM_ALL);
}  // namespace test509
// test510: SS-recycle test {{{1
// this test shows the case where only ~1% of SS are recycled
namespace test510 {
const int N_THREADS  = 16,
          ITERATIONS = 1 << 10;

void Worker() {
  for (int i = 0; i < ITERATIONS; i++) {
    ANNOTATE_CONDVAR_SIGNAL((void*)0xDeadBeef);
  }
}
void Run() {
  //ANNOTATE_BENIGN_RACE(&GLOB, "Test");
  printf("test510: SS-recycle test\n");
  {
    ThreadPool pool(N_THREADS);
    pool.StartWorkers();
    for (int i = 0; i < N_THREADS; i++) {
      pool.Add(NewCallback(Worker));
    }
  }  // all folks are joined here.
}
REGISTER_TEST2(Run, 510, MEMORY_USAGE | PRINT_STATS | EXCLUDE_FROM_ALL);
}  // namespace test510
// test511: Segment refcounting test ('1' refcounting) {{{1
namespace test511 {
int GLOB = 0;
void Run() {
  for (int i = 0; i < 300; i++) {
    ANNOTATE_CONDVAR_SIGNAL(&GLOB);
    ANNOTATE_CONDVAR_WAIT(&GLOB);
  }
  ANNOTATE_PRINT_MEMORY_USAGE(0);
}
REGISTER_TEST2(Run, 511, MEMORY_USAGE | PRINT_STATS | EXCLUDE_FROM_ALL);
}  // namespace test511
// test512: Segment refcounting test ('S' refcounting) {{{1
namespace test512 {
sem_t SEM;
void Run() {
  sem_init(&SEM, 0, 0);
  for (int i = 0; i < 300; i++) {
  }
  /*
  ANNOTATE_PRINT_MEMORY_USAGE(0);*/
}
REGISTER_TEST2(Run, 512, MEMORY_USAGE | PRINT_STATS | EXCLUDE_FROM_ALL);
}  // namespace test512
// test513: --fast-mode benchmark {{{1
namespace test513 {
const int N_THREADS         = 2,
          HG_CACHELINE_SIZE = 1 << 6,
          ARRAY_SIZE        = HG_CACHELINE_SIZE * 512,
//        MUTEX_ID_MASK     = (1 << MUTEX_ID_BITS) - 1;

// Each thread has its own cacheline and works on it intensively.
const int ITERATIONS = 1024;
int array[N_THREADS][ARRAY_SIZE];

Mutex mutex_arr[N_THREADS][MUTEX_ID_BITS];

  // all threads write to different memory locations
  for (int j = 0; j < ITERATIONS; j++) {
    int mutex_mask = j & MUTEX_ID_BITS;
    for (int m = 0; m < MUTEX_ID_BITS; m++)
      if (mutex_mask & (1 << m))
        mutex_arr[myId][m].Lock();

    for (int i = 0; i < ARRAY_SIZE; i++) {

    for (int m = 0; m < MUTEX_ID_BITS; m++)
      if (mutex_mask & (1 << m))
        mutex_arr[myId][m].Unlock();

void Run() {
  printf("test513: --fast-mode benchmark\n");
  {
    ThreadPool pool(N_THREADS);
    pool.StartWorkers();
    for (int i = 0; i < N_THREADS; i++) {
      pool.Add(NewCallback(Worker));
    }
  }  // all folks are joined here.
}
REGISTER_TEST2(Run, 513, PERFORMANCE | PRINT_STATS | EXCLUDE_FROM_ALL)
}  // namespace test513
// vim:shiftwidth=2:softtabstop=2:expandtab:foldmethod=marker