drd/tests/tsan_unittest.cpp
1 /*
2 This file is part of Valgrind, a dynamic binary instrumentation
3 framework.
5 Copyright (C) 2008-2008 Google Inc
6 opensource@google.com
8 This program is free software; you can redistribute it and/or
9 modify it under the terms of the GNU General Public License as
10 published by the Free Software Foundation; either version 2 of the
11 License, or (at your option) any later version.
13 This program is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, see <http://www.gnu.org/licenses/>.
21 The GNU General Public License is contained in the file COPYING.
24 // Author: Konstantin Serebryany <opensource@google.com>
26 // This file contains a set of unit tests for a data race detection tool.
30 // This test can be compiled with pthreads (default) or
31 // with any other library that supports threads, locks, cond vars, etc.
32 //
33 // To compile with pthreads:
34 // g++ racecheck_unittest.cc dynamic_annotations.cc
35 // -lpthread -g -DDYNAMIC_ANNOTATIONS=1
36 //
37 // To compile with different library:
38 // 1. cp thread_wrappers_pthread.h thread_wrappers_yourlib.h
39 // 2. edit thread_wrappers_yourlib.h
40 // 3. add '-DTHREAD_WRAPPERS="thread_wrappers_yourlib.h"' to your compilation.
44 // This test must not include any other file specific to a threading library,
45 // everything should be inside THREAD_WRAPPERS.
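// A rough sketch (illustrative only, not part of the test) of the interface a
// replacement thread_wrappers_yourlib.h would need to provide, judging from
// what this file uses; the authoritative definition is thread_wrappers_pthread.h:
//
//   class Mutex    { void Lock(); void Unlock();
//                    void ReaderLock(); void ReaderUnlock();
//                    bool TryLock(); bool ReaderTryLock();
//                    /* LockWhen(), Await(), *WithTimeout() variants, ... */ };
//   class CondVar  { void Signal(); void Wait(Mutex*); /* WaitWithTimeout() */ };
//   class MyThread { MyThread(void (*f)(void)); void Start(); void Join(); };
//   class ThreadPool, ProducerConsumerQueue, Barrier, SpinLock, Condition<T>;
//   plus CHECK(), NewCallback(), GetCurrentTimeMillis().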
46 #ifndef THREAD_WRAPPERS
47 # define THREAD_WRAPPERS "thread_wrappers_pthread.h"
48 #endif
49 #include THREAD_WRAPPERS
51 #ifndef NEEDS_SEPERATE_RW_LOCK
52 #define RWLock Mutex // Mutex does work as an rw-lock.
53 #define WriterLockScoped MutexLock
54 #define ReaderLockScoped ReaderMutexLock
55 #endif // !NEEDS_SEPERATE_RW_LOCK
58 // Helgrind memory usage testing stuff
59 // If not present in dynamic_annotations.h/.cc - ignore
60 #ifndef ANNOTATE_RESET_STATS
61 #define ANNOTATE_RESET_STATS() do { } while(0)
62 #endif
63 #ifndef ANNOTATE_PRINT_STATS
64 #define ANNOTATE_PRINT_STATS() do { } while(0)
65 #endif
66 #ifndef ANNOTATE_PRINT_MEMORY_USAGE
67 #define ANNOTATE_PRINT_MEMORY_USAGE(a) do { } while(0)
68 #endif
71 // A function that allows suppressing gcc's warnings about
72 // unused return values in a portable way.
73 template <typename T>
74 static inline void IGNORE_RETURN_VALUE(T v)
75 { }
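// Hypothetical example of its use:
//   IGNORE_RETURN_VALUE(write(fd, buf, len));  // silences -Wunused-result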
77 #include <vector>
78 #include <string>
79 #include <map>
80 #include <queue>
81 #include <algorithm>
82 #include <cstring> // strlen(), index(), rindex()
83 #include <ctime>
84 #include <sys/time.h>
85 #include <sys/types.h>
86 #include <sys/stat.h>
87 #include <fcntl.h>
88 #include <sys/mman.h> // mmap
89 #include <errno.h>
90 #include <stdint.h> // uintptr_t
91 #include <stdlib.h>
92 #include <dirent.h>
94 #ifndef VGO_darwin
95 #include <malloc.h>
96 #endif
98 #ifdef VGO_solaris
99 #include <strings.h> // index(), rindex()
100 #endif
102 // The tests are
103 // - Stability tests (marked STAB)
104 // - Performance tests (marked PERF)
105 // - Feature tests
106 // - TN (true negative) : no race exists and the tool is silent.
107 // - TP (true positive) : a race exists and reported.
108 // - FN (false negative): a race exists but not reported.
109 // - FP (false positive): no race exists but the tool reports it.
111 // The feature tests are marked according to the behavior of helgrind 3.3.0.
113 // TP and FP tests are annotated with ANNOTATE_EXPECT_RACE,
114 // so, no error reports should be seen when running under helgrind.
116 // When some of the FP cases are fixed in helgrind we'll need
117 // to update this test.
119 // Each test resides in its own namespace.
120 // Namespaces are named test01, test02, ...
121 // Please, *DO NOT* change the logic of existing tests nor rename them.
122 // Create a new test instead.
124 // Some tests use sleep()/usleep().
125 // This is not a synchronization mechanism, but a simple way to trigger
126 // some specific behaviour of the race detector's scheduler.
128 // Globals and utilities used by several tests. {{{1
129 CondVar CV;
130 int COND = 0;
133 typedef void (*void_func_void_t)(void);
134 enum TEST_FLAG {
135 FEATURE = 1 << 0,
136 STABILITY = 1 << 1,
137 PERFORMANCE = 1 << 2,
138 EXCLUDE_FROM_ALL = 1 << 3,
139 NEEDS_ANNOTATIONS = 1 << 4,
140 RACE_DEMO = 1 << 5,
141 MEMORY_USAGE = 1 << 6,
142 PRINT_STATS = 1 << 7
145 // Put everything into stderr.
146 Mutex printf_mu;
147 #define printf(args...) \
148 do{ \
149 printf_mu.Lock();\
150 fprintf(stderr, args);\
151 printf_mu.Unlock(); \
152 }while(0)
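// From this point on, a call such as
//   printf("test00: negative\n");
// expands to an fprintf(stderr, ...) guarded by printf_mu, so output from
// concurrent threads is serialized and always goes to stderr.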
154 long GetTimeInMs() {
155 struct timeval tv;
156 gettimeofday(&tv, NULL);
157 return (tv.tv_sec * 1000L) + (tv.tv_usec / 1000L);
160 struct Test{
161 void_func_void_t f_;
162 int flags_;
163 Test(void_func_void_t f, int flags)
164 : f_(f)
165 , flags_(flags)
167 Test() : f_(0), flags_(0) {}
168 void Run() {
169 ANNOTATE_RESET_STATS();
170 if (flags_ & PERFORMANCE) {
171 long start = GetTimeInMs();
172 f_();
173 long end = GetTimeInMs();
174 printf ("Time: %4ldms\n", end-start);
175 } else
176 f_();
177 if (flags_ & PRINT_STATS)
178 ANNOTATE_PRINT_STATS();
179 if (flags_ & MEMORY_USAGE)
180 ANNOTATE_PRINT_MEMORY_USAGE(0);
183 std::map<int, Test> TheMapOfTests;
185 #define NOINLINE __attribute__ ((noinline))
186 extern "C" void NOINLINE AnnotateSetVerbosity(const char *, int, int) {};
189 struct TestAdder {
190 TestAdder(void_func_void_t f, int id, int flags = FEATURE) {
191 // AnnotateSetVerbosity(__FILE__, __LINE__, 0);
192 CHECK(TheMapOfTests.count(id) == 0);
193 TheMapOfTests[id] = Test(f, flags);
197 #define REGISTER_TEST(f, id) TestAdder add_test_##id (f, id);
198 #define REGISTER_TEST2(f, id, flags) TestAdder add_test_##id (f, id, flags);
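// For example, REGISTER_TEST(Run, 42) expands to a global object
//   TestAdder add_test_42 (Run, 42);
// whose constructor inserts the test into TheMapOfTests before main() runs.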
200 static bool ArgIsOne(int *arg) { return *arg == 1; };
201 static bool ArgIsZero(int *arg) { return *arg == 0; };
202 static bool ArgIsTrue(bool *arg) { return *arg == true; };
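// These predicates are used below with conditional critical sections, e.g.
//   MU.LockWhen(Condition<int>(&ArgIsOne, &COND));  // block until COND == 1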
204 // Call ANNOTATE_EXPECT_RACE only if 'machine' env variable is defined.
205 // Useful to test against several different machines.
206 // Supported machines so far:
207 // MSM_HYBRID1 -- aka MSMProp1
208 // MSM_HYBRID1_INIT_STATE -- aka MSMProp1 with --initialization-state=yes
209 // MSM_THREAD_SANITIZER -- ThreadSanitizer's state machine
210 #define ANNOTATE_EXPECT_RACE_FOR_MACHINE(mem, descr, machine) \
211 while(getenv(machine)) {\
212 ANNOTATE_EXPECT_RACE(mem, descr); \
213 break;\
216 #define ANNOTATE_EXPECT_RACE_FOR_TSAN(mem, descr) \
217 ANNOTATE_EXPECT_RACE_FOR_MACHINE(mem, descr, "MSM_THREAD_SANITIZER")
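// Typical use in the tests below:
//   ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test01. TP.");
// which expands to ANNOTATE_EXPECT_RACE(&GLOB, ...) only when the
// MSM_THREAD_SANITIZER environment variable is set.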
219 inline bool Tsan_PureHappensBefore() {
220 return true;
223 inline bool Tsan_FastMode() {
224 return getenv("TSAN_FAST_MODE") != NULL;
227 // Initialize *(mem) to 0 if Tsan_FastMode.
228 #define FAST_MODE_INIT(mem) do { if (Tsan_FastMode()) { *(mem) = 0; } } while(0)
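// E.g. FAST_MODE_INIT(&GLOB) is a no-op unless the TSAN_FAST_MODE environment
// variable is set, in which case it resets GLOB to 0 before the test's
// annotations are applied.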
230 #ifndef MAIN_INIT_ACTION
231 #define MAIN_INIT_ACTION
232 #endif
236 int main(int argc, char** argv) { // {{{1
237 MAIN_INIT_ACTION;
238 printf("FLAGS [phb=%i, fm=%i]\n", Tsan_PureHappensBefore(), Tsan_FastMode());
239 if (argc == 2 && !strcmp(argv[1], "benchmark")) {
240 for (std::map<int,Test>::iterator it = TheMapOfTests.begin();
241 it != TheMapOfTests.end(); ++it) {
242 if(!(it->second.flags_ & PERFORMANCE)) continue;
243 it->second.Run();
245 } else if (argc == 2 && !strcmp(argv[1], "demo")) {
246 for (std::map<int,Test>::iterator it = TheMapOfTests.begin();
247 it != TheMapOfTests.end(); ++it) {
248 if(!(it->second.flags_ & RACE_DEMO)) continue;
249 it->second.Run();
251 } else if (argc > 1) {
252 // the tests are listed in command line flags
253 for (int i = 1; i < argc; i++) {
254 int f_num = atoi(argv[i]);
255 CHECK(TheMapOfTests.count(f_num));
256 TheMapOfTests[f_num].Run();
258 } else {
259 bool run_tests_with_annotations = false;
260 if (getenv("DRT_ALLOW_ANNOTATIONS")) {
261 run_tests_with_annotations = true;
263 for (std::map<int,Test>::iterator it = TheMapOfTests.begin();
264 it != TheMapOfTests.end();
265 ++it) {
266 if(it->second.flags_ & EXCLUDE_FROM_ALL) continue;
267 if(it->second.flags_ & RACE_DEMO) continue;
268 if((it->second.flags_ & NEEDS_ANNOTATIONS)
269 && run_tests_with_annotations == false) continue;
270 it->second.Run();
275 #ifdef THREAD_WRAPPERS_PTHREAD_H
276 #endif
279 // An array of threads. Create/start/join all elements at once. {{{1
280 class MyThreadArray {
281 public:
282 static const int kSize = 5;
283 typedef void (*F) (void);
284 MyThreadArray(F f1, F f2 = NULL, F f3 = NULL, F f4 = NULL, F f5 = NULL) {
285 ar_[0] = new MyThread(f1);
286 ar_[1] = f2 ? new MyThread(f2) : NULL;
287 ar_[2] = f3 ? new MyThread(f3) : NULL;
288 ar_[3] = f4 ? new MyThread(f4) : NULL;
289 ar_[4] = f5 ? new MyThread(f5) : NULL;
291 void Start() {
292 for(int i = 0; i < kSize; i++) {
293 if(ar_[i]) {
294 ar_[i]->Start();
295 usleep(10);
300 void Join() {
301 for(int i = 0; i < kSize; i++) {
302 if(ar_[i]) {
303 ar_[i]->Join();
308 ~MyThreadArray() {
309 for(int i = 0; i < kSize; i++) {
310 delete ar_[i];
313 private:
314 MyThread *ar_[kSize];
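// Typical use of MyThreadArray, as in the tests below:
//   MyThreadArray t(Worker1, Worker2);  // accepts up to 5 thread functions
//   t.Start();
//   t.Join();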
319 // test00: {{{1
320 namespace test00 {
321 int GLOB = 0;
322 void Run() {
323 printf("test00: negative\n");
324 printf("\tGLOB=%d\n", GLOB);
326 REGISTER_TEST(Run, 00)
327 } // namespace test00
330 // test01: TP. Simple race (write vs write). {{{1
331 namespace test01 {
332 int GLOB = 0;
333 void Worker() {
334 GLOB = 1;
337 void Parent() {
338 MyThread t(Worker);
339 t.Start();
340 const timespec delay = { 0, 100 * 1000 * 1000 };
341 nanosleep(&delay, 0);
342 GLOB = 2;
343 t.Join();
345 void Run() {
346 FAST_MODE_INIT(&GLOB);
347 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test01. TP.");
348 ANNOTATE_TRACE_MEMORY(&GLOB);
349 printf("test01: positive\n");
350 Parent();
351 const int tmp = GLOB;
352 printf("\tGLOB=%d\n", tmp);
354 REGISTER_TEST(Run, 1);
355 } // namespace test01
358 // test02: TN. Synchronization via CondVar. {{{1
359 namespace test02 {
360 int GLOB = 0;
361 // Two write accesses to GLOB are synchronized because
362 // the pair of CV.Signal() and CV.Wait() establishes a happens-before relation.
364 // Waiter: Waker:
365 // 1. COND = 0
366 // 2. Start(Waker)
367 // 3. MU.Lock() a. write(GLOB)
368 // b. MU.Lock()
369 // c. COND = 1
370 // /--- d. CV.Signal()
371 // 4. while(COND) / e. MU.Unlock()
372 // CV.Wait(MU) <---/
373 // 5. MU.Unlock()
374 // 6. write(GLOB)
375 Mutex MU;
377 void Waker() {
378 usleep(100000); // Make sure the waiter blocks.
379 GLOB = 1;
381 MU.Lock();
382 COND = 1;
383 CV.Signal();
384 MU.Unlock();
387 void Waiter() {
388 ThreadPool pool(1);
389 pool.StartWorkers();
390 COND = 0;
391 pool.Add(NewCallback(Waker));
392 MU.Lock();
393 while(COND != 1)
394 CV.Wait(&MU);
395 MU.Unlock();
396 GLOB = 2;
398 void Run() {
399 printf("test02: negative\n");
400 Waiter();
401 printf("\tGLOB=%d\n", GLOB);
403 REGISTER_TEST(Run, 2);
404 } // namespace test02
407 // test03: TN. Synchronization via LockWhen, signaller gets there first. {{{1
408 namespace test03 {
409 int GLOB = 0;
410 // Two write accesses to GLOB are synchronized via conditional critical section.
411 // Note that LockWhen() happens first (the Waker sleeps to make sure)!
413 // Waiter: Waker:
414 // 1. COND = 0
415 // 2. Start(Waker)
416 // a. write(GLOB)
417 // b. MU.Lock()
418 // c. COND = 1
419 // /--- d. MU.Unlock()
420 // 3. MU.LockWhen(COND==1) <---/
421 // 4. MU.Unlock()
422 // 5. write(GLOB)
423 Mutex MU;
425 void Waker() {
426 usleep(100000); // Make sure the waiter blocks.
427 GLOB = 1;
429 MU.Lock();
430 COND = 1; // We are done! Tell the Waiter.
431 MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
433 void Waiter() {
434 ThreadPool pool(1);
435 pool.StartWorkers();
436 COND = 0;
437 pool.Add(NewCallback(Waker));
438 MU.LockWhen(Condition<int>(&ArgIsOne, &COND)); // calls ANNOTATE_CONDVAR_WAIT
439 MU.Unlock(); // Waker is done!
441 GLOB = 2;
443 void Run() {
444 printf("test03: negative\n");
445 Waiter();
446 printf("\tGLOB=%d\n", GLOB);
448 REGISTER_TEST2(Run, 3, FEATURE|NEEDS_ANNOTATIONS);
449 } // namespace test03
451 // test04: TN. Synchronization via PCQ. {{{1
452 namespace test04 {
453 int GLOB = 0;
454 ProducerConsumerQueue Q(INT_MAX);
455 // Two write accesses to GLOB are separated by PCQ Put/Get.
457 // Putter: Getter:
458 // 1. write(GLOB)
459 // 2. Q.Put() ---------\ .
460 // \-------> a. Q.Get()
461 // b. write(GLOB)
464 void Putter() {
465 GLOB = 1;
466 Q.Put(NULL);
469 void Getter() {
470 Q.Get();
471 GLOB = 2;
474 void Run() {
475 printf("test04: negative\n");
476 MyThreadArray t(Putter, Getter);
477 t.Start();
478 t.Join();
479 printf("\tGLOB=%d\n", GLOB);
481 REGISTER_TEST(Run, 4);
482 } // namespace test04
485 // test05: FP. Synchronization via CondVar, but waiter does not block. {{{1
486 // Since CondVar::Wait() is not called, we get a false positive.
487 namespace test05 {
488 int GLOB = 0;
489 // Two write accesses to GLOB are synchronized via CondVar.
490 // But the race detector cannot see it.
491 // See this for details:
492 // http://www.valgrind.org/docs/manual/hg-manual.html#hg-manual.effective-use.
494 // Waiter: Waker:
495 // 1. COND = 0
496 // 2. Start(Waker)
497 // 3. MU.Lock() a. write(GLOB)
498 // b. MU.Lock()
499 // c. COND = 1
500 // d. CV.Signal()
501 // 4. while(COND) e. MU.Unlock()
502 // CV.Wait(MU) <<< not called
503 // 5. MU.Unlock()
504 // 6. write(GLOB)
505 Mutex MU;
507 void Waker() {
508 GLOB = 1;
509 MU.Lock();
510 COND = 1;
511 CV.Signal();
512 MU.Unlock();
515 void Waiter() {
516 ThreadPool pool(1);
517 pool.StartWorkers();
518 COND = 0;
519 pool.Add(NewCallback(Waker));
520 usleep(100000); // Make sure the signaller gets there first.
521 MU.Lock();
522 while(COND != 1)
523 CV.Wait(&MU);
524 MU.Unlock();
525 GLOB = 2;
527 void Run() {
528 FAST_MODE_INIT(&GLOB);
529 if (!Tsan_PureHappensBefore())
530 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test05. FP. Unavoidable in hybrid scheme.");
531 printf("test05: unavoidable false positive\n");
532 Waiter();
533 printf("\tGLOB=%d\n", GLOB);
535 REGISTER_TEST(Run, 5);
536 } // namespace test05
539 // test06: TN. Synchronization via CondVar, but Waker gets there first. {{{1
540 namespace test06 {
541 int GLOB = 0;
542 // Same as test05 but we annotated the Wait() loop.
544 // Waiter: Waker:
545 // 1. COND = 0
546 // 2. Start(Waker)
547 // 3. MU.Lock() a. write(GLOB)
548 // b. MU.Lock()
549 // c. COND = 1
550 // /------- d. CV.Signal()
551 // 4. while(COND) / e. MU.Unlock()
552 // CV.Wait(MU) <<< not called /
553 // 6. ANNOTATE_CONDVAR_WAIT(CV, MU) <----/
554 // 5. MU.Unlock()
555 // 6. write(GLOB)
557 Mutex MU;
559 void Waker() {
560 GLOB = 1;
561 MU.Lock();
562 COND = 1;
563 CV.Signal();
564 MU.Unlock();
567 void Waiter() {
568 ThreadPool pool(1);
569 pool.StartWorkers();
570 COND = 0;
571 pool.Add(NewCallback(Waker));
572 usleep(100000); // Make sure the signaller gets there first.
573 MU.Lock();
574 while(COND != 1)
575 CV.Wait(&MU);
576 ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU);
578 MU.Unlock();
579 GLOB = 2;
581 void Run() {
582 printf("test06: negative\n");
583 Waiter();
584 printf("\tGLOB=%d\n", GLOB);
586 REGISTER_TEST2(Run, 6, FEATURE|NEEDS_ANNOTATIONS);
587 } // namespace test06
590 // test07: TN. Synchronization via LockWhen(), Signaller is observed first. {{{1
591 namespace test07 {
592 int GLOB = 0;
593 bool COND = 0;
594 // Two write accesses to GLOB are synchronized via conditional critical section.
595 // LockWhen() is observed after COND has been set (due to sleep).
596 // Unlock() calls ANNOTATE_CONDVAR_SIGNAL().
598 // Waiter: Signaller:
599 // 1. COND = 0
600 // 2. Start(Signaller)
601 // a. write(GLOB)
602 // b. MU.Lock()
603 // c. COND = 1
604 // /--- d. MU.Unlock calls ANNOTATE_CONDVAR_SIGNAL
605 // 3. MU.LockWhen(COND==1) <---/
606 // 4. MU.Unlock()
607 // 5. write(GLOB)
609 Mutex MU;
610 void Signaller() {
611 GLOB = 1;
612 MU.Lock();
613 COND = true; // We are done! Tell the Waiter.
614 MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
616 void Waiter() {
617 COND = false;
618 MyThread t(Signaller);
619 t.Start();
620 usleep(100000); // Make sure the signaller gets there first.
622 MU.LockWhen(Condition<bool>(&ArgIsTrue, &COND));// calls ANNOTATE_CONDVAR_WAIT
623 MU.Unlock(); // Signaller is done!
625 GLOB = 2; // If LockWhen didn't catch the signal, a race may be reported here.
626 t.Join();
628 void Run() {
629 printf("test07: negative\n");
630 Waiter();
631 printf("\tGLOB=%d\n", GLOB);
633 REGISTER_TEST2(Run, 7, FEATURE|NEEDS_ANNOTATIONS);
634 } // namespace test07
636 // test08: TN. Synchronization via thread start/join. {{{1
637 namespace test08 {
638 int GLOB = 0;
639 // Three accesses to GLOB are separated by thread start/join.
641 // Parent: Worker:
642 // 1. write(GLOB)
643 // 2. Start(Worker) ------------>
644 // a. write(GLOB)
645 // 3. Join(Worker) <------------
646 // 4. write(GLOB)
647 void Worker() {
648 GLOB = 2;
651 void Parent() {
652 MyThread t(Worker);
653 GLOB = 1;
654 t.Start();
655 t.Join();
656 GLOB = 3;
658 void Run() {
659 printf("test08: negative\n");
660 Parent();
661 printf("\tGLOB=%d\n", GLOB);
663 REGISTER_TEST(Run, 8);
664 } // namespace test08
667 // test09: TP. Simple race (read vs write). {{{1
668 namespace test09 {
669 int GLOB = 0;
670 // A simple data race between writer and reader.
671 // Write happens after read (enforced by sleep).
672 // Usually, easily detectable by a race detector.
673 void Writer() {
674 usleep(100000);
675 GLOB = 3;
677 void Reader() {
678 CHECK(GLOB != -777);
681 void Run() {
682 ANNOTATE_TRACE_MEMORY(&GLOB);
683 FAST_MODE_INIT(&GLOB);
684 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test09. TP.");
685 printf("test09: positive\n");
686 MyThreadArray t(Writer, Reader);
687 t.Start();
688 t.Join();
689 printf("\tGLOB=%d\n", GLOB);
691 REGISTER_TEST(Run, 9);
692 } // namespace test09
695 // test10: FN. Simple race (write vs read). {{{1
696 namespace test10 {
697 int GLOB = 0;
698 // A simple data race between writer and reader.
699 // Write happens before Read (enforced by sleep),
700 // otherwise this test is the same as test09.
702 // Writer: Reader:
703 // 1. write(GLOB) a. sleep(long enough so that GLOB
704 // is most likely initialized by Writer)
705 // b. read(GLOB)
708 // Eraser algorithm does not detect the race here,
709 // see Section 2.2 of http://citeseer.ist.psu.edu/savage97eraser.html.
711 void Writer() {
712 GLOB = 3;
714 void Reader() {
715 usleep(100000);
716 CHECK(GLOB != -777);
719 void Run() {
720 FAST_MODE_INIT(&GLOB);
721 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test10. TP. FN in MSMHelgrind.");
722 printf("test10: positive\n");
723 MyThreadArray t(Writer, Reader);
724 t.Start();
725 t.Join();
726 printf("\tGLOB=%d\n", GLOB);
728 REGISTER_TEST(Run, 10);
729 } // namespace test10
732 // test11: FP. Synchronization via CondVar, 2 workers. {{{1
733 // This test is properly synchronized, but currently (Dec 2007)
734 // helgrind reports a false positive.
736 // Parent: Worker1, Worker2:
737 // 1. Start(workers) a. read(GLOB)
738 // 2. MU.Lock() b. MU.Lock()
739 // 3. while(COND != 2) /-------- c. CV.Signal()
740 // CV.Wait(&MU) <-------/ d. MU.Unlock()
741 // 4. MU.Unlock()
742 // 5. write(GLOB)
744 namespace test11 {
745 int GLOB = 0;
746 Mutex MU;
747 void Worker() {
748 usleep(200000);
749 CHECK(GLOB != 777);
751 MU.Lock();
752 COND++;
753 CV.Signal();
754 MU.Unlock();
757 void Parent() {
758 COND = 0;
760 MyThreadArray t(Worker, Worker);
761 t.Start();
763 MU.Lock();
764 while(COND != 2) {
765 CV.Wait(&MU);
767 MU.Unlock();
769 GLOB = 2;
771 t.Join();
774 void Run() {
775 // ANNOTATE_EXPECT_RACE(&GLOB, "test11. FP. Fixed by MSMProp1.");
776 printf("test11: negative\n");
777 Parent();
778 printf("\tGLOB=%d\n", GLOB);
780 REGISTER_TEST(Run, 11);
781 } // namespace test11
784 // test12: FP. Synchronization via Mutex, then via PCQ. {{{1
785 namespace test12 {
786 int GLOB = 0;
787 // This test is properly synchronized, but currently (Dec 2007)
788 // helgrind reports a false positive.
790 // First, we write to GLOB under MU, then we synchronize via PCQ,
791 // which is essentially a semaphore.
793 // Putter: Getter:
794 // 1. MU.Lock() a. MU.Lock()
795 // 2. write(GLOB) <---- MU ----> b. write(GLOB)
796 // 3. MU.Unlock() c. MU.Unlock()
797 // 4. Q.Put() ---------------> d. Q.Get()
798 // e. write(GLOB)
800 ProducerConsumerQueue Q(INT_MAX);
801 Mutex MU;
803 void Putter() {
804 MU.Lock();
805 GLOB++;
806 MU.Unlock();
808 Q.Put(NULL);
811 void Getter() {
812 MU.Lock();
813 GLOB++;
814 MU.Unlock();
816 Q.Get();
817 GLOB++;
820 void Run() {
821 // ANNOTATE_EXPECT_RACE(&GLOB, "test12. FP. Fixed by MSMProp1.");
822 printf("test12: negative\n");
823 MyThreadArray t(Putter, Getter);
824 t.Start();
825 t.Join();
826 printf("\tGLOB=%d\n", GLOB);
828 REGISTER_TEST(Run, 12);
829 } // namespace test12
832 // test13: FP. Synchronization via Mutex, then via LockWhen. {{{1
833 namespace test13 {
834 int GLOB = 0;
835 // This test is essentially the same as test12, but uses LockWhen
836 // instead of PCQ.
838 // Waker: Waiter:
839 // 1. MU.Lock() a. MU.Lock()
840 // 2. write(GLOB) <---------- MU ----------> b. write(GLOB)
841 // 3. MU.Unlock() c. MU.Unlock()
842 // 4. MU.Lock() .
843 // 5. COND = 1 .
844 // 6. ANNOTATE_CONDVAR_SIGNAL -------\ .
845 // 7. MU.Unlock() \ .
846 // \----> d. MU.LockWhen(COND == 1)
847 // e. MU.Unlock()
848 // f. write(GLOB)
849 Mutex MU;
851 void Waker() {
852 MU.Lock();
853 GLOB++;
854 MU.Unlock();
856 MU.Lock();
857 COND = 1;
858 ANNOTATE_CONDVAR_SIGNAL(&MU);
859 MU.Unlock();
862 void Waiter() {
863 MU.Lock();
864 GLOB++;
865 MU.Unlock();
867 MU.LockWhen(Condition<int>(&ArgIsOne, &COND));
868 MU.Unlock();
869 GLOB++;
872 void Run() {
873 // ANNOTATE_EXPECT_RACE(&GLOB, "test13. FP. Fixed by MSMProp1.");
874 printf("test13: negative\n");
875 COND = 0;
877 MyThreadArray t(Waker, Waiter);
878 t.Start();
879 t.Join();
881 printf("\tGLOB=%d\n", GLOB);
883 REGISTER_TEST2(Run, 13, FEATURE|NEEDS_ANNOTATIONS);
884 } // namespace test13
887 // test14: FP. Synchronization via PCQ, reads, 2 workers. {{{1
888 namespace test14 {
889 int GLOB = 0;
890 // This test is properly synchronized, but currently (Dec 2007)
891 // helgrind reports a false positive.
893 // This test is similar to test11, but uses PCQ (semaphore).
895 // Putter2: Putter1: Getter:
896 // 1. read(GLOB) a. read(GLOB)
897 // 2. Q2.Put() ----\ b. Q1.Put() -----\ .
898 // \ \--------> A. Q1.Get()
899 // \----------------------------------> B. Q2.Get()
900 // C. write(GLOB)
901 ProducerConsumerQueue Q1(INT_MAX), Q2(INT_MAX);
903 void Putter1() {
904 CHECK(GLOB != 777);
905 Q1.Put(NULL);
907 void Putter2() {
908 CHECK(GLOB != 777);
909 Q2.Put(NULL);
911 void Getter() {
912 Q1.Get();
913 Q2.Get();
914 GLOB++;
916 void Run() {
917 // ANNOTATE_EXPECT_RACE(&GLOB, "test14. FP. Fixed by MSMProp1.");
918 printf("test14: negative\n");
919 MyThreadArray t(Getter, Putter1, Putter2);
920 t.Start();
921 t.Join();
922 printf("\tGLOB=%d\n", GLOB);
924 REGISTER_TEST(Run, 14);
925 } // namespace test14
928 // test15: TN. Synchronization via LockWhen. One waker and 2 waiters. {{{1
929 namespace test15 {
930 // Waker: Waiter1, Waiter2:
931 // 1. write(GLOB)
932 // 2. MU.Lock()
933 // 3. COND = 1
934 // 4. ANNOTATE_CONDVAR_SIGNAL ------------> a. MU.LockWhen(COND == 1)
935 // 5. MU.Unlock() b. MU.Unlock()
936 // c. read(GLOB)
938 int GLOB = 0;
939 Mutex MU;
941 void Waker() {
942 GLOB = 2;
944 MU.Lock();
945 COND = 1;
946 ANNOTATE_CONDVAR_SIGNAL(&MU);
947 MU.Unlock();
950 void Waiter() {
951 MU.LockWhen(Condition<int>(&ArgIsOne, &COND));
952 MU.Unlock();
953 CHECK(GLOB != 777);
957 void Run() {
958 COND = 0;
959 printf("test15: negative\n");
960 MyThreadArray t(Waker, Waiter, Waiter);
961 t.Start();
962 t.Join();
963 printf("\tGLOB=%d\n", GLOB);
965 REGISTER_TEST(Run, 15);
966 } // namespace test15
969 // test16: FP. Barrier (emulated by CV), 2 threads. {{{1
970 namespace test16 {
971 // Worker1: Worker2:
972 // 1. MU.Lock() a. MU.Lock()
973 // 2. write(GLOB) <------------ MU ----------> b. write(GLOB)
974 // 3. MU.Unlock() c. MU.Unlock()
975 // 4. MU2.Lock() d. MU2.Lock()
976 // 5. COND-- e. COND--
977 // 6. ANNOTATE_CONDVAR_SIGNAL(MU2) ---->V .
978 // 7. MU2.Await(COND == 0) <------------+------ f. ANNOTATE_CONDVAR_SIGNAL(MU2)
979 // 8. MU2.Unlock() V-----> g. MU2.Await(COND == 0)
980 // 9. read(GLOB) h. MU2.Unlock()
981 // i. read(GLOB)
984 // TODO: This way we may create too many edges in happens-before graph.
985 // Arndt Mühlenfeld in his PhD (TODO: link) suggests creating special nodes in
986 // happens-before graph to reduce the total number of edges.
987 // See figure 3.14.
990 int GLOB = 0;
991 Mutex MU;
992 Mutex MU2;
994 void Worker() {
995 MU.Lock();
996 GLOB++;
997 MU.Unlock();
999 MU2.Lock();
1000 COND--;
1001 ANNOTATE_CONDVAR_SIGNAL(&MU2);
1002 MU2.Await(Condition<int>(&ArgIsZero, &COND));
1003 MU2.Unlock();
1005 CHECK(GLOB == 2);
1008 void Run() {
1009 // ANNOTATE_EXPECT_RACE(&GLOB, "test16. FP. Fixed by MSMProp1 + Barrier support.");
1010 COND = 2;
1011 printf("test16: negative\n");
1012 MyThreadArray t(Worker, Worker);
1013 t.Start();
1014 t.Join();
1015 printf("\tGLOB=%d\n", GLOB);
1017 REGISTER_TEST2(Run, 16, FEATURE|NEEDS_ANNOTATIONS);
1018 } // namespace test16
1021 // test17: FP. Barrier (emulated by CV), 3 threads. {{{1
1022 namespace test17 {
1023 // Same as test16, but with 3 threads.
1024 int GLOB = 0;
1025 Mutex MU;
1026 Mutex MU2;
1028 void Worker() {
1029 MU.Lock();
1030 GLOB++;
1031 MU.Unlock();
1033 MU2.Lock();
1034 COND--;
1035 ANNOTATE_CONDVAR_SIGNAL(&MU2);
1036 MU2.Await(Condition<int>(&ArgIsZero, &COND));
1037 MU2.Unlock();
1039 CHECK(GLOB == 3);
1042 void Run() {
1043 // ANNOTATE_EXPECT_RACE(&GLOB, "test17. FP. Fixed by MSMProp1 + Barrier support.");
1044 COND = 3;
1045 printf("test17: negative\n");
1046 MyThreadArray t(Worker, Worker, Worker);
1047 t.Start();
1048 t.Join();
1049 printf("\tGLOB=%d\n", GLOB);
1051 REGISTER_TEST2(Run, 17, FEATURE|NEEDS_ANNOTATIONS);
1052 } // namespace test17
1055 // test18: TN. Synchronization via Await(), signaller gets there first. {{{1
1056 namespace test18 {
1057 int GLOB = 0;
1058 Mutex MU;
1059 // Same as test03, but uses Mutex::Await() instead of Mutex::LockWhen().
1061 void Waker() {
1062 usleep(100000); // Make sure the waiter blocks.
1063 GLOB = 1;
1065 MU.Lock();
1066 COND = 1; // We are done! Tell the Waiter.
1067 MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
1069 void Waiter() {
1070 ThreadPool pool(1);
1071 pool.StartWorkers();
1072 COND = 0;
1073 pool.Add(NewCallback(Waker));
1075 MU.Lock();
1076 MU.Await(Condition<int>(&ArgIsOne, &COND)); // calls ANNOTATE_CONDVAR_WAIT
1077 MU.Unlock(); // Waker is done!
1079 GLOB = 2;
1081 void Run() {
1082 printf("test18: negative\n");
1083 Waiter();
1084 printf("\tGLOB=%d\n", GLOB);
1086 REGISTER_TEST2(Run, 18, FEATURE|NEEDS_ANNOTATIONS);
1087 } // namespace test18
1089 // test19: TN. Synchronization via AwaitWithTimeout(). {{{1
1090 namespace test19 {
1091 int GLOB = 0;
1092 // Same as test18, but with AwaitWithTimeout. Do not timeout.
1093 Mutex MU;
1094 void Waker() {
1095 usleep(100000); // Make sure the waiter blocks.
1096 GLOB = 1;
1098 MU.Lock();
1099 COND = 1; // We are done! Tell the Waiter.
1100 MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
1102 void Waiter() {
1103 ThreadPool pool(1);
1104 pool.StartWorkers();
1105 COND = 0;
1106 pool.Add(NewCallback(Waker));
1108 MU.Lock();
1109 CHECK(MU.AwaitWithTimeout(Condition<int>(&ArgIsOne, &COND), INT_MAX));
1110 MU.Unlock();
1112 GLOB = 2;
1114 void Run() {
1115 printf("test19: negative\n");
1116 Waiter();
1117 printf("\tGLOB=%d\n", GLOB);
1119 REGISTER_TEST2(Run, 19, FEATURE|NEEDS_ANNOTATIONS);
1120 } // namespace test19
1122 // test20: TP. Incorrect synchronization via AwaitWhen(), timeout. {{{1
1123 namespace test20 {
1124 int GLOB = 0;
1125 Mutex MU;
1126 // True race. We timeout in AwaitWhen.
1127 void Waker() {
1128 GLOB = 1;
1129 usleep(100 * 1000);
1131 void Waiter() {
1132 ThreadPool pool(1);
1133 pool.StartWorkers();
1134 COND = 0;
1135 pool.Add(NewCallback(Waker));
1137 MU.Lock();
1138 CHECK(!MU.AwaitWithTimeout(Condition<int>(&ArgIsOne, &COND), 100));
1139 MU.Unlock();
1141 GLOB = 2;
1143 void Run() {
1144 FAST_MODE_INIT(&GLOB);
1145 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test20. TP.");
1146 printf("test20: positive\n");
1147 Waiter();
1148 printf("\tGLOB=%d\n", GLOB);
1150 REGISTER_TEST2(Run, 20, FEATURE|NEEDS_ANNOTATIONS);
1151 } // namespace test20
1153 // test21: TP. Incorrect synchronization via LockWhenWithTimeout(). {{{1
1154 namespace test21 {
1155 int GLOB = 0;
1156 // True race. We timeout in LockWhenWithTimeout().
1157 Mutex MU;
1158 void Waker() {
1159 GLOB = 1;
1160 usleep(100 * 1000);
1162 void Waiter() {
1163 ThreadPool pool(1);
1164 pool.StartWorkers();
1165 COND = 0;
1166 pool.Add(NewCallback(Waker));
1168 CHECK(!MU.LockWhenWithTimeout(Condition<int>(&ArgIsOne, &COND), 100));
1169 MU.Unlock();
1171 GLOB = 2;
1173 void Run() {
1174 FAST_MODE_INIT(&GLOB);
1175 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test21. TP.");
1176 printf("test21: positive\n");
1177 Waiter();
1178 printf("\tGLOB=%d\n", GLOB);
1180 REGISTER_TEST2(Run, 21, FEATURE|NEEDS_ANNOTATIONS);
1181 } // namespace test21
1183 // test22: TP. Incorrect synchronization via CondVar::WaitWithTimeout(). {{{1
1184 namespace test22 {
1185 int GLOB = 0;
1186 Mutex MU;
1187 // True race. We timeout in CondVar::WaitWithTimeout().
1188 void Waker() {
1189 GLOB = 1;
1190 usleep(100 * 1000);
1192 void Waiter() {
1193 ThreadPool pool(1);
1194 pool.StartWorkers();
1195 COND = 0;
1196 pool.Add(NewCallback(Waker));
1198 int64_t ms_left_to_wait = 100;
1199 int64_t deadline_ms = GetCurrentTimeMillis() + ms_left_to_wait;
1200 MU.Lock();
1201 while(COND != 1 && ms_left_to_wait > 0) {
1202 CV.WaitWithTimeout(&MU, ms_left_to_wait);
1203 ms_left_to_wait = deadline_ms - GetCurrentTimeMillis();
1205 MU.Unlock();
1207 GLOB = 2;
1209 void Run() {
1210 FAST_MODE_INIT(&GLOB);
1211 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test22. TP.");
1212 printf("test22: positive\n");
1213 Waiter();
1214 printf("\tGLOB=%d\n", GLOB);
1216 REGISTER_TEST(Run, 22);
1217 } // namespace test22
1219 // test23: TN. TryLock, ReaderLock, ReaderTryLock. {{{1
1220 namespace test23 {
1221 // Correct synchronization with TryLock, Lock, ReaderTryLock, ReaderLock.
1222 int GLOB = 0;
1223 Mutex MU;
1224 void Worker_TryLock() {
1225 for (int i = 0; i < 20; i++) {
1226 while (true) {
1227 if (MU.TryLock()) {
1228 GLOB++;
1229 MU.Unlock();
1230 break;
1232 usleep(1000);
1237 void Worker_ReaderTryLock() {
1238 for (int i = 0; i < 20; i++) {
1239 while (true) {
1240 if (MU.ReaderTryLock()) {
1241 CHECK(GLOB != 777);
1242 MU.ReaderUnlock();
1243 break;
1245 usleep(1000);
1250 void Worker_ReaderLock() {
1251 for (int i = 0; i < 20; i++) {
1252 MU.ReaderLock();
1253 CHECK(GLOB != 777);
1254 MU.ReaderUnlock();
1255 usleep(1000);
1259 void Worker_Lock() {
1260 for (int i = 0; i < 20; i++) {
1261 MU.Lock();
1262 GLOB++;
1263 MU.Unlock();
1264 usleep(1000);
1268 void Run() {
1269 printf("test23: negative\n");
1270 MyThreadArray t(Worker_TryLock,
1271 Worker_ReaderTryLock,
1272 Worker_ReaderLock,
1273 Worker_Lock
1275 t.Start();
1276 t.Join();
1277 printf("\tGLOB=%d\n", GLOB);
1279 REGISTER_TEST(Run, 23);
1280 } // namespace test23
1282 // test24: TN. Synchronization via ReaderLockWhen(). {{{1
1283 namespace test24 {
1284 int GLOB = 0;
1285 Mutex MU;
1286 // Same as test03, but uses ReaderLockWhen().
1288 void Waker() {
1289 usleep(100000); // Make sure the waiter blocks.
1290 GLOB = 1;
1292 MU.Lock();
1293 COND = 1; // We are done! Tell the Waiter.
1294 MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
1296 void Waiter() {
1297 ThreadPool pool(1);
1298 pool.StartWorkers();
1299 COND = 0;
1300 pool.Add(NewCallback(Waker));
1301 MU.ReaderLockWhen(Condition<int>(&ArgIsOne, &COND));
1302 MU.ReaderUnlock();
1304 GLOB = 2;
1306 void Run() {
1307 printf("test24: negative\n");
1308 Waiter();
1309 printf("\tGLOB=%d\n", GLOB);
1311 REGISTER_TEST2(Run, 24, FEATURE|NEEDS_ANNOTATIONS);
1312 } // namespace test24
1314 // test25: TN. Synchronization via ReaderLockWhenWithTimeout(). {{{1
1315 namespace test25 {
1316 int GLOB = 0;
1317 Mutex MU;
1318 // Same as test24, but uses ReaderLockWhenWithTimeout().
1319 // We do not timeout.
1321 void Waker() {
1322 usleep(100000); // Make sure the waiter blocks.
1323 GLOB = 1;
1325 MU.Lock();
1326 COND = 1; // We are done! Tell the Waiter.
1327 MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
1329 void Waiter() {
1330 ThreadPool pool(1);
1331 pool.StartWorkers();
1332 COND = 0;
1333 pool.Add(NewCallback(Waker));
1334 CHECK(MU.ReaderLockWhenWithTimeout(Condition<int>(&ArgIsOne, &COND), INT_MAX));
1335 MU.ReaderUnlock();
1337 GLOB = 2;
1339 void Run() {
1340 printf("test25: negative\n");
1341 Waiter();
1342 printf("\tGLOB=%d\n", GLOB);
1344 REGISTER_TEST2(Run, 25, FEATURE|NEEDS_ANNOTATIONS);
1345 } // namespace test25
1347 // test26: TP. Incorrect synchronization via ReaderLockWhenWithTimeout(). {{{1
1348 namespace test26 {
1349 int GLOB = 0;
1350 Mutex MU;
1351 // Same as test25, but we timeout and incorrectly assume happens-before.
1353 void Waker() {
1354 GLOB = 1;
1355 usleep(10000);
1357 void Waiter() {
1358 ThreadPool pool(1);
1359 pool.StartWorkers();
1360 COND = 0;
1361 pool.Add(NewCallback(Waker));
1362 CHECK(!MU.ReaderLockWhenWithTimeout(Condition<int>(&ArgIsOne, &COND), 100));
1363 MU.ReaderUnlock();
1365 GLOB = 2;
1367 void Run() {
1368 FAST_MODE_INIT(&GLOB);
1369 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test26. TP");
1370 printf("test26: positive\n");
1371 Waiter();
1372 printf("\tGLOB=%d\n", GLOB);
1374 REGISTER_TEST2(Run, 26, FEATURE|NEEDS_ANNOTATIONS);
1375 } // namespace test26
1378 // test27: TN. Simple synchronization via SpinLock. {{{1
1379 namespace test27 {
1380 #ifndef NO_SPINLOCK
1381 int GLOB = 0;
1382 SpinLock MU;
1383 void Worker() {
1384 MU.Lock();
1385 GLOB++;
1386 MU.Unlock();
1387 usleep(10000);
1390 void Run() {
1391 printf("test27: negative\n");
1392 MyThreadArray t(Worker, Worker, Worker, Worker);
1393 t.Start();
1394 t.Join();
1395 printf("\tGLOB=%d\n", GLOB);
1397 REGISTER_TEST2(Run, 27, FEATURE|NEEDS_ANNOTATIONS);
1398 #endif // NO_SPINLOCK
1399 } // namespace test27
1402 // test28: TN. Synchronization via Mutex, then PCQ. 3 threads {{{1
1403 namespace test28 {
1404 // Putter1: Getter: Putter2:
1405 // 1. MU.Lock() A. MU.Lock()
1406 // 2. write(GLOB) B. write(GLOB)
1407 // 3. MU.Unlock() C. MU.Unlock()
1408 // 4. Q.Put() ---------\ /------- D. Q.Put()
1409 // 5. MU.Lock() \-------> a. Q.Get() / E. MU.Lock()
1410 // 6. read(GLOB) b. Q.Get() <---------/ F. read(GLOB)
1411 // 7. MU.Unlock() (sleep) G. MU.Unlock()
1412 // c. read(GLOB)
1413 ProducerConsumerQueue Q(INT_MAX);
1414 int GLOB = 0;
1415 Mutex MU;
1417 void Putter() {
1418 MU.Lock();
1419 GLOB++;
1420 MU.Unlock();
1422 Q.Put(NULL);
1424 MU.Lock();
1425 CHECK(GLOB != 777);
1426 MU.Unlock();
1429 void Getter() {
1430 Q.Get();
1431 Q.Get();
1432 usleep(100000);
1433 CHECK(GLOB == 2);
1436 void Run() {
1437 printf("test28: negative\n");
1438 MyThreadArray t(Getter, Putter, Putter);
1439 t.Start();
1440 t.Join();
1441 printf("\tGLOB=%d\n", GLOB);
1443 REGISTER_TEST(Run, 28);
1444 } // namespace test28
1447 // test29: TN. Synchronization via Mutex, then PCQ. 4 threads. {{{1
1448 namespace test29 {
1449 // Similar to test28, but has two Getters and two PCQs.
1450 ProducerConsumerQueue *Q1, *Q2;
1451 Mutex MU;
1452 int GLOB = 0;
1454 void Putter(ProducerConsumerQueue *q) {
1455 MU.Lock();
1456 GLOB++;
1457 MU.Unlock();
1459 q->Put(NULL);
1460 q->Put(NULL);
1462 MU.Lock();
1463 CHECK(GLOB != 777);
1464 MU.Unlock();
1468 void Putter1() { Putter(Q1); }
1469 void Putter2() { Putter(Q2); }
1471 void Getter() {
1472 Q1->Get();
1473 Q2->Get();
1474 usleep(100000);
1475 CHECK(GLOB == 2);
1476 usleep(48000); // TODO: remove this when FP in test32 is fixed.
1479 void Run() {
1480 printf("test29: negative\n");
1481 Q1 = new ProducerConsumerQueue(INT_MAX);
1482 Q2 = new ProducerConsumerQueue(INT_MAX);
1483 MyThreadArray t(Getter, Getter, Putter1, Putter2);
1484 t.Start();
1485 t.Join();
1486 printf("\tGLOB=%d\n", GLOB);
1487 delete Q1;
1488 delete Q2;
1490 REGISTER_TEST(Run, 29);
1491 } // namespace test29
1494 // test30: TN. Synchronization via 'safe' race. Writer vs multiple Readers. {{{1
1495 namespace test30 {
1496 // This test shows a very risky kind of synchronization which is very easy
1497 // to get wrong. Actually, I am not sure I've got it right.
1499 // Writer: Reader1, Reader2, ..., ReaderN:
1500 // 1. write(GLOB[i]: i >= BOUNDARY) a. n = BOUNDARY
1501 // 2. HAPPENS_BEFORE(BOUNDARY+1) -------> b. HAPPENS_AFTER(n)
1502 // 3. BOUNDARY++; c. read(GLOB[i]: i < n)
1504 // Here we have a 'safe' race on accesses to BOUNDARY and
1505 // no actual races on accesses to GLOB[]:
1506 // Writer writes to GLOB[i] where i>=BOUNDARY and then increments BOUNDARY.
1507 // Readers read BOUNDARY and read GLOB[i] where i<BOUNDARY.
1509 // I am not completely sure that this scheme guarantees no race between
1510 // accesses to GLOB since compilers and CPUs
1511 // are free to rearrange memory operations.
1512 // I am actually sure that this scheme is wrong unless we use
1513 // some smart memory fencing...
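// A sketch (not used here, to keep the original test logic intact) of how the
// intended ordering could be made explicit with C++11 atomics (requires <atomic>):
//
//   std::atomic<int> BOUNDARY(0);
//   // Writer:  GLOB[j] = j; ...; BOUNDARY.store(i + 1, std::memory_order_release);
//   // Reader:  int n = BOUNDARY.load(std::memory_order_acquire);
//   //          reads of GLOB[0..n) then happen-after the Writer's stores.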
1516 const int N = 48;
1517 static int GLOB[N];
1518 volatile int BOUNDARY = 0;
1520 void Writer() {
1521 for (int i = 0; i < N; i++) {
1522 CHECK(BOUNDARY == i);
1523 for (int j = i; j < N; j++) {
1524 GLOB[j] = j;
1526 ANNOTATE_HAPPENS_BEFORE(reinterpret_cast<void*>(BOUNDARY+1));
1527 BOUNDARY++;
1528 usleep(1000);
1532 void Reader() {
1533 int n;
1534 do {
1535 n = BOUNDARY;
1536 if (n == 0) continue;
1537 ANNOTATE_HAPPENS_AFTER(reinterpret_cast<void*>(n));
1538 for (int i = 0; i < n; i++) {
1539 CHECK(GLOB[i] == i);
1541 usleep(100);
1542 } while(n < N);
1545 void Run() {
1546 FAST_MODE_INIT(&BOUNDARY);
1547 ANNOTATE_EXPECT_RACE((void*)(&BOUNDARY), "test30. Sync via 'safe' race.");
1548 printf("test30: negative\n");
1549 MyThreadArray t(Writer, Reader, Reader, Reader);
1550 t.Start();
1551 t.Join();
1552 printf("\tGLOB=%d\n", GLOB[N-1]);
1554 REGISTER_TEST2(Run, 30, FEATURE|NEEDS_ANNOTATIONS);
1555 } // namespace test30
1558 // test31: TN. Synchronization via 'safe' race. Writer vs Writer. {{{1
1559 namespace test31 {
1560 // This test is similar to test30, but
1561 // it has one Writer instead of multiple Readers.
1563 // Writer1: Writer2
1564 // 1. write(GLOB[i]: i >= BOUNDARY) a. n = BOUNDARY
1565 // 2. HAPPENS_BEFORE(BOUNDARY+1) -------> b. HAPPENS_AFTER(n)
1566 // 3. BOUNDARY++; c. write(GLOB[i]: i < n)
1569 const int N = 48;
1570 static int GLOB[N];
1571 volatile int BOUNDARY = 0;
1573 void Writer1() {
1574 for (int i = 0; i < N; i++) {
1575 CHECK(BOUNDARY == i);
1576 for (int j = i; j < N; j++) {
1577 GLOB[j] = j;
1579 ANNOTATE_HAPPENS_BEFORE(reinterpret_cast<void*>(BOUNDARY+1));
1580 BOUNDARY++;
1581 usleep(1000);
1585 void Writer2() {
1586 int n;
1587 do {
1588 n = BOUNDARY;
1589 if (n == 0) continue;
1590 ANNOTATE_HAPPENS_AFTER(reinterpret_cast<void*>(n));
1591 for (int i = 0; i < n; i++) {
1592 if(GLOB[i] == i) {
1593 GLOB[i]++;
1596 usleep(100);
1597 } while(n < N);
1600 void Run() {
1601 FAST_MODE_INIT(&BOUNDARY);
1602 ANNOTATE_EXPECT_RACE((void*)(&BOUNDARY), "test31. Sync via 'safe' race.");
1603 printf("test31: negative\n");
1604 MyThreadArray t(Writer1, Writer2);
1605 t.Start();
1606 t.Join();
1607 printf("\tGLOB=%d\n", GLOB[N-1]);
1609 REGISTER_TEST2(Run, 31, FEATURE|NEEDS_ANNOTATIONS);
1610 } // namespace test31
1613 // test32: FP. Synchronization via thread create/join. W/R. {{{1
1614 namespace test32 {
1615 // This test is well synchronized but helgrind 3.3.0 reports a race.
1617 // Parent: Writer: Reader:
1618 // 1. Start(Reader) -----------------------\ .
1619 // \ .
1620 // 2. Start(Writer) ---\ \ .
1621 // \---> a. MU.Lock() \--> A. sleep(long enough)
1622 // b. write(GLOB)
1623 // /---- c. MU.Unlock()
1624 // 3. Join(Writer) <---/
1625 // B. MU.Lock()
1626 // C. read(GLOB)
1627 // /------------ D. MU.Unlock()
1628 // 4. Join(Reader) <----------------/
1629 // 5. write(GLOB)
1632 // The call to usleep() in Reader is not part of the synchronization,
1633 // it is required to trigger the false positive in helgrind 3.3.0.
1635 int GLOB = 0;
1636 Mutex MU;
1638 void Writer() {
1639 MU.Lock();
1640 GLOB = 1;
1641 MU.Unlock();
1644 void Reader() {
1645 usleep(480000);
1646 MU.Lock();
1647 CHECK(GLOB != 777);
1648 MU.Unlock();
1651 void Parent() {
1652 MyThread r(Reader);
1653 MyThread w(Writer);
1654 r.Start();
1655 w.Start();
1657 w.Join(); // 'w' joins first.
1658 r.Join();
1660 GLOB = 2;
1663 void Run() {
1664 // ANNOTATE_EXPECT_RACE(&GLOB, "test32. FP. Fixed by MSMProp1.");
1665 printf("test32: negative\n");
1666 Parent();
1667 printf("\tGLOB=%d\n", GLOB);
1670 REGISTER_TEST(Run, 32);
1671 } // namespace test32
1674 // test33: STAB. Stress test for the number of thread sets (TSETs). {{{1
1675 namespace test33 {
1676 int GLOB = 0;
1677 // Here we access N memory locations from within log(N) threads.
1678 // We do it in such a way that helgrind creates nearly all possible TSETs.
1679 // Then we join all threads and start again (N_iter times).
1680 const int N_iter = 48;
1681 const int Nlog = 15;
1682 const int N = 1 << Nlog;
1683 static int ARR[N];
1684 Mutex MU;
1686 void Worker() {
1687 MU.Lock();
1688 int n = ++GLOB;
1689 MU.Unlock();
1691 n %= Nlog;
1692 for (int i = 0; i < N; i++) {
1693 // ARR[i] is accessed by threads from i-th subset
1694 if (i & (1 << n)) {
1695 CHECK(ARR[i] == 0);
1700 void Run() {
1701 printf("test33:\n");
1703 std::vector<MyThread*> vec(Nlog);
1705 for (int j = 0; j < N_iter; j++) {
1706 // Create and start Nlog threads
1707 for (int i = 0; i < Nlog; i++) {
1708 vec[i] = new MyThread(Worker);
1710 for (int i = 0; i < Nlog; i++) {
1711 vec[i]->Start();
1713 // Join all threads.
1714 for (int i = 0; i < Nlog; i++) {
1715 vec[i]->Join();
1716 delete vec[i];
1718 printf("------------------\n");
1721 printf("\tGLOB=%d; ARR[1]=%d; ARR[7]=%d; ARR[N-1]=%d\n",
1722 GLOB, ARR[1], ARR[7], ARR[N-1]);
1724 REGISTER_TEST2(Run, 33, STABILITY|EXCLUDE_FROM_ALL);
1725 } // namespace test33
1728 // test34: STAB. Stress test for the number of locks sets (LSETs). {{{1
1729 namespace test34 {
1730 // Similar to test33, but for lock sets.
1731 int GLOB = 0;
1732 const int N_iter = 48;
1733 const int Nlog = 10;
1734 const int N = 1 << Nlog;
1735 static int ARR[N];
1736 static Mutex *MUs[Nlog];
1738 void Worker() {
1739 for (int i = 0; i < N; i++) {
1740 // ARR[i] is protected by MUs from i-th subset of all MUs
1741 for (int j = 0; j < Nlog; j++) if (i & (1 << j)) MUs[j]->Lock();
1742 CHECK(ARR[i] == 0);
1743 for (int j = 0; j < Nlog; j++) if (i & (1 << j)) MUs[j]->Unlock();
1747 void Run() {
1748 printf("test34:\n");
1749 for (int iter = 0; iter < N_iter; iter++) {
1750 for (int i = 0; i < Nlog; i++) {
1751 MUs[i] = new Mutex;
1753 MyThreadArray t(Worker, Worker);
1754 t.Start();
1755 t.Join();
1756 for (int i = 0; i < Nlog; i++) {
1757 delete MUs[i];
1759 printf("------------------\n");
1761 printf("\tGLOB=%d\n", GLOB);
1763 REGISTER_TEST2(Run, 34, STABILITY|EXCLUDE_FROM_ALL);
1764 } // namespace test34
1767 // test35: PERF. Lots of mutexes and lots of call to free(). {{{1
1768 namespace test35 {
1769 // Helgrind 3.3.0 is very slow in shadow_mem_make_NoAccess(). Fixed locally.
1770 // With the fix helgrind runs this test in about a minute.
1771 // Without the fix -- about 5 minutes (on a C2D 2.4GHz).
1773 // TODO: need to figure out the best way for performance testing.
1774 int **ARR;
1775 const int N_mu = 25000;
1776 const int N_free = 48000;
1778 void Worker() {
1779 for (int i = 0; i < N_free; i++)
1780 CHECK(777 == *ARR[i]);
1783 void Run() {
1784 printf("test35:\n");
1785 std::vector<Mutex*> mus;
1787 ARR = new int *[N_free];
1788 for (int i = 0; i < N_free; i++) {
1789 const int c = N_free / N_mu;
1790 if ((i % c) == 0) {
1791 mus.push_back(new Mutex);
1792 mus.back()->Lock();
1793 mus.back()->Unlock();
1795 ARR[i] = new int(777);
1798 // Need to put all ARR[i] into shared state in order
1799 // to trigger the performance bug.
1800 MyThreadArray t(Worker, Worker);
1801 t.Start();
1802 t.Join();
1804 for (int i = 0; i < N_free; i++) delete ARR[i];
1805 delete [] ARR;
1807 for (size_t i = 0; i < mus.size(); i++) {
1808 delete mus[i];
1811 REGISTER_TEST2(Run, 35, PERFORMANCE|EXCLUDE_FROM_ALL);
1812 } // namespace test35
1815 // test36: TN. Synchronization via Mutex, then PCQ. 3 threads. W/W {{{1
1816 namespace test36 {
1817 // variation of test28 (W/W instead of W/R)
1819 // Putter1: Getter: Putter2:
1820 // 1. MU.Lock(); A. MU.Lock()
1821 // 2. write(GLOB) B. write(GLOB)
1822 // 3. MU.Unlock() C. MU.Unlock()
1823 // 4. Q.Put() ---------\ /------- D. Q.Put()
1824 // 5. MU1.Lock() \-------> a. Q.Get() / E. MU1.Lock()
1825 // 6. MU.Lock() b. Q.Get() <---------/ F. MU.Lock()
1826 // 7. write(GLOB) G. write(GLOB)
1827 // 8. MU.Unlock() H. MU.Unlock()
1828 // 9. MU1.Unlock() (sleep) I. MU1.Unlock()
1829 // c. MU1.Lock()
1830 // d. write(GLOB)
1831 // e. MU1.Unlock()
1832 ProducerConsumerQueue Q(INT_MAX);
1833 int GLOB = 0;
1834 Mutex MU, MU1;
1836 void Putter() {
1837 MU.Lock();
1838 GLOB++;
1839 MU.Unlock();
1841 Q.Put(NULL);
1843 MU1.Lock();
1844 MU.Lock();
1845 GLOB++;
1846 MU.Unlock();
1847 MU1.Unlock();
1850 void Getter() {
1851 Q.Get();
1852 Q.Get();
1853 usleep(100000);
1854 MU1.Lock();
1855 GLOB++;
1856 MU1.Unlock();
1859 void Run() {
1860 printf("test36: negative \n");
1861 MyThreadArray t(Getter, Putter, Putter);
1862 t.Start();
1863 t.Join();
1864 printf("\tGLOB=%d\n", GLOB);
1866 REGISTER_TEST(Run, 36);
1867 } // namespace test36
1870 // test37: TN. Simple synchronization (write vs read). {{{1
1871 namespace test37 {
1872 int GLOB = 0;
1873 Mutex MU;
1874 // Similar to test10, but properly locked.
1875 // Writer: Reader:
1876 // 1. MU.Lock()
1877 // 2. write
1878 // 3. MU.Unlock()
1879 // a. MU.Lock()
1880 // b. read
1881 // c. MU.Unlock();
1883 void Writer() {
1884 MU.Lock();
1885 GLOB = 3;
1886 MU.Unlock();
1888 void Reader() {
1889 usleep(100000);
1890 MU.Lock();
1891 CHECK(GLOB != -777);
1892 MU.Unlock();
1895 void Run() {
1896 printf("test37: negative\n");
1897 MyThreadArray t(Writer, Reader);
1898 t.Start();
1899 t.Join();
1900 printf("\tGLOB=%d\n", GLOB);
1902 REGISTER_TEST(Run, 37);
1903 } // namespace test37
1906 // test38: TN. Synchronization via Mutexes and PCQ. 4 threads. W/W {{{1
1907 namespace test38 {
1908 // Fusion of test29 and test36.
1910 // Putter1: Putter2: Getter1: Getter2:
1911 // MU1.Lock() MU1.Lock()
1912 // write(GLOB) write(GLOB)
1913 // MU1.Unlock() MU1.Unlock()
1914 // Q1.Put() Q2.Put()
1915 // Q1.Put() Q2.Put()
1916 // MU1.Lock() MU1.Lock()
1917 // MU2.Lock() MU2.Lock()
1918 // write(GLOB) write(GLOB)
1919 // MU2.Unlock() MU2.Unlock()
1920 // MU1.Unlock() MU1.Unlock() sleep sleep
1921 // Q1.Get() Q1.Get()
1922 // Q2.Get() Q2.Get()
1923 // MU2.Lock() MU2.Lock()
1924 // write(GLOB) write(GLOB)
1925 // MU2.Unlock() MU2.Unlock()
1929 ProducerConsumerQueue *Q1, *Q2;
1930 int GLOB = 0;
1931 Mutex MU, MU1, MU2;
1933 void Putter(ProducerConsumerQueue *q) {
1934 MU1.Lock();
1935 GLOB++;
1936 MU1.Unlock();
1938 q->Put(NULL);
1939 q->Put(NULL);
1941 MU1.Lock();
1942 MU2.Lock();
1943 GLOB++;
1944 MU2.Unlock();
1945 MU1.Unlock();
1949 void Putter1() { Putter(Q1); }
1950 void Putter2() { Putter(Q2); }
1952 void Getter() {
1953 usleep(100000);
1954 Q1->Get();
1955 Q2->Get();
1957 MU2.Lock();
1958 GLOB++;
1959 MU2.Unlock();
1961 usleep(48000); // TODO: remove this when FP in test32 is fixed.
1964 void Run() {
1965 printf("test38: negative\n");
1966 Q1 = new ProducerConsumerQueue(INT_MAX);
1967 Q2 = new ProducerConsumerQueue(INT_MAX);
1968 MyThreadArray t(Getter, Getter, Putter1, Putter2);
1969 t.Start();
1970 t.Join();
1971 printf("\tGLOB=%d\n", GLOB);
1972 delete Q1;
1973 delete Q2;
1975 REGISTER_TEST(Run, 38);
1976 } // namespace test38
1978 // test39: FP. Barrier. {{{1
1979 namespace test39 {
1980 #ifndef NO_BARRIER
1981 // Same as test17 but uses Barrier class (pthread_barrier_t).
1982 int GLOB = 0;
1983 const int N_threads = 3;
1984 Barrier barrier(N_threads);
1985 Mutex MU;
1987 void Worker() {
1988 MU.Lock();
1989 GLOB++;
1990 MU.Unlock();
1991 barrier.Block();
1992 CHECK(GLOB == N_threads);
1994 void Run() {
1995 ANNOTATE_TRACE_MEMORY(&GLOB);
1996 // ANNOTATE_EXPECT_RACE(&GLOB, "test39. FP. Fixed by MSMProp1. Barrier.");
1997 printf("test39: negative\n");
1999 ThreadPool pool(N_threads);
2000 pool.StartWorkers();
2001 for (int i = 0; i < N_threads; i++) {
2002 pool.Add(NewCallback(Worker));
2004 } // all folks are joined here.
2005 printf("\tGLOB=%d\n", GLOB);
2007 REGISTER_TEST(Run, 39);
2008 #endif // NO_BARRIER
2009 } // namespace test39
2012 // test40: FP. Synchronization via Mutexes and PCQ. 4 threads. W/W {{{1
2013 namespace test40 {
2014 // Similar to test38 but with different order of events (due to sleep).
2016 // Putter1: Putter2: Getter1: Getter2:
2017 // MU1.Lock() MU1.Lock()
2018 // write(GLOB) write(GLOB)
2019 // MU1.Unlock() MU1.Unlock()
2020 // Q1.Put() Q2.Put()
2021 // Q1.Put() Q2.Put()
2022 // Q1.Get() Q1.Get()
2023 // Q2.Get() Q2.Get()
2024 // MU2.Lock() MU2.Lock()
2025 // write(GLOB) write(GLOB)
2026 // MU2.Unlock() MU2.Unlock()
2028 // MU1.Lock() MU1.Lock()
2029 // MU2.Lock() MU2.Lock()
2030 // write(GLOB) write(GLOB)
2031 // MU2.Unlock() MU2.Unlock()
2032 // MU1.Unlock() MU1.Unlock()
2035 ProducerConsumerQueue *Q1, *Q2;
2036 int GLOB = 0;
2037 Mutex MU, MU1, MU2;
2039 void Putter(ProducerConsumerQueue *q) {
2040 MU1.Lock();
2041 GLOB++;
2042 MU1.Unlock();
2044 q->Put(NULL);
2045 q->Put(NULL);
2046 usleep(100000);
2048 MU1.Lock();
2049 MU2.Lock();
2050 GLOB++;
2051 MU2.Unlock();
2052 MU1.Unlock();
2056 void Putter1() { Putter(Q1); }
2057 void Putter2() { Putter(Q2); }
2059 void Getter() {
2060 Q1->Get();
2061 Q2->Get();
2063 MU2.Lock();
2064 GLOB++;
2065 MU2.Unlock();
2067 usleep(48000); // TODO: remove this when FP in test32 is fixed.
2070 void Run() {
2071 // ANNOTATE_EXPECT_RACE(&GLOB, "test40. FP. Fixed by MSMProp1. Complex Stuff.");
2072 printf("test40: negative\n");
2073 Q1 = new ProducerConsumerQueue(INT_MAX);
2074 Q2 = new ProducerConsumerQueue(INT_MAX);
2075 MyThreadArray t(Getter, Getter, Putter1, Putter2);
2076 t.Start();
2077 t.Join();
2078 printf("\tGLOB=%d\n", GLOB);
2079 delete Q1;
2080 delete Q2;
2082 REGISTER_TEST(Run, 40);
2083 } // namespace test40
2085 // test41: TN. Test for race that appears when loading a dynamic symbol. {{{1
2086 namespace test41 {
2087 void Worker() {
2088 ANNOTATE_NO_OP(NULL); // An empty function, loaded from dll.
2090 void Run() {
2091 printf("test41: negative\n");
2092 MyThreadArray t(Worker, Worker, Worker);
2093 t.Start();
2094 t.Join();
2096 REGISTER_TEST2(Run, 41, FEATURE|NEEDS_ANNOTATIONS);
2097 } // namespace test41
2100 // test42: TN. Using the same cond var several times. {{{1
2101 namespace test42 {
2102 int GLOB = 0;
2103 int COND = 0;
2104 int N_threads = 3;
2105 Mutex MU;
2107 void Worker1() {
2108 GLOB=1;
2110 MU.Lock();
2111 COND = 1;
2112 CV.Signal();
2113 MU.Unlock();
2115 MU.Lock();
2116 while (COND != 0)
2117 CV.Wait(&MU);
2118 ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU);
2119 MU.Unlock();
2121 GLOB=3;
2125 void Worker2() {
2127 MU.Lock();
2128 while (COND != 1)
2129 CV.Wait(&MU);
2130 ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU);
2131 MU.Unlock();
2133 GLOB=2;
2135 MU.Lock();
2136 COND = 0;
2137 CV.Signal();
2138 MU.Unlock();
2142 void Run() {
2143 // ANNOTATE_EXPECT_RACE(&GLOB, "test42. TN. debugging.");
2144 printf("test42: negative\n");
2145 MyThreadArray t(Worker1, Worker2);
2146 t.Start();
2147 t.Join();
2148 printf("\tGLOB=%d\n", GLOB);
2150 REGISTER_TEST2(Run, 42, FEATURE|NEEDS_ANNOTATIONS);
2151 } // namespace test42
2155 // test43: TN. {{{1
2156 namespace test43 {
2158 // Putter: Getter:
2159 // 1. write
2160 // 2. Q.Put() --\ .
2161 // 3. read \--> a. Q.Get()
2162 // b. read
2163 int GLOB = 0;
2164 ProducerConsumerQueue Q(INT_MAX);
2165 void Putter() {
2166 GLOB = 1;
2167 Q.Put(NULL);
2168 CHECK(GLOB == 1);
2170 void Getter() {
2171 Q.Get();
2172 usleep(100000);
2173 CHECK(GLOB == 1);
2175 void Run() {
2176 printf("test43: negative\n");
2177 MyThreadArray t(Putter, Getter);
2178 t.Start();
2179 t.Join();
2180 printf("\tGLOB=%d\n", GLOB);
2182 REGISTER_TEST(Run, 43)
2183 } // namespace test43
2186 // test44: FP. {{{1
2187 namespace test44 {
2189 // Putter: Getter:
2190 // 1. read
2191 // 2. Q.Put() --\ .
2192 // 3. MU.Lock() \--> a. Q.Get()
2193 // 4. write
2194 // 5. MU.Unlock()
2195 // b. MU.Lock()
2196 // c. write
2197 // d. MU.Unlock();
2198 int GLOB = 0;
2199 Mutex MU;
2200 ProducerConsumerQueue Q(INT_MAX);
2201 void Putter() {
2202 CHECK(GLOB == 0);
2203 Q.Put(NULL);
2204 MU.Lock();
2205 GLOB = 1;
2206 MU.Unlock();
2208 void Getter() {
2209 Q.Get();
2210 usleep(100000);
2211 MU.Lock();
2212 GLOB = 1;
2213 MU.Unlock();
2215 void Run() {
2216 // ANNOTATE_EXPECT_RACE(&GLOB, "test44. FP. Fixed by MSMProp1.");
2217 printf("test44: negative\n");
2218 MyThreadArray t(Putter, Getter);
2219 t.Start();
2220 t.Join();
2221 printf("\tGLOB=%d\n", GLOB);
2223 REGISTER_TEST(Run, 44)
2224 } // namespace test44
2227 // test45: TN. {{{1
2228 namespace test45 {
2230 // Putter: Getter:
2231 // 1. read
2232 // 2. Q.Put() --\ .
2233 // 3. MU.Lock() \--> a. Q.Get()
2234 // 4. write
2235 // 5. MU.Unlock()
2236 // b. MU.Lock()
2237 // c. read
2238 // d. MU.Unlock();
2239 int GLOB = 0;
2240 Mutex MU;
2241 ProducerConsumerQueue Q(INT_MAX);
2242 void Putter() {
2243 CHECK(GLOB == 0);
2244 Q.Put(NULL);
2245 MU.Lock();
2246 GLOB++;
2247 MU.Unlock();
2249 void Getter() {
2250 Q.Get();
2251 usleep(100000);
2252 MU.Lock();
2253 CHECK(GLOB <= 1);
2254 MU.Unlock();
2256 void Run() {
2257 printf("test45: negative\n");
2258 MyThreadArray t(Putter, Getter);
2259 t.Start();
2260 t.Join();
2261 printf("\tGLOB=%d\n", GLOB);
2263 REGISTER_TEST(Run, 45)
2264 } // namespace test45
2267 // test46: FN. {{{1
2268 namespace test46 {
2270 // First: Second:
2271 // 1. write
2272 // 2. MU.Lock()
2273 // 3. write
2274 // 4. MU.Unlock() (sleep)
2275 // a. MU.Lock()
2276 // b. write
2277 // c. MU.Unlock();
2278 int GLOB = 0;
2279 Mutex MU;
2280 void First() {
2281 GLOB++;
2282 MU.Lock();
2283 GLOB++;
2284 MU.Unlock();
2286 void Second() {
2287 usleep(480000);
2288 MU.Lock();
2289 GLOB++;
2290 MU.Unlock();
2292 // just a print.
2293 // If we move it to Run() we will get a report in MSMHelgrind
2294 // due to its false positive (test32).
2295 MU.Lock();
2296 printf("\tGLOB=%d\n", GLOB);
2297 MU.Unlock();
2299 void Run() {
2300 ANNOTATE_TRACE_MEMORY(&GLOB);
2301 MyThreadArray t(First, Second);
2302 t.Start();
2303 t.Join();
2305 REGISTER_TEST(Run, 46)
2306 } // namespace test46
2309 // test47: TP. Not detected by pure happens-before detectors. {{{1
2310 namespace test47 {
2311 // A true race that can not be detected by a pure happens-before
2312 // race detector.
2314 // First: Second:
2315 // 1. write
2316 // 2. MU.Lock()
2317 // 3. MU.Unlock() (sleep)
2318 // a. MU.Lock()
2319 // b. MU.Unlock();
2320 // c. write
2321 int GLOB = 0;
2322 Mutex MU;
2323 void First() {
2324 GLOB=1;
2325 MU.Lock();
2326 MU.Unlock();
2328 void Second() {
2329 usleep(480000);
2330 MU.Lock();
2331 MU.Unlock();
2332 GLOB++;
2334 void Run() {
2335 FAST_MODE_INIT(&GLOB);
2336 if (!Tsan_PureHappensBefore())
2337 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test47. TP. Not detected by pure HB.");
2338 printf("test47: positive\n");
2339 MyThreadArray t(First, Second);
2340 t.Start();
2341 t.Join();
2342 printf("\tGLOB=%d\n", GLOB);
2344 REGISTER_TEST(Run, 47)
2345 } // namespace test47
2348 // test48: FN. Simple race (single write vs multiple reads). {{{1
2349 namespace test48 {
2350 int GLOB = 0;
2351 // Same as test10, but with a single writer and multiple readers.
2352 // A simple data race between a single writer and multiple readers.
2353 // Write happens before the reads (enforced by sleep),
2356 // Writer: Readers:
2357 // 1. write(GLOB) a. sleep(long enough so that GLOB
2358 // is most likely initialized by Writer)
2359 // b. read(GLOB)
2362 // Eraser algorithm does not detect the race here,
2363 // see Section 2.2 of http://citeseer.ist.psu.edu/savage97eraser.html.
2365 void Writer() {
2366 GLOB = 3;
2368 void Reader() {
2369 usleep(100000);
2370 CHECK(GLOB != -777);
2373 void Run() {
2374 FAST_MODE_INIT(&GLOB);
2375 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test48. TP. FN in MSMHelgrind.");
2376 printf("test48: positive\n");
2377 MyThreadArray t(Writer, Reader,Reader,Reader);
2378 t.Start();
2379 t.Join();
2380 printf("\tGLOB=%d\n", GLOB);
2382 REGISTER_TEST(Run, 48)
2383 } // namespace test48
2386 // test49: FN. Simple race (single write vs multiple reads). {{{1
2387 namespace test49 {
2388 int GLOB = 0;
2389 // Same as test10 but with multiple read operations done by a single reader.
2390 // A simple data race between the writer and the reader.
2391 // Write happens before Reads in real time only (enforced by the sleep); there is no synchronization.
2393 // Writer: Reader:
2394 // 1. write(GLOB) a. sleep(long enough so that GLOB
2395 // is most likely initialized by Writer)
2396 // b. read(GLOB)
2397 // c. read(GLOB)
2398 // d. read(GLOB)
2399 // e. read(GLOB)
2402 // The Eraser algorithm does not detect the race here;
2403 // see Section 2.2 of http://citeseer.ist.psu.edu/savage97eraser.html.
2405 void Writer() {
2406 GLOB = 3;
2408 void Reader() {
2409 usleep(100000);
2410 CHECK(GLOB != -777);
2411 CHECK(GLOB != -777);
2412 CHECK(GLOB != -777);
2413 CHECK(GLOB != -777);
2416 void Run() {
2417 FAST_MODE_INIT(&GLOB);
2418 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test49. TP. FN in MSMHelgrind.");
2419 printf("test49: positive\n");
2420 MyThreadArray t(Writer, Reader);
2421 t.Start();
2422 t.Join();
2423 printf("\tGLOB=%d\n", GLOB);
2425 REGISTER_TEST(Run, 49);
2426 } // namespace test49
2429 // test50: TP. Synchronization via CondVar. {{{1
2430 namespace test50 {
2431 int GLOB = 0;
2432 Mutex MU;
2433 // The last two write accesses to GLOB are not synchronized
2435 // Waiter: Waker:
2436 // 1. COND = 0
2437 // 2. Start(Waker)
2438 // 3. MU.Lock() a. write(GLOB)
2439 // b. MU.Lock()
2440 // c. COND = 1
2441 // /--- d. CV.Signal()
2442 // 4. while(COND != 1) / e. MU.Unlock()
2443 // CV.Wait(MU) <---/
2444 // 5. MU.Unlock()
2445 // 6. write(GLOB) f. MU.Lock()
2446 // g. write(GLOB)
2447 // h. MU.Unlock()
2450 void Waker() {
2451 usleep(100000); // Make sure the waiter blocks.
2453 GLOB = 1;
2455 MU.Lock();
2456 COND = 1;
2457 CV.Signal();
2458 MU.Unlock();
2460 usleep(100000);
2461 MU.Lock();
2462 GLOB = 3;
2463 MU.Unlock();
2466 void Waiter() {
2467 ThreadPool pool(1);
2468 pool.StartWorkers();
2469 COND = 0;
2470 pool.Add(NewCallback(Waker));
2472 MU.Lock();
2473 while(COND != 1)
2474 CV.Wait(&MU);
2475 ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU);
2476 MU.Unlock();
2478 GLOB = 2;
2480 void Run() {
2481 FAST_MODE_INIT(&GLOB);
2482 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test50. TP.");
2483 printf("test50: positive\n");
2484 Waiter();
2485 printf("\tGLOB=%d\n", GLOB);
2487 REGISTER_TEST2(Run, 50, FEATURE|NEEDS_ANNOTATIONS);
2488 } // namespace test50
2491 // test51: TP. Synchronization via CondVar: problem with several signals. {{{1
2492 namespace test51 {
2493 int GLOB = 0;
2494 int COND = 0;
2495 Mutex MU;
2498 // Scheduler-dependent results because of several signals;
2499 // the second signal will be lost.
2501 // Waiter: Waker:
2502 // 1. Start(Waker)
2503 // 2. MU.Lock()
2504 // 3. while(COND)
2505 // CV.Wait(MU)<-\ .
2506 // 4. MU.Unlock() \ .
2507 // 5. write(GLOB) \ a. write(GLOB)
2508 // \ b. MU.Lock()
2509 // \ c. COND = 1
2510 // \--- d. CV.Signal()
2511 // e. MU.Unlock()
2513 // f. write(GLOB)
2515 // g. MU.Lock()
2516 // h. COND = 1
2517 // LOST<---- i. CV.Signal()
2518 // j. MU.Unlock()
2520 void Waker() {
2522 usleep(10000); // Make sure the waiter blocks.
2524 GLOB = 1;
2526 MU.Lock();
2527 COND = 1;
2528 CV.Signal();
2529 MU.Unlock();
2531 usleep(10000); // Make sure the waiter is signalled.
2533 GLOB = 2;
2535 MU.Lock();
2536 COND = 1;
2537 CV.Signal(); //Lost Signal
2538 MU.Unlock();
2541 void Waiter() {
2543 ThreadPool pool(1);
2544 pool.StartWorkers();
2545 pool.Add(NewCallback(Waker));
2547 MU.Lock();
2548 while(COND != 1)
2549 CV.Wait(&MU);
2550 MU.Unlock();
2553 GLOB = 3;
2555 void Run() {
2556 FAST_MODE_INIT(&GLOB);
2557 ANNOTATE_EXPECT_RACE(&GLOB, "test51. TP.");
2558 printf("test51: positive\n");
2559 Waiter();
2560 printf("\tGLOB=%d\n", GLOB);
2562 REGISTER_TEST(Run, 51);
2563 } // namespace test51
2566 // test52: TP. Synchronization via CondVar: problem with several signals. {{{1
2567 namespace test52 {
2568 int GLOB = 0;
2569 int COND = 0;
2570 Mutex MU;
2572 // Same as test51, but the first signal will be lost.
2573 // Scheduler-dependent results because of several signals.
2575 // Waiter: Waker:
2576 // 1. Start(Waker)
2577 // a. write(GLOB)
2578 // b. MU.Lock()
2579 // c. COND = 1
2580 // LOST<---- d. CV.Signal()
2581 // e. MU.Unlock()
2583 // 2. MU.Lock()
2584 // 3. while(COND)
2585 // CV.Wait(MU)<-\ .
2586 // 4. MU.Unlock() \ f. write(GLOB)
2587 // 5. write(GLOB) \ .
2588 // \ g. MU.Lock()
2589 // \ h. COND = 1
2590 // \--- i. CV.Signal()
2591 // j. MU.Unlock()
2593 void Waker() {
2595 GLOB = 1;
2597 MU.Lock();
2598 COND = 1;
2599 CV.Signal(); //lost signal
2600 MU.Unlock();
2602 usleep(20000); // Make sure the waiter blocks
2604 GLOB = 2;
2606 MU.Lock();
2607 COND = 1;
2608 CV.Signal();
2609 MU.Unlock();
2612 void Waiter() {
2613 ThreadPool pool(1);
2614 pool.StartWorkers();
2615 pool.Add(NewCallback(Waker));
2617 usleep(10000); // Make sure the first signal will be lost
2619 MU.Lock();
2620 while(COND != 1)
2621 CV.Wait(&MU);
2622 MU.Unlock();
2624 GLOB = 3;
2626 void Run() {
2627 FAST_MODE_INIT(&GLOB);
2628 ANNOTATE_EXPECT_RACE(&GLOB, "test52. TP.");
2629 printf("test52: positive\n");
2630 Waiter();
2631 printf("\tGLOB=%d\n", GLOB);
2633 REGISTER_TEST(Run, 52);
2634 } // namespace test52
2637 // test53: FP. Synchronization via implicit semaphore. {{{1
2638 namespace test53 {
2639 // Correctly synchronized test, but the common lockset is empty.
2640 // The variable FLAG works as an implicit semaphore.
2641 // MSMHelgrind still does not complain since it does not maintain the lockset
2642 // in the Exclusive state. But MSMProp1 does complain.
2643 // See also test54.
2646 // Initializer: Users
2647 // 1. MU1.Lock()
2648 // 2. write(GLOB)
2649 // 3. FLAG = true
2650 // 4. MU1.Unlock()
2651 // a. MU1.Lock()
2652 // b. f = FLAG;
2653 // c. MU1.Unlock()
2654 // d. if (!f) goto a.
2655 // e. MU2.Lock()
2656 // f. write(GLOB)
2657 // g. MU2.Unlock()
2660 int GLOB = 0;
2661 bool FLAG = false;
2662 Mutex MU1, MU2;
2664 void Initializer() {
2665 MU1.Lock();
2666 GLOB = 1000;
2667 FLAG = true;
2668 MU1.Unlock();
2669 usleep(100000); // just in case
2672 void User() {
2673 bool f = false;
2674 while(!f) {
2675 MU1.Lock();
2676 f = FLAG;
2677 MU1.Unlock();
2678 usleep(10000);
2680 // at this point Initializer will not access GLOB again
2681 MU2.Lock();
2682 CHECK(GLOB >= 1000);
2683 GLOB++;
2684 MU2.Unlock();
2687 void Run() {
2688 FAST_MODE_INIT(&GLOB);
2689 if (!Tsan_PureHappensBefore())
2690 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test53. FP. Implicit semaphore");
2691 printf("test53: FP. false positive, Implicit semaphore\n");
2692 MyThreadArray t(Initializer, User, User);
2693 t.Start();
2694 t.Join();
2695 printf("\tGLOB=%d\n", GLOB);
2697 REGISTER_TEST(Run, 53)
2698 } // namespace test53
2701 // test54: TN. Synchronization via implicit semaphore. Annotated {{{1
2702 namespace test54 {
2703 // Same as test53, but annotated.
2704 int GLOB = 0;
2705 bool FLAG = false;
2706 Mutex MU1, MU2;
2708 void Initializer() {
2709 MU1.Lock();
2710 GLOB = 1000;
2711 FLAG = true;
2712 ANNOTATE_CONDVAR_SIGNAL(&GLOB);
2713 MU1.Unlock();
2714 usleep(100000); // just in case
2717 void User() {
2718 bool f = false;
2719 while(!f) {
2720 MU1.Lock();
2721 f = FLAG;
2722 MU1.Unlock();
2723 usleep(10000);
2725 // at this point Initializer will not access GLOB again
2726 ANNOTATE_CONDVAR_WAIT(&GLOB);
2727 MU2.Lock();
2728 CHECK(GLOB >= 1000);
2729 GLOB++;
2730 MU2.Unlock();
2733 void Run() {
2734 printf("test54: negative\n");
2735 MyThreadArray t(Initializer, User, User);
2736 t.Start();
2737 t.Join();
2738 printf("\tGLOB=%d\n", GLOB);
2740 REGISTER_TEST2(Run, 54, FEATURE|NEEDS_ANNOTATIONS)
2741 } // namespace test54
2744 // test55: FP. Synchronization with TryLock. Not easy for race detectors {{{1
2745 namespace test55 {
2746 // "Correct" synchronization with TryLock and Lock.
2748 // This scheme is actually very risky.
2749 // It is covered in detail in this video:
2750 // http://youtube.com/watch?v=mrvAqvtWYb4 (slide 36, near the 50th minute).
2751 int GLOB = 0;
2752 Mutex MU;
2754 void Worker_Lock() {
2755 GLOB = 1;
2756 MU.Lock();
2759 void Worker_TryLock() {
2760 while (true) {
2761 if (!MU.TryLock()) {
2762 MU.Unlock();
2763 break;
2765 else
2766 MU.Unlock();
2767 usleep(100);
2769 GLOB = 2;
2772 void Run() {
2773 printf("test55:\n");
2774 MyThreadArray t(Worker_Lock, Worker_TryLock);
2775 t.Start();
2776 t.Join();
2777 printf("\tGLOB=%d\n", GLOB);
2779 REGISTER_TEST2(Run, 55, FEATURE|EXCLUDE_FROM_ALL);
2780 } // namespace test55
2784 // test56: TP. Use of ANNOTATE_BENIGN_RACE. {{{1
2785 namespace test56 {
2786 // For whatever reason the user wants to treat
2787 // a race on GLOB as a benign race.
2788 int GLOB = 0;
2789 int GLOB2 = 0;
2791 void Worker() {
2792 GLOB++;
2795 void Run() {
2796 ANNOTATE_BENIGN_RACE(&GLOB, "test56. Use of ANNOTATE_BENIGN_RACE.");
2797 ANNOTATE_BENIGN_RACE(&GLOB2, "No race. The tool should be silent");
2798 printf("test56: positive\n");
2799 MyThreadArray t(Worker, Worker, Worker, Worker);
2800 t.Start();
2801 t.Join();
2802 printf("\tGLOB=%d\n", GLOB);
2804 REGISTER_TEST2(Run, 56, FEATURE|NEEDS_ANNOTATIONS)
2805 } // namespace test56
2808 // test57: TN: Correct use of atomics. {{{1
2809 namespace test57 {
2810 int GLOB = 0;
2811 void Writer() {
2812 for (int i = 0; i < 10; i++) {
2813 AtomicIncrement(&GLOB, 1);
2814 usleep(1000);
2817 void Reader() {
2818 while (GLOB < 20) usleep(1000);
2820 void Run() {
2821 printf("test57: negative\n");
2822 MyThreadArray t(Writer, Writer, Reader, Reader);
2823 t.Start();
2824 t.Join();
2825 CHECK(GLOB == 20);
2826 printf("\tGLOB=%d\n", GLOB);
2828 REGISTER_TEST(Run, 57)
2829 } // namespace test57
2832 // test58: TN. User defined synchronization. {{{1
2833 namespace test58 {
2834 int GLOB1 = 1;
2835 int GLOB2 = 2;
2836 int FLAG1 = 0;
2837 int FLAG2 = 0;
2839 // Correctly synchronized test, but the common lockset is empty.
2840 // The variables FLAG1 and FLAG2 are used for synchronization and as
2841 // temporary variables for swapping two global values.
2842 // This kind of synchronization is rarely used (excluded from all tests??).
2844 void Worker2() {
2845 FLAG1=GLOB2;
2847 while(!FLAG2)
2849 GLOB2=FLAG2;
2852 void Worker1() {
2853 FLAG2=GLOB1;
2855 while(!FLAG1)
2857 GLOB1=FLAG1;
2860 void Run() {
2861 printf("test58:\n");
2862 MyThreadArray t(Worker1, Worker2);
2863 t.Start();
2864 t.Join();
2865 printf("\tGLOB1=%d\n", GLOB1);
2866 printf("\tGLOB2=%d\n", GLOB2);
2868 REGISTER_TEST2(Run, 58, FEATURE|EXCLUDE_FROM_ALL)
2869 } // namespace test58
2873 // test59: TN. User defined synchronization. Annotated {{{1
2874 namespace test59 {
2875 int COND1 = 0;
2876 int COND2 = 0;
2877 int GLOB1 = 1;
2878 int GLOB2 = 2;
2879 int FLAG1 = 0;
2880 int FLAG2 = 0;
2881 // same as test 58 but annotated
2883 void Worker2() {
2884 FLAG1=GLOB2;
2885 ANNOTATE_CONDVAR_SIGNAL(&COND2);
2886 while(!FLAG2) usleep(1);
2887 ANNOTATE_CONDVAR_WAIT(&COND1);
2888 GLOB2=FLAG2;
2891 void Worker1() {
2892 FLAG2=GLOB1;
2893 ANNOTATE_CONDVAR_SIGNAL(&COND1);
2894 while(!FLAG1) usleep(1);
2895 ANNOTATE_CONDVAR_WAIT(&COND2);
2896 GLOB1=FLAG1;
2899 void Run() {
2900 printf("test59: negative\n");
2901 ANNOTATE_BENIGN_RACE(&FLAG1, "synchronization via 'safe' race");
2902 ANNOTATE_BENIGN_RACE(&FLAG2, "synchronization via 'safe' race");
2903 MyThreadArray t(Worker1, Worker2);
2904 t.Start();
2905 t.Join();
2906 printf("\tGLOB1=%d\n", GLOB1);
2907 printf("\tGLOB2=%d\n", GLOB2);
2909 REGISTER_TEST2(Run, 59, FEATURE|NEEDS_ANNOTATIONS)
2910 } // namespace test59
2913 // test60: TN. Correct synchronization using signal-wait {{{1
2914 namespace test60 {
2915 int COND1 = 0;
2916 int COND2 = 0;
2917 int GLOB1 = 1;
2918 int GLOB2 = 2;
2919 int FLAG2 = 0;
2920 int FLAG1 = 0;
2921 Mutex MU;
2922 // same as test 59 but synchronized with signal-wait.
2924 void Worker2() {
2925 FLAG1=GLOB2;
2927 MU.Lock();
2928 COND1 = 1;
2929 CV.Signal();
2930 MU.Unlock();
2932 MU.Lock();
2933 while(COND2 != 1)
2934 CV.Wait(&MU);
2935 ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU);
2936 MU.Unlock();
2938 GLOB2=FLAG2;
2941 void Worker1() {
2942 FLAG2=GLOB1;
2944 MU.Lock();
2945 COND2 = 1;
2946 CV.Signal();
2947 MU.Unlock();
2949 MU.Lock();
2950 while(COND1 != 1)
2951 CV.Wait(&MU);
2952 ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU);
2953 MU.Unlock();
2955 GLOB1=FLAG1;
2958 void Run() {
2959 printf("test60: negative\n");
2960 MyThreadArray t(Worker1, Worker2);
2961 t.Start();
2962 t.Join();
2963 printf("\tGLOB1=%d\n", GLOB1);
2964 printf("\tGLOB2=%d\n", GLOB2);
2966 REGISTER_TEST2(Run, 60, FEATURE|NEEDS_ANNOTATIONS)
2967 } // namespace test60
2970 // test61: TN. Synchronization via Mutex as in happens-before, annotated. {{{1
2971 namespace test61 {
2972 Mutex MU;
2973 int GLOB = 0;
2974 int *P1 = NULL, *P2 = NULL;
2976 // In this test Mutex lock/unlock operations introduce a happens-before relation.
2977 // We annotate the code so that MU is treated as it would be by a pure happens-before detector.
2980 void Putter() {
2981 ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&MU);
2982 MU.Lock();
2983 if (P1 == NULL) {
2984 P1 = &GLOB;
2985 *P1 = 1;
2987 MU.Unlock();
2990 void Getter() {
2991 bool done = false;
2992 while (!done) {
2993 MU.Lock();
2994 if (P1) {
2995 done = true;
2996 P2 = P1;
2997 P1 = NULL;
2999 MU.Unlock();
3001 *P2 = 2;
3005 void Run() {
3006 printf("test61: negative\n");
3007 MyThreadArray t(Putter, Getter);
3008 t.Start();
3009 t.Join();
3010 printf("\tGLOB=%d\n", GLOB);
3012 REGISTER_TEST2(Run, 61, FEATURE|NEEDS_ANNOTATIONS)
3013 } // namespace test61
3016 // test62: STAB. Create as many segments as possible. {{{1
3017 namespace test62 {
3018 // Helgrind 3.3.0 will fail as it has a hard limit of < 2^24 segments.
3019 // A better scheme is to implement garbage collection for segments.
3020 ProducerConsumerQueue Q(INT_MAX);
3021 const int N = 1 << 22;
3023 void Putter() {
3024 for (int i = 0; i < N; i++){
3025 if ((i % (N / 8)) == 0) {
3026 printf("i=%d\n", i);
3028 Q.Put(NULL);
3032 void Getter() {
3033 for (int i = 0; i < N; i++)
3034 Q.Get();
3037 void Run() {
3038 printf("test62:\n");
3039 MyThreadArray t(Putter, Getter);
3040 t.Start();
3041 t.Join();
3043 REGISTER_TEST2(Run, 62, STABILITY|EXCLUDE_FROM_ALL)
3044 } // namespace test62
3047 // test63: STAB. Create as many segments as possible and do it fast. {{{1
3048 namespace test63 {
3049 // Helgrind 3.3.0 will fail as it has a hard limit of < 2^24 segments.
3050 // A better scheme is to implement garbage collection for segments.
3051 const int N = 1 << 24;
3052 int C = 0;
3054 void Putter() {
3055 for (int i = 0; i < N; i++){
3056 if ((i % (N / 8)) == 0) {
3057 printf("i=%d\n", i);
3059 ANNOTATE_CONDVAR_SIGNAL(&C);
3063 void Getter() {
3066 void Run() {
3067 printf("test63:\n");
3068 MyThreadArray t(Putter, Getter);
3069 t.Start();
3070 t.Join();
3072 REGISTER_TEST2(Run, 63, STABILITY|EXCLUDE_FROM_ALL)
3073 } // namespace test63
3076 // test64: TP. T2 happens-before T3, but T1 is independent. Reads in T1/T2. {{{1
3077 namespace test64 {
3078 // True race between T1 and T3:
3080 // T1: T2: T3:
3081 // 1. read(GLOB) (sleep)
3082 // a. read(GLOB)
3083 // b. Q.Put() -----> A. Q.Get()
3084 // B. write(GLOB)
3088 int GLOB = 0;
3089 ProducerConsumerQueue Q(INT_MAX);
3091 void T1() {
3092 CHECK(GLOB == 0);
3095 void T2() {
3096 usleep(100000);
3097 CHECK(GLOB == 0);
3098 Q.Put(NULL);
3101 void T3() {
3102 Q.Get();
3103 GLOB = 1;
3107 void Run() {
3108 FAST_MODE_INIT(&GLOB);
3109 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test64: TP.");
3110 printf("test64: positive\n");
3111 MyThreadArray t(T1, T2, T3);
3112 t.Start();
3113 t.Join();
3114 printf("\tGLOB=%d\n", GLOB);
3116 REGISTER_TEST(Run, 64)
3117 } // namespace test64
3120 // test65: TP. T2 happens-before T3, but T1 is independent. Writes in T1/T2. {{{1
3121 namespace test65 {
3122 // Similar to test64.
3123 // True race between T1 and T3:
3125 // T1: T2: T3:
3126 // 1. MU.Lock()
3127 // 2. write(GLOB)
3128 // 3. MU.Unlock() (sleep)
3129 // a. MU.Lock()
3130 // b. write(GLOB)
3131 // c. MU.Unlock()
3132 // d. Q.Put() -----> A. Q.Get()
3133 // B. write(GLOB)
3137 int GLOB = 0;
3138 Mutex MU;
3139 ProducerConsumerQueue Q(INT_MAX);
3141 void T1() {
3142 MU.Lock();
3143 GLOB++;
3144 MU.Unlock();
3147 void T2() {
3148 usleep(100000);
3149 MU.Lock();
3150 GLOB++;
3151 MU.Unlock();
3152 Q.Put(NULL);
3155 void T3() {
3156 Q.Get();
3157 GLOB = 1;
3161 void Run() {
3162 FAST_MODE_INIT(&GLOB);
3163 if (!Tsan_PureHappensBefore())
3164 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test65. TP.");
3165 printf("test65: positive\n");
3166 MyThreadArray t(T1, T2, T3);
3167 t.Start();
3168 t.Join();
3169 printf("\tGLOB=%d\n", GLOB);
3171 REGISTER_TEST(Run, 65)
3172 } // namespace test65
3175 // test66: TN. Two separate pairs of signaller/waiter using the same CV. {{{1
3176 namespace test66 {
3177 int GLOB1 = 0;
3178 int GLOB2 = 0;
3179 int C1 = 0;
3180 int C2 = 0;
3181 Mutex MU;
3183 void Signaller1() {
3184 GLOB1 = 1;
3185 MU.Lock();
3186 C1 = 1;
3187 CV.Signal();
3188 MU.Unlock();
3191 void Signaller2() {
3192 GLOB2 = 1;
3193 usleep(100000);
3194 MU.Lock();
3195 C2 = 1;
3196 CV.Signal();
3197 MU.Unlock();
3200 void Waiter1() {
3201 MU.Lock();
3202 while (C1 != 1) CV.Wait(&MU);
3203 ANNOTATE_CONDVAR_WAIT(&CV);
3204 MU.Unlock();
3205 GLOB1 = 2;
3208 void Waiter2() {
3209 MU.Lock();
3210 while (C2 != 1) CV.Wait(&MU);
3211 ANNOTATE_CONDVAR_WAIT(&CV);
3212 MU.Unlock();
3213 GLOB2 = 2;
3216 void Run() {
3217 printf("test66: negative\n");
3218 MyThreadArray t(Signaller1, Signaller2, Waiter1, Waiter2);
3219 t.Start();
3220 t.Join();
3221 printf("\tGLOB=%d/%d\n", GLOB1, GLOB2);
3223 REGISTER_TEST2(Run, 66, FEATURE|NEEDS_ANNOTATIONS)
3224 } // namespace test66
3227 // test67: FN. Race between Signaller1 and Waiter2 {{{1
3228 namespace test67 {
3229 // Similar to test66, but there is a real race here.
3231 // Here we create a happens-before arc between Signaller1 and Waiter2
3232 // even though there should be no such arc.
3233 // However, it's probably impossible (or just very hard) to avoid it.
3234 int GLOB = 0;
3235 int C1 = 0;
3236 int C2 = 0;
3237 Mutex MU;
3239 void Signaller1() {
3240 GLOB = 1;
3241 MU.Lock();
3242 C1 = 1;
3243 CV.Signal();
3244 MU.Unlock();
3247 void Signaller2() {
3248 usleep(100000);
3249 MU.Lock();
3250 C2 = 1;
3251 CV.Signal();
3252 MU.Unlock();
3255 void Waiter1() {
3256 MU.Lock();
3257 while (C1 != 1) CV.Wait(&MU);
3258 ANNOTATE_CONDVAR_WAIT(&CV);
3259 MU.Unlock();
3262 void Waiter2() {
3263 MU.Lock();
3264 while (C2 != 1) CV.Wait(&MU);
3265 ANNOTATE_CONDVAR_WAIT(&CV);
3266 MU.Unlock();
3267 GLOB = 2;
3270 void Run() {
3271 FAST_MODE_INIT(&GLOB);
3272 ANNOTATE_EXPECT_RACE(&GLOB, "test67. FN. Race between Signaller1 and Waiter2");
3273 printf("test67: positive\n");
3274 MyThreadArray t(Signaller1, Signaller2, Waiter1, Waiter2);
3275 t.Start();
3276 t.Join();
3277 printf("\tGLOB=%d\n", GLOB);
3279 REGISTER_TEST2(Run, 67, FEATURE|NEEDS_ANNOTATIONS|EXCLUDE_FROM_ALL)
3280 } // namespace test67
3283 // test68: TP. Writes are protected by MU, reads are not. {{{1
3284 namespace test68 {
3285 // In this test, all writes to GLOB are protected by a mutex
3286 // but some reads go unprotected.
3287 // This is certainly a race, but in some cases such code could occur in
3288 // a correct program. For example, the unprotected reads may be used
3289 // for showing statistics and are not required to be precise.
3290 int GLOB = 0;
3291 int COND = 0;
3292 const int N_writers = 3;
3293 Mutex MU, MU1;
3295 void Writer() {
3296 for (int i = 0; i < 100; i++) {
3297 MU.Lock();
3298 GLOB++;
3299 MU.Unlock();
3302 // we are done
3303 MU1.Lock();
3304 COND++;
3305 MU1.Unlock();
3308 void Reader() {
3309 bool cont = true;
3310 while (cont) {
3311 CHECK(GLOB >= 0);
3313 // are we done?
3314 MU1.Lock();
3315 if (COND == N_writers)
3316 cont = false;
3317 MU1.Unlock();
3318 usleep(100);
3322 void Run() {
3323 FAST_MODE_INIT(&GLOB);
3324 ANNOTATE_EXPECT_RACE(&GLOB, "TP. Writes are protected, reads are not.");
3325 printf("test68: positive\n");
3326 MyThreadArray t(Reader, Writer, Writer, Writer);
3327 t.Start();
3328 t.Join();
3329 printf("\tGLOB=%d\n", GLOB);
3331 REGISTER_TEST(Run, 68)
3332 } // namespace test68
3335 // test69: {{{1
3336 namespace test69 {
3337 // This is the same as test68, but annotated.
3338 // We do not want to annotate GLOB as a benign race
3339 // because we want to allow racy reads only in certain places.
3341 // TODO:
3342 int GLOB = 0;
3343 int COND = 0;
3344 const int N_writers = 3;
3345 int FAKE_MU = 0;
3346 Mutex MU, MU1;
3348 void Writer() {
3349 for (int i = 0; i < 10; i++) {
3350 MU.Lock();
3351 GLOB++;
3352 MU.Unlock();
3355 // we are done
3356 MU1.Lock();
3357 COND++;
3358 MU1.Unlock();
3361 void Reader() {
3362 bool cont = true;
3363 while (cont) {
3364 ANNOTATE_IGNORE_READS_BEGIN();
3365 CHECK(GLOB >= 0);
3366 ANNOTATE_IGNORE_READS_END();
3368 // are we done?
3369 MU1.Lock();
3370 if (COND == N_writers)
3371 cont = false;
3372 MU1.Unlock();
3373 usleep(100);
3377 void Run() {
3378 printf("test69: negative\n");
3379 MyThreadArray t(Reader, Writer, Writer, Writer);
3380 t.Start();
3381 t.Join();
3382 printf("\tGLOB=%d\n", GLOB);
3384 REGISTER_TEST(Run, 69)
3385 } // namespace test69
3387 // test70: STAB. Check that TRACE_MEMORY works. {{{1
3388 namespace test70 {
3389 int GLOB = 0;
3390 void Run() {
3391 printf("test70: negative\n");
3392 ANNOTATE_TRACE_MEMORY(&GLOB);
3393 GLOB = 1;
3394 printf("\tGLOB=%d\n", GLOB);
3396 REGISTER_TEST(Run, 70)
3397 } // namespace test70
3401 // test71: TN. strlen, index. {{{1
3402 namespace test71 {
3403 // This test is a reproducer for a benign race in strlen (as well as index, etc).
3404 // Some implementations of strlen may read up to 7 bytes past the end of the string
3405 // thus touching memory which may not belong to this string.
3406 // Such a race is benign because the data read past the end of the string is not used.
3408 // Here, we allocate an 8-byte aligned string str and initialize the first 5 bytes.
3409 // Then one thread calls strlen(str) (as well as index & rindex)
3410 // and another thread initializes str[5]..str[7].
3412 // This can be fixed in Helgrind by intercepting strlen and replacing it
3413 // with a simpler implementation.
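// A minimal sketch of such a simpler, byte-at-a-time replacement (illustrative
// only, not used by this test): it stops exactly at the terminating '\0' and so
// never touches the concurrently written bytes str[5]..str[7].
static inline size_t simple_strlen(const char *s) {
  size_t n = 0;
  while (s[n] != '\0')  // read one byte at a time, never past the terminator
    n++;
  return n;
}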
3415 char *str;
3416 void WorkerX() {
3417 usleep(100000);
3418 CHECK(strlen(str) == 4);
3419 CHECK(index(str, 'X') == str);
3420 CHECK(index(str, 'x') == str+1);
3421 CHECK(index(str, 'Y') == NULL);
3422 CHECK(rindex(str, 'X') == str+2);
3423 CHECK(rindex(str, 'x') == str+3);
3424 CHECK(rindex(str, 'Y') == NULL);
3426 void WorkerY() {
3427 str[5] = 'Y';
3428 str[6] = 'Y';
3429 str[7] = '\0';
3432 void Run() {
3433 str = new char[8];
3434 str[0] = 'X';
3435 str[1] = 'x';
3436 str[2] = 'X';
3437 str[3] = 'x';
3438 str[4] = '\0';
3440 printf("test71: negative (strlen & index)\n");
3441 MyThread t1(WorkerY);
3442 MyThread t2(WorkerX);
3443 t1.Start();
3444 t2.Start();
3445 t1.Join();
3446 t2.Join();
3447 printf("\tstrX=%s; strY=%s\n", str, str+5);
3449 REGISTER_TEST(Run, 71)
3450 } // namespace test71
3453 // test72: STAB. Stress test for the number of segment sets (SSETs). {{{1
3454 namespace test72 {
3455 #ifndef NO_BARRIER
3456 // Variation of test33.
3457 // Instead of creating Nlog*N_iter threads,
3458 // we create Nlog threads and do N_iter barriers.
3459 int GLOB = 0;
3460 const int N_iter = 30;
3461 const int Nlog = 16;
3462 const int N = 1 << Nlog;
3463 static int64_t ARR1[N];
3464 static int64_t ARR2[N];
3465 Barrier *barriers[N_iter];
3466 Mutex MU;
3468 void Worker() {
3469 MU.Lock();
3470 int n = ++GLOB;
3471 MU.Unlock();
3473 n %= Nlog;
3475 long t0 = clock();
3476 long t __attribute__((unused)) = t0;
3478 for (int it = 0; it < N_iter; it++) {
3479 if(n == 0) {
3480 //printf("Iter: %d; %ld %ld\n", it, clock() - t, clock() - t0);
3481 t = clock();
3483 // Iterate N_iter times, block on barrier after each iteration.
3484 // This way Helgrind will create new segments after each barrier.
3486 for (int x = 0; x < 2; x++) {
3487 // run the inner loop twice.
3488 // When a memory location is accessed a second time it is likely
3489 // that the state (SVal) will be unchanged.
3490 // The memory machine may optimize this case.
3491 for (int i = 0; i < N; i++) {
3492 // ARR1[i] and ARR2[N-1-i] are accessed by threads from i-th subset
3493 if (i & (1 << n)) {
3494 CHECK(ARR1[i] == 0);
3495 CHECK(ARR2[N-1-i] == 0);
3499 barriers[it]->Block();
3504 void Run() {
3505 printf("test72:\n");
3507 std::vector<MyThread*> vec(Nlog);
3509 for (int i = 0; i < N_iter; i++)
3510 barriers[i] = new Barrier(Nlog);
3512 // Create and start Nlog threads
3513 for (int i = 0; i < Nlog; i++) {
3514 vec[i] = new MyThread(Worker);
3515 vec[i]->Start();
3518 // Join all threads.
3519 for (int i = 0; i < Nlog; i++) {
3520 vec[i]->Join();
3521 delete vec[i];
3523 for (int i = 0; i < N_iter; i++)
3524 delete barriers[i];
3526 /*printf("\tGLOB=%d; ARR[1]=%d; ARR[7]=%d; ARR[N-1]=%d\n",
3527 GLOB, (int)ARR1[1], (int)ARR1[7], (int)ARR1[N-1]);*/
3529 REGISTER_TEST2(Run, 72, STABILITY|PERFORMANCE|EXCLUDE_FROM_ALL);
3530 #endif // NO_BARRIER
3531 } // namespace test72
3534 // test73: STAB. Stress test for the number of (SSETs), different access sizes. {{{1
3535 namespace test73 {
3536 #ifndef NO_BARRIER
3537 // Variation of test72.
3538 // We perform accesses of different sizes to the same location.
3539 int GLOB = 0;
3540 const int N_iter = 2;
3541 const int Nlog = 16;
3542 const int N = 1 << Nlog;
3543 union uint64_union {
3544 uint64_t u64[1];
3545 uint32_t u32[2];
3546 uint16_t u16[4];
3547 uint8_t u8 [8];
3549 static uint64_union ARR1[N];
3550 union uint32_union {
3551 uint32_t u32[1];
3552 uint16_t u16[2];
3553 uint8_t u8 [4];
3555 static uint32_union ARR2[N];
3556 Barrier *barriers[N_iter];
3557 Mutex MU;
3559 void Worker() {
3560 MU.Lock();
3561 int n = ++GLOB;
3562 MU.Unlock();
3564 n %= Nlog;
3566 for (int it = 0; it < N_iter; it++) {
3567 // Iterate N_iter times, block on barrier after each iteration.
3568 // This way Helgrind will create new segments after each barrier.
3570 for (int x = 0; x < 4; x++) {
3571 for (int i = 0; i < N; i++) {
3572 // ARR1[i] are accessed by threads from i-th subset
3573 if (i & (1 << n)) {
3574 for (int off = 0; off < (1 << x); off++) {
3575 switch(x) {
3576 case 0: CHECK(ARR1[i].u64[off] == 0); break;
3577 case 1: CHECK(ARR1[i].u32[off] == 0); break;
3578 case 2: CHECK(ARR1[i].u16[off] == 0); break;
3579 case 3: CHECK(ARR1[i].u8 [off] == 0); break;
3581 switch(x) {
3582 case 1: CHECK(ARR2[i].u32[off] == 0); break;
3583 case 2: CHECK(ARR2[i].u16[off] == 0); break;
3584 case 3: CHECK(ARR2[i].u8 [off] == 0); break;
3590 barriers[it]->Block();
3596 void Run() {
3597 printf("test73:\n");
3599 std::vector<MyThread*> vec(Nlog);
3601 for (int i = 0; i < N_iter; i++)
3602 barriers[i] = new Barrier(Nlog);
3604 // Create and start Nlog threads
3605 for (int i = 0; i < Nlog; i++) {
3606 vec[i] = new MyThread(Worker);
3607 vec[i]->Start();
3610 // Join all threads.
3611 for (int i = 0; i < Nlog; i++) {
3612 vec[i]->Join();
3613 delete vec[i];
3615 for (int i = 0; i < N_iter; i++)
3616 delete barriers[i];
3618 /*printf("\tGLOB=%d; ARR[1]=%d; ARR[7]=%d; ARR[N-1]=%d\n",
3619 GLOB, (int)ARR1[1], (int)ARR1[7], (int)ARR1[N-1]);*/
3621 REGISTER_TEST2(Run, 73, STABILITY|PERFORMANCE|EXCLUDE_FROM_ALL);
3622 #endif // NO_BARRIER
3623 } // namespace test73
3626 // test74: PERF. A lot of lock/unlock calls. {{{1
3627 namespace test74 {
3628 const int N = 100000;
3629 Mutex MU;
3630 void Run() {
3631 printf("test74: perf\n");
3632 for (int i = 0; i < N; i++ ) {
3633 MU.Lock();
3634 MU.Unlock();
3637 REGISTER_TEST(Run, 74)
3638 } // namespace test74
3641 // test75: TN. Test for sem_post, sem_wait, sem_trywait. {{{1
3642 namespace test75 {
3643 int GLOB = 0;
3644 sem_t sem[2];
3646 void Poster() {
3647 GLOB = 1;
3648 sem_post(&sem[0]);
3649 sem_post(&sem[1]);
3652 void Waiter() {
3653 sem_wait(&sem[0]);
3654 CHECK(GLOB==1);
3656 void TryWaiter() {
3657 usleep(500000);
3658 sem_trywait(&sem[1]);
3659 CHECK(GLOB==1);
3662 void Run() {
3663 #ifndef DRT_NO_SEM
3664 sem_init(&sem[0], 0, 0);
3665 sem_init(&sem[1], 0, 0);
3667 printf("test75: negative\n");
3669 MyThreadArray t(Poster, Waiter);
3670 t.Start();
3671 t.Join();
3673 GLOB = 2;
3675 MyThreadArray t(Poster, TryWaiter);
3676 t.Start();
3677 t.Join();
3679 printf("\tGLOB=%d\n", GLOB);
3681 sem_destroy(&sem[0]);
3682 sem_destroy(&sem[1]);
3683 #endif
3685 REGISTER_TEST(Run, 75)
3686 } // namespace test75
3688 // RefCountedClass {{{1
3689 struct RefCountedClass {
3690 public:
3691 RefCountedClass() {
3692 annotate_unref_ = false;
3693 ref_ = 0;
3694 data_ = 0;
3697 ~RefCountedClass() {
3698 CHECK(ref_ == 0); // race may be reported here
3699 int data_val = data_; // and here
3700 // if MU is not annotated
3701 data_ = 0;
3702 ref_ = -1;
3703 printf("\tRefCountedClass::data_ = %d\n", data_val);
3706 void AccessData() {
3707 this->mu_.Lock();
3708 this->data_++;
3709 this->mu_.Unlock();
3712 void Ref() {
3713 MU.Lock();
3714 CHECK(ref_ >= 0);
3715 ref_++;
3716 MU.Unlock();
3719 void Unref() {
3720 MU.Lock();
3721 CHECK(ref_ > 0);
3722 ref_--;
3723 bool do_delete = ref_ == 0;
3724 if (annotate_unref_) {
3725 ANNOTATE_CONDVAR_SIGNAL(this);
3727 MU.Unlock();
3728 if (do_delete) {
3729 if (annotate_unref_) {
3730 ANNOTATE_CONDVAR_WAIT(this);
3732 delete this;
3736 static void Annotate_MU() {
3737 ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&MU);
3739 void AnnotateUnref() {
3740 annotate_unref_ = true;
3742 void Annotate_Race() {
3743 ANNOTATE_BENIGN_RACE(&this->data_, "needs annotation");
3744 ANNOTATE_BENIGN_RACE(&this->ref_, "needs annotation");
3746 private:
3747 bool annotate_unref_;
3749 int data_;
3750 Mutex mu_; // protects data_
3752 int ref_;
3753 static Mutex MU; // protects ref_
3756 Mutex RefCountedClass::MU;
3758 // test76: FP. Ref counting, no annotations. {{{1
3759 namespace test76 {
3760 #ifndef NO_BARRIER
3761 int GLOB = 0;
3762 Barrier barrier(4);
3763 RefCountedClass *object = NULL;
3764 void Worker() {
3765 object->Ref();
3766 barrier.Block();
3767 object->AccessData();
3768 object->Unref();
3770 void Run() {
3771 printf("test76: false positive (ref counting)\n");
3772 object = new RefCountedClass;
3773 object->Annotate_Race();
3774 MyThreadArray t(Worker, Worker, Worker, Worker);
3775 t.Start();
3776 t.Join();
3778 REGISTER_TEST2(Run, 76, FEATURE)
3779 #endif // NO_BARRIER
3780 } // namespace test76
3784 // test77: TN. Ref counting, MU is annotated. {{{1
3785 namespace test77 {
3786 #ifndef NO_BARRIER
3787 // same as test76, but RefCountedClass::MU is annotated.
3788 int GLOB = 0;
3789 Barrier barrier(4);
3790 RefCountedClass *object = NULL;
3791 void Worker() {
3792 object->Ref();
3793 barrier.Block();
3794 object->AccessData();
3795 object->Unref();
3797 void Run() {
3798 printf("test77: true negative (ref counting), mutex is annotated\n");
3799 RefCountedClass::Annotate_MU();
3800 object = new RefCountedClass;
3801 MyThreadArray t(Worker, Worker, Worker, Worker);
3802 t.Start();
3803 t.Join();
3805 REGISTER_TEST(Run, 77)
3806 #endif // NO_BARRIER
3807 } // namespace test77
3811 // test78: TN. Ref counting, Unref is annotated. {{{1
3812 namespace test78 {
3813 #ifndef NO_BARRIER
3814 // same as test76, but RefCountedClass::Unref is annotated.
3815 int GLOB = 0;
3816 Barrier barrier(4);
3817 RefCountedClass *object = NULL;
3818 void Worker() {
3819 object->Ref();
3820 barrier.Block();
3821 object->AccessData();
3822 object->Unref();
3824 void Run() {
3825 printf("test78: true negative (ref counting), Unref is annotated\n");
3826 RefCountedClass::Annotate_MU();
3827 object = new RefCountedClass;
3828 MyThreadArray t(Worker, Worker, Worker, Worker);
3829 t.Start();
3830 t.Join();
3832 REGISTER_TEST(Run, 78)
3833 #endif // NO_BARRIER
3834 } // namespace test78
3838 // test79 TN. Swap. {{{1
3839 namespace test79 {
3840 #if 0
3841 typedef __gnu_cxx::hash_map<int, int> map_t;
3842 #else
3843 typedef std::map<int, int> map_t;
3844 #endif
3845 map_t MAP;
3846 Mutex MU;
3848 // Here we use swap to pass MAP between threads.
3849 // The synchronization is correct, but w/o ANNOTATE_MUTEX_IS_USED_AS_CONDVAR
3850 // Helgrind will complain.
3852 void Worker1() {
3853 map_t tmp;
3854 MU.Lock();
3855 // We swap the new empty map 'tmp' with 'MAP'.
3856 MAP.swap(tmp);
3857 MU.Unlock();
3858 // tmp (which is the old version of MAP) is destroyed here.
3861 void Worker2() {
3862 MU.Lock();
3863 MAP[1]++; // Just update MAP under MU.
3864 MU.Unlock();
3867 void Worker3() { Worker1(); }
3868 void Worker4() { Worker2(); }
3870 void Run() {
3871 ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&MU);
3872 printf("test79: negative\n");
3873 MyThreadArray t(Worker1, Worker2, Worker3, Worker4);
3874 t.Start();
3875 t.Join();
3877 REGISTER_TEST(Run, 79)
3878 } // namespace test79
3881 // AtomicRefCountedClass. {{{1
3882 // Same as RefCountedClass, but using atomic ops instead of mutex.
3883 struct AtomicRefCountedClass {
3884 public:
3885 AtomicRefCountedClass() {
3886 annotate_unref_ = false;
3887 ref_ = 0;
3888 data_ = 0;
3891 ~AtomicRefCountedClass() {
3892 CHECK(ref_ == 0); // race may be reported here
3893 int data_val = data_; // and here
3894 data_ = 0;
3895 ref_ = -1;
3896 printf("\tRefCountedClass::data_ = %d\n", data_val);
3899 void AccessData() {
3900 this->mu_.Lock();
3901 this->data_++;
3902 this->mu_.Unlock();
3905 void Ref() {
3906 AtomicIncrement(&ref_, 1);
3909 void Unref() {
3910 // DISCLAIMER: I am not sure I've implemented this correctly
3911 // (might require some memory barrier, etc).
3912 // But this implementation of reference counting is enough for
3913 // the purpose of demonstrating Helgrind.
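// (With C++11 atomics the canonical form of this would be roughly
//    if (ref_.fetch_sub(1, std::memory_order_acq_rel) == 1) delete this;
//  i.e. each thread's earlier uses of the object are released by its decrement
//  and acquired by the thread that drops the count to zero before it deletes.)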
3914 AtomicIncrement(&ref_, -1);
3915 if (annotate_unref_) { ANNOTATE_CONDVAR_SIGNAL(this); }
3916 if (ref_ == 0) {
3917 if (annotate_unref_) { ANNOTATE_CONDVAR_WAIT(this); }
3918 delete this;
3922 void AnnotateUnref() {
3923 annotate_unref_ = true;
3925 void Annotate_Race() {
3926 ANNOTATE_BENIGN_RACE(&this->data_, "needs annotation");
3928 private:
3929 bool annotate_unref_;
3931 Mutex mu_;
3932 int data_; // under mu_
3934 int ref_; // used in atomic ops.
3937 // test80: FP. Ref counting with atomics, no annotations. {{{1
3938 namespace test80 {
3939 #ifndef NO_BARRIER
3940 int GLOB = 0;
3941 Barrier barrier(4);
3942 AtomicRefCountedClass *object = NULL;
3943 void Worker() {
3944 object->Ref();
3945 barrier.Block();
3946 object->AccessData();
3947 object->Unref(); // All the tricky stuff is here.
3949 void Run() {
3950 printf("test80: false positive (ref counting)\n");
3951 object = new AtomicRefCountedClass;
3952 object->Annotate_Race();
3953 MyThreadArray t(Worker, Worker, Worker, Worker);
3954 t.Start();
3955 t.Join();
3957 REGISTER_TEST2(Run, 80, FEATURE|EXCLUDE_FROM_ALL)
3958 #endif // NO_BARRIER
3959 } // namespace test80
3962 // test81: TN. Ref counting with atomics, Unref is annotated. {{{1
3963 namespace test81 {
3964 #ifndef NO_BARRIER
3965 // same as test80, but Unref is annotated.
3966 int GLOB = 0;
3967 Barrier barrier(4);
3968 AtomicRefCountedClass *object = NULL;
3969 void Worker() {
3970 object->Ref();
3971 barrier.Block();
3972 object->AccessData();
3973 object->Unref(); // All the tricky stuff is here.
3975 void Run() {
3976 printf("test81: negative (annotated ref counting)\n");
3977 object = new AtomicRefCountedClass;
3978 object->AnnotateUnref();
3979 MyThreadArray t(Worker, Worker, Worker, Worker);
3980 t.Start();
3981 t.Join();
3983 REGISTER_TEST2(Run, 81, FEATURE|EXCLUDE_FROM_ALL)
3984 #endif // NO_BARRIER
3985 } // namespace test81
3988 // test82: Object published w/o synchronization. {{{1
3989 namespace test82 {
3991 // Writer creates a new object and makes the pointer visible to the Reader.
3992 // Reader waits until the object pointer is non-null and reads the object.
3994 // On Core 2 Duo this test will sometimes (quite rarely) fail in
3995 // the CHECK below, at least if compiled with -O2.
3997 // The sequence of events:
3998 // Thread1: Thread2:
3999 // a. arr_[...] = ...
4000 // b. foo[i] = ...
4001 // A. ... = foo[i]; // non NULL
4002 // B. ... = arr_[...];
4004 // Since there is no proper synchronization, during the event (B)
4005 // Thread2 may not see the result of the event (a).
4006 // On x86 and x86_64 this happens due to compiler reordering instructions.
4007 // On other architectures it may also happen due to cache inconsistency.
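// An illustrative fix (assuming C++11 <atomic> were available to this test)
// is to publish the pointer with release/acquire ordering, e.g.
//   std::atomic<FOO*> slot;
//   writer: slot.store(new FOO, std::memory_order_release);
//   reader: FOO *p = slot.load(std::memory_order_acquire); if (p) FOO::check(p);
// The release store orders the constructor's write to arr_ before the pointer
// becomes visible, and the acquire load makes that write visible to the reader.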
4009 class FOO {
4010 public:
4011 FOO() {
4012 idx_ = rand() % 1024;
4013 arr_[idx_] = 77777;
4014 // __asm__ __volatile__("" : : : "memory"); // this fixes!
4016 static void check(volatile FOO *foo) {
4017 CHECK(foo->arr_[foo->idx_] == 77777);
4019 private:
4020 int idx_;
4021 int arr_[1024];
4024 const int N = 100000;
4025 static volatile FOO *foo[N];
4026 Mutex MU;
4028 void Writer() {
4029 for (int i = 0; i < N; i++) {
4030 foo[i] = new FOO;
4031 usleep(100);
4035 void Reader() {
4036 for (int i = 0; i < N; i++) {
4037 while (!foo[i]) {
4038 MU.Lock(); // this is NOT a synchronization,
4039 MU.Unlock(); // it just helps foo[i] to become visible in Reader.
4041 if ((i % 100) == 0) {
4042 printf("rd %d\n", i);
4044 // At this point Reader() sees the new value of foo[i]
4045 // but in very rare cases will not see the new value of foo[i]->arr_.
4046 // Thus this CHECK will sometimes fail.
4047 FOO::check(foo[i]);
4051 void Run() {
4052 printf("test82: positive\n");
4053 MyThreadArray t(Writer, Reader);
4054 t.Start();
4055 t.Join();
4057 REGISTER_TEST2(Run, 82, FEATURE|EXCLUDE_FROM_ALL)
4058 } // namespace test82
4061 // test83: Object published w/o synchronization (simple version){{{1
4062 namespace test83 {
4063 // A simplified version of test82 (an example of incorrect code).
4064 // This test, though incorrect, will almost never fail.
4065 volatile static int *ptr = NULL;
4066 Mutex MU;
4068 void Writer() {
4069 usleep(100);
4070 ptr = new int(777);
4073 void Reader() {
4074 while(!ptr) {
4075 MU.Lock(); // Not a synchronization!
4076 MU.Unlock();
4078 CHECK(*ptr == 777);
4081 void Run() {
4082 // printf("test83: positive\n");
4083 MyThreadArray t(Writer, Reader);
4084 t.Start();
4085 t.Join();
4087 REGISTER_TEST2(Run, 83, FEATURE|EXCLUDE_FROM_ALL)
4088 } // namespace test83
4091 // test84: TP. True race (regression test for a bug related to atomics){{{1
4092 namespace test84 {
4093 // Helgrind should not create HB arcs for the bus lock even when
4094 // --pure-happens-before=yes is used.
4095 // Bug found by Bart Van Assche; the test is taken from
4096 // valgrind file drd/tests/atomic_var.c.
4097 static int s_x = 0;
4098 /* s_dummy[] ensures that s_x and s_y are not in the same cache line. */
4099 static char s_dummy[512] = {0};
4100 static int s_y;
4102 void thread_func_1()
4104 s_y = 1;
4105 AtomicIncrement(&s_x, 1);
4108 void thread_func_2()
4110 while (AtomicIncrement(&s_x, 0) == 0)
4112 printf("y = %d\n", s_y);
4116 void Run() {
4117 CHECK(s_dummy[0] == 0); // Avoid compiler warning about 's_dummy unused'.
4118 printf("test84: positive\n");
4119 FAST_MODE_INIT(&s_y);
4120 ANNOTATE_EXPECT_RACE_FOR_TSAN(&s_y, "test84: TP. true race.");
4121 MyThreadArray t(thread_func_1, thread_func_2);
4122 t.Start();
4123 t.Join();
4125 REGISTER_TEST(Run, 84)
4126 } // namespace test84
4129 // test85: Test for RunningOnValgrind(). {{{1
4130 namespace test85 {
4131 int GLOB = 0;
4132 void Run() {
4133 printf("test85: RunningOnValgrind() = %d\n", RunningOnValgrind());
4135 REGISTER_TEST(Run, 85)
4136 } // namespace test85
4139 // test86: Test for race inside DTOR: racey write to vptr. Benign. {{{1
4140 namespace test86 {
4141 // This test shows a racey access to vptr (the pointer to vtbl).
4142 // We have class A and class B derived from A.
4143 // Both classes have a virtual function f() and a virtual DTOR.
4144 // We create an object 'A *a = new B'
4145 // and pass this object from Thread1 to Thread2.
4146 // Thread2 calls a->f(). This call reads a->vtpr.
4147 // Thread1 deletes the object. B::~B waits until the object can be destroyed
4148 // (flag_stopped == true) but at the very beginning of B::~B
4149 // a->vptr is written to.
4150 // So, we have a race on a->vptr.
4151 // On this particular test this race is benign, but test87 shows
4152 // how such race could harm.
4156 // Thread1: Thread2:
4157 // 1. A a* = new B;
4158 // 2. Q.Put(a); ------------\ .
4159 // \--------------------> a. a = Q.Get();
4160 // b. a->f();
4161 // /--------- c. flag_stopped = true;
4162 // 3. delete a; /
4163 // waits until flag_stopped <------/
4164 // inside the dtor
4167 bool flag_stopped = false;
4168 Mutex mu;
4170 ProducerConsumerQueue Q(INT_MAX); // Used to pass A* between threads.
4172 struct A {
4173 A() { printf("A::A()\n"); }
4174 virtual ~A() { printf("A::~A()\n"); }
4175 virtual void f() { }
4177 uintptr_t padding[15];
4178 } __attribute__ ((aligned (64)));
4180 struct B: A {
4181 B() { printf("B::B()\n"); }
4182 virtual ~B() {
4183 // The race is here. <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
4184 printf("B::~B()\n");
4185 // wait until flag_stopped is true.
4186 mu.LockWhen(Condition<bool>(&ArgIsTrue, &flag_stopped));
4187 mu.Unlock();
4188 printf("B::~B() done\n");
4190 virtual void f() { }
4193 void Waiter() {
4194 A *a = new B;
4195 if (!Tsan_FastMode())
4196 ANNOTATE_EXPECT_RACE(a, "test86: expected race on a->vptr");
4197 printf("Waiter: B created\n");
4198 Q.Put(a);
4199 usleep(100000); // so that Worker calls a->f() first.
4200 printf("Waiter: deleting B\n");
4201 delete a;
4202 printf("Waiter: B deleted\n");
4203 usleep(100000);
4204 printf("Waiter: done\n");
4207 void Worker() {
4208 A *a = reinterpret_cast<A*>(Q.Get());
4209 printf("Worker: got A\n");
4210 a->f();
4212 mu.Lock();
4213 flag_stopped = true;
4214 mu.Unlock();
4215 usleep(200000);
4216 printf("Worker: done\n");
4219 void Run() {
4220 printf("test86: positive, race inside DTOR\n");
4221 MyThreadArray t(Waiter, Worker);
4222 t.Start();
4223 t.Join();
4225 REGISTER_TEST(Run, 86)
4226 } // namespace test86
4229 // test87: Test for race inside DTOR: racey write to vptr. Harmful.{{{1
4230 namespace test87 {
4231 // A variation of test86 where the race is harmful.
4232 // Here we have class C derived from B.
4233 // We create an object 'A *a = new C' in Thread1 and pass it to Thread2.
4234 // Thread2 calls a->f().
4235 // Thread1 calls 'delete a'.
4236 // It first calls C::~C, then B::~B where it rewrites the vptr to point
4237 // to B::vtbl. This is a problem because Thread2 might not have called a->f()
4238 // yet, and when it does it will call B::f instead of C::f.
4240 bool flag_stopped = false;
4241 Mutex mu;
4243 ProducerConsumerQueue Q(INT_MAX); // Used to pass A* between threads.
4245 struct A {
4246 A() { printf("A::A()\n"); }
4247 virtual ~A() { printf("A::~A()\n"); }
4248 virtual void f() = 0; // pure virtual.
4251 struct B: A {
4252 B() { printf("B::B()\n"); }
4253 virtual ~B() {
4254 // The race is here. <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
4255 printf("B::~B()\n");
4256 // wait until flag_stopped is true.
4257 mu.LockWhen(Condition<bool>(&ArgIsTrue, &flag_stopped));
4258 mu.Unlock();
4259 printf("B::~B() done\n");
4261 virtual void f() = 0; // pure virtual.
4264 struct C: B {
4265 C() { printf("C::C()\n"); }
4266 virtual ~C() { printf("C::~C()\n"); }
4267 virtual void f() { }
4270 void Waiter() {
4271 A *a = new C;
4272 Q.Put(a);
4273 delete a;
4276 void Worker() {
4277 A *a = reinterpret_cast<A*>(Q.Get());
4278 a->f();
4280 mu.Lock();
4281 flag_stopped = true;
4282 ANNOTATE_CONDVAR_SIGNAL(&mu);
4283 mu.Unlock();
4286 void Run() {
4287 printf("test87: positive, race inside DTOR\n");
4288 MyThreadArray t(Waiter, Worker);
4289 t.Start();
4290 t.Join();
4292 REGISTER_TEST2(Run, 87, FEATURE|EXCLUDE_FROM_ALL)
4293 } // namespace test87
4296 // test88: Test for ANNOTATE_IGNORE_WRITES_*{{{1
4297 namespace test88 {
4298 // a racey write annotated with ANNOTATE_IGNORE_WRITES_BEGIN/END.
4299 int GLOB = 0;
4300 void Worker() {
4301 ANNOTATE_IGNORE_WRITES_BEGIN();
4302 GLOB = 1;
4303 ANNOTATE_IGNORE_WRITES_END();
4305 void Run() {
4306 printf("test88: negative, test for ANNOTATE_IGNORE_WRITES_*\n");
4307 MyThread t(Worker);
4308 t.Start();
4309 GLOB = 1;
4310 t.Join();
4311 printf("\tGLOB=%d\n", GLOB);
4313 REGISTER_TEST(Run, 88)
4314 } // namespace test88
4317 // test89: Test for debug info. {{{1
4318 namespace test89 {
4319 // Simple races with different objects (stack, heap globals; scalars, structs).
4320 // Also, if run with --trace-level=2 this test will show a sequence of
4321 // CTOR and DTOR calls.
4322 struct STRUCT {
4323 int a, b, c;
4326 struct A {
4327 int a;
4328 A() {
4329 ANNOTATE_TRACE_MEMORY(&a);
4330 a = 1;
4332 virtual ~A() {
4333 a = 4;
4337 struct B : A {
4338 B() { CHECK(a == 1); }
4339 virtual ~B() { CHECK(a == 3); }
4341 struct C : B {
4342 C() { a = 2; }
4343 virtual ~C() { a = 3; }
4346 int GLOBAL = 0;
4347 int *STACK = 0;
4348 STRUCT GLOB_STRUCT;
4349 STRUCT *STACK_STRUCT;
4350 STRUCT *HEAP_STRUCT;
4352 void Worker() {
4353 GLOBAL = 1;
4354 *STACK = 1;
4355 GLOB_STRUCT.b = 1;
4356 STACK_STRUCT->b = 1;
4357 HEAP_STRUCT->b = 1;
4360 void Run() {
4361 int stack_var = 0;
4362 STACK = &stack_var;
4364 STRUCT stack_struct;
4365 STACK_STRUCT = &stack_struct;
4367 HEAP_STRUCT = new STRUCT;
4369 printf("test89: negative\n");
4370 MyThreadArray t(Worker, Worker);
4371 t.Start();
4372 t.Join();
4374 delete HEAP_STRUCT;
4376 A *a = new C;
4377 printf("Using 'a->a': %d\n", a->a);
4378 delete a;
4380 REGISTER_TEST2(Run, 89, FEATURE|EXCLUDE_FROM_ALL)
4381 } // namespace test89
4384 // test90: FP. Test for a safely-published pointer (read-only). {{{1
4385 namespace test90 {
4386 // The Publisher creates an object and safely publishes it under a mutex.
4387 // Readers access the object read-only.
4388 // See also test91.
4390 // Without annotations Helgrind will issue a false positive in Reader().
4392 // Choices for annotations:
4393 // -- ANNOTATE_CONDVAR_SIGNAL/ANNOTATE_CONDVAR_WAIT
4394 // -- ANNOTATE_MUTEX_IS_USED_AS_CONDVAR
4395 // -- ANNOTATE_PUBLISH_MEMORY_RANGE.
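// For instance (an illustrative sketch of the first option, mirroring test54):
//   Publisher, just before MU.Unlock():  ANNOTATE_CONDVAR_SIGNAL(&GLOB);
//   Reader, after it has fetched p:      ANNOTATE_CONDVAR_WAIT(&GLOB);
// This creates the missing happens-before arc from publication to first use.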
4397 int *GLOB = 0;
4398 Mutex MU;
4400 void Publisher() {
4401 MU.Lock();
4402 GLOB = (int*)memalign(64, sizeof(int));
4403 *GLOB = 777;
4404 if (!Tsan_PureHappensBefore() && !Tsan_FastMode())
4405 ANNOTATE_EXPECT_RACE_FOR_TSAN(GLOB, "test90. FP. This is a false positive");
4406 MU.Unlock();
4407 usleep(200000);
4410 void Reader() {
4411 usleep(10000);
4412 while (true) {
4413 MU.Lock();
4414 int *p = GLOB;
4415 MU.Unlock();
4416 if (p) {
4417 CHECK(*p == 777); // Race is reported here.
4418 break;
4423 void Run() {
4424 printf("test90: false positive (safely published pointer).\n");
4425 MyThreadArray t(Publisher, Reader, Reader, Reader);
4426 t.Start();
4427 t.Join();
4428 printf("\t*GLOB=%d\n", *GLOB);
4429 free(GLOB);
4431 REGISTER_TEST(Run, 90)
4432 } // namespace test90
4435 // test91: FP. Test for a safely-published pointer (read-write). {{{1
4436 namespace test91 {
4437 // Similar to test90.
4438 // The Publisher creates an object and safely publishes it under a mutex MU1.
4439 // Accessors get the object under MU1 and access it (read/write) under MU2.
4441 // Without annotations Helgrind will issue a false positive in Accessor().
4444 int *GLOB = 0;
4445 Mutex MU, MU1, MU2;
4447 void Publisher() {
4448 MU1.Lock();
4449 GLOB = (int*)memalign(64, sizeof(int));
4450 *GLOB = 777;
4451 if (!Tsan_PureHappensBefore() && !Tsan_FastMode())
4452 ANNOTATE_EXPECT_RACE_FOR_TSAN(GLOB, "test91. FP. This is a false positive");
4453 MU1.Unlock();
4456 void Accessor() {
4457 usleep(10000);
4458 while (true) {
4459 MU1.Lock();
4460 int *p = GLOB;
4461 MU1.Unlock();
4462 if (p) {
4463 MU2.Lock();
4464 (*p)++; // Race is reported here.
4465 CHECK(*p > 777);
4466 MU2.Unlock();
4467 break;
4472 void Run() {
4473 printf("test91: false positive (safely published pointer, read/write).\n");
4474 MyThreadArray t(Publisher, Accessor, Accessor, Accessor);
4475 t.Start();
4476 t.Join();
4477 printf("\t*GLOB=%d\n", *GLOB);
4478 free(GLOB);
4480 REGISTER_TEST(Run, 91)
4481 } // namespace test91
4484 // test92: TN. Test for a safely-published pointer (read-write), annotated. {{{1
4485 namespace test92 {
4486 // Similar to test91, but annotated with ANNOTATE_PUBLISH_MEMORY_RANGE.
4489 // Publisher: Accessors:
4491 // 1. MU1.Lock()
4492 // 2. Create GLOB.
4493 // 3. ANNOTATE_PUBLISH_...(GLOB) -------\ .
4494 // 4. MU1.Unlock() \ .
4495 // \ a. MU1.Lock()
4496 // \ b. Get GLOB
4497 // \ c. MU1.Unlock()
4498 // \--> d. Access GLOB
4500 // A happens-before arc is created between ANNOTATE_PUBLISH_MEMORY_RANGE and
4501 // accesses to GLOB.
4503 struct ObjType {
4504 int arr[10];
4507 ObjType *GLOB = 0;
4508 Mutex MU, MU1, MU2;
4510 void Publisher() {
4511 MU1.Lock();
4512 GLOB = new ObjType;
4513 for (int i = 0; i < 10; i++) {
4514 GLOB->arr[i] = 777;
4516 // This annotation should go right before the object is published.
4517 ANNOTATE_PUBLISH_MEMORY_RANGE(GLOB, sizeof(*GLOB));
4518 MU1.Unlock();
4521 void Accessor(int index) {
4522 while (true) {
4523 MU1.Lock();
4524 ObjType *p = GLOB;
4525 MU1.Unlock();
4526 if (p) {
4527 MU2.Lock();
4528 p->arr[index]++; // W/o the annotations the race will be reported here.
4529 CHECK(p->arr[index] == 778);
4530 MU2.Unlock();
4531 break;
4536 void Accessor0() { Accessor(0); }
4537 void Accessor5() { Accessor(5); }
4538 void Accessor9() { Accessor(9); }
4540 void Run() {
4541 printf("test92: safely published pointer, read/write, annotated.\n");
4542 MyThreadArray t(Publisher, Accessor0, Accessor5, Accessor9);
4543 t.Start();
4544 t.Join();
4545 printf("\t*GLOB=%d\n", GLOB->arr[0]);
4547 REGISTER_TEST(Run, 92)
4548 } // namespace test92
4551 // test93: TP. Test for incorrect usage of ANNOTATE_PUBLISH_MEMORY_RANGE. {{{1
4552 namespace test93 {
4553 int GLOB = 0;
4555 void Reader() {
4556 CHECK(GLOB == 0);
4559 void Publisher() {
4560 usleep(10000);
4561 // Incorrect, used after the memory has been accessed in another thread.
4562 ANNOTATE_PUBLISH_MEMORY_RANGE(&GLOB, sizeof(GLOB));
4565 void Run() {
4566 printf("test93: positive, misuse of ANNOTATE_PUBLISH_MEMORY_RANGE\n");
4567 MyThreadArray t(Reader, Publisher);
4568 t.Start();
4569 t.Join();
4570 printf("\tGLOB=%d\n", GLOB);
4572 REGISTER_TEST2(Run, 93, FEATURE|EXCLUDE_FROM_ALL)
4573 } // namespace test93
4576 // test94: TP. Check do_cv_signal/fake segment logic {{{1
4577 namespace test94 {
4578 int GLOB;
4580 int COND = 0;
4581 int COND2 = 0;
4582 Mutex MU, MU2;
4583 CondVar CV, CV2;
4585 void Thr1() {
4586 usleep(10000); // Make sure the waiter blocks.
4588 GLOB = 1; // WRITE
4590 MU.Lock();
4591 COND = 1;
4592 CV.Signal();
4593 MU.Unlock();
4595 void Thr2() {
4596 usleep(1000*1000); // Make sure CV2.Signal() "happens after" CV.Signal()
4597 usleep(10000); // Make sure the waiter blocks.
4599 MU2.Lock();
4600 COND2 = 1;
4601 CV2.Signal();
4602 MU2.Unlock();
4604 void Thr3() {
4605 MU.Lock();
4606 while(COND != 1)
4607 CV.Wait(&MU);
4608 MU.Unlock();
4610 void Thr4() {
4611 MU2.Lock();
4612 while(COND2 != 1)
4613 CV2.Wait(&MU2);
4614 MU2.Unlock();
4615 GLOB = 2; // WRITE: no HB-relation between CV.Signal and CV2.Wait !
4617 void Run() {
4618 FAST_MODE_INIT(&GLOB);
4619 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test94: TP.");
4620 printf("test94: TP. Check do_cv_signal/fake segment logic\n");
4621 MyThreadArray mta(Thr1, Thr2, Thr3, Thr4);
4622 mta.Start();
4623 mta.Join();
4624 printf("\tGLOB=%d\n", GLOB);
4626 REGISTER_TEST(Run, 94);
4627 } // namespace test94
4629 // test95: TP. Check do_cv_signal/fake segment logic {{{1
4630 namespace test95 {
4631 int GLOB = 0;
4633 int COND = 0;
4634 int COND2 = 0;
4635 Mutex MU, MU2;
4636 CondVar CV, CV2;
4638 void Thr1() {
4639 usleep(1000*1000); // Make sure CV2.Signal() "happens before" CV.Signal()
4640 usleep(10000); // Make sure the waiter blocks.
4642 GLOB = 1; // WRITE
4644 MU.Lock();
4645 COND = 1;
4646 CV.Signal();
4647 MU.Unlock();
4649 void Thr2() {
4650 usleep(10000); // Make sure the waiter blocks.
4652 MU2.Lock();
4653 COND2 = 1;
4654 CV2.Signal();
4655 MU2.Unlock();
4657 void Thr3() {
4658 MU.Lock();
4659 while(COND != 1)
4660 CV.Wait(&MU);
4661 MU.Unlock();
4663 void Thr4() {
4664 MU2.Lock();
4665 while(COND2 != 1)
4666 CV2.Wait(&MU2);
4667 MU2.Unlock();
4668 GLOB = 2; // WRITE: no HB-relation between CV.Signal and CV2.Wait !
4670 void Run() {
4671 FAST_MODE_INIT(&GLOB);
4672 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test95: TP.");
4673 printf("test95: TP. Check do_cv_signal/fake segment logic\n");
4674 MyThreadArray mta(Thr1, Thr2, Thr3, Thr4);
4675 mta.Start();
4676 mta.Join();
4677 printf("\tGLOB=%d\n", GLOB);
4679 REGISTER_TEST(Run, 95);
4680 } // namespace test95
4682 // test96: TN. tricky LockSet behaviour {{{1
4683 // 3 threads access the same memory with three different
4684 // locksets: {A, B}, {B, C}, {C, A}.
4685 // These locksets have empty intersection
4686 namespace test96 {
4687 int GLOB = 0;
4689 Mutex A, B, C;
4691 void Thread1() {
4692 MutexLock a(&A);
4693 MutexLock b(&B);
4694 GLOB++;
4697 void Thread2() {
4698 MutexLock b(&B);
4699 MutexLock c(&C);
4700 GLOB++;
4703 void Thread3() {
4704 MutexLock a(&A);
4705 MutexLock c(&C);
4706 GLOB++;
4709 void Run() {
4710 printf("test96: FP. tricky LockSet behaviour\n");
4711 ANNOTATE_TRACE_MEMORY(&GLOB);
4712 MyThreadArray mta(Thread1, Thread2, Thread3);
4713 mta.Start();
4714 mta.Join();
4715 CHECK(GLOB == 3);
4716 printf("\tGLOB=%d\n", GLOB);
4718 REGISTER_TEST(Run, 96);
4719 } // namespace test96
4721 // test97: This test shows false negative with --fast-mode=yes {{{1
4722 namespace test97 {
4723 const int HG_CACHELINE_SIZE = 64;
4725 Mutex MU;
4727 const int ARRAY_SIZE = HG_CACHELINE_SIZE * 4 / sizeof(int);
4728 int array[ARRAY_SIZE];
4729 int * GLOB = &array[ARRAY_SIZE/2];
4731 // We use sizeof(array) == 4 * HG_CACHELINE_SIZE to be sure that GLOB points
4732 // to memory inside a CacheLineZ which is inside the array's memory range.
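// (With 4-byte ints this means ARRAY_SIZE == 64*4/4 == 64 elements == 256 bytes
// and GLOB == &array[32], i.e. 128 bytes into the array, so the 64-byte cache
// line containing *GLOB lies entirely inside array[] whatever the array's alignment.)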
4735 void Reader() {
4736 usleep(500000);
4737 CHECK(777 == *GLOB);
4740 void Run() {
4741 MyThreadArray t(Reader);
4742 if (!Tsan_FastMode())
4743 ANNOTATE_EXPECT_RACE_FOR_TSAN(GLOB, "test97: TP. FN with --fast-mode=yes");
4744 printf("test97: This test shows false negative with --fast-mode=yes\n");
4746 t.Start();
4747 *GLOB = 777;
4748 t.Join();
4751 REGISTER_TEST2(Run, 97, FEATURE)
4752 } // namespace test97
4754 // test98: Synchronization via read/write (or send/recv). {{{1
4755 namespace test98 {
4756 // The synchronization here is done by a pair of read/write calls
4757 // that create a happens-before arc. The same may be done with send/recv.
4758 // Such synchronization is quite unusual in real programs
4759 // (why would one synchronize via a file or socket?), but
4760 // quite possible in unit tests where one thread runs as a producer
4761 // and one as a consumer.
4763 // A race detector has to create a happens-before arc from {write,send}
4764 // to {read,recv} even if the file descriptors are different.
4766 int GLOB = 0;
4767 int fd_out = -1;
4768 int fd_in = -1;
4770 void Writer() {
4771 usleep(1000);
4772 GLOB = 1;
4773 const char *str = "Hey there!\n";
4774 IGNORE_RETURN_VALUE(write(fd_out, str, strlen(str) + 1));
4777 void Reader() {
4778 char buff[100];
4779 while (read(fd_in, buff, 100) == 0)
4780 sleep(1);
4781 printf("read: %s\n", buff);
4782 GLOB = 2;
4785 void Run() {
4786 printf("test98: negative, synchronization via I/O\n");
4787 char in_name[100];
4788 char out_name[100];
4789 // we open two files, one for reading and one for writing,
4790 // but the files are actually the same (symlinked).
4791 sprintf(out_name, "/tmp/racecheck_unittest_out.%ld", (long) getpid());
4792 fd_out = creat(out_name, O_WRONLY | S_IRWXU);
4793 #ifdef VGO_darwin
4794 // symlink() is not supported on Darwin. Copy the output file name.
4795 strcpy(in_name, out_name);
4796 #else
4797 sprintf(in_name, "/tmp/racecheck_unittest_in.%ld", (long) getpid());
4798 IGNORE_RETURN_VALUE(symlink(out_name, in_name));
4799 #endif
4800 fd_in = open(in_name, 0, O_RDONLY);
4801 CHECK(fd_out >= 0);
4802 CHECK(fd_in >= 0);
4803 MyThreadArray t(Writer, Reader);
4804 t.Start();
4805 t.Join();
4806 printf("\tGLOB=%d\n", GLOB);
4807 // cleanup
4808 close(fd_in);
4809 close(fd_out);
4810 unlink(in_name);
4811 unlink(out_name);
4813 REGISTER_TEST(Run, 98)
4814 } // namespace test98
4817 // test99: TP. Unit test for a bug in LockWhen*. {{{1
4818 namespace test99 {
4821 bool GLOB = false;
4822 Mutex mu;
4824 static void Thread1() {
4825 for (int i = 0; i < 100; i++) {
4826 mu.LockWhenWithTimeout(Condition<bool>(&ArgIsTrue, &GLOB), 5);
4827 GLOB = false;
4828 mu.Unlock();
4829 usleep(10000);
4833 static void Thread2() {
4834 for (int i = 0; i < 100; i++) {
4835 mu.Lock();
4836 mu.Unlock();
4837 usleep(10000);
4841 void Run() {
4842 printf("test99: regression test for LockWhen*\n");
4843 MyThreadArray t(Thread1, Thread2);
4844 t.Start();
4845 t.Join();
4847 REGISTER_TEST(Run, 99);
4848 } // namespace test99
4851 // test100: Test for initialization bit. {{{1
4852 namespace test100 {
4853 int G1 = 0;
4854 int G2 = 0;
4855 int G3 = 0;
4856 int G4 = 0;
4858 void Creator() {
4859 G1 = 1; CHECK(G1);
4860 G2 = 1;
4861 G3 = 1; CHECK(G3);
4862 G4 = 1;
4865 void Worker1() {
4866 usleep(100000);
4867 CHECK(G1);
4868 CHECK(G2);
4869 G3 = 3;
4870 G4 = 3;
4873 void Worker2() {
4878 void Run() {
4879 printf("test100: test for initialization bit. \n");
4880 MyThreadArray t(Creator, Worker1, Worker2);
4881 ANNOTATE_TRACE_MEMORY(&G1);
4882 ANNOTATE_TRACE_MEMORY(&G2);
4883 ANNOTATE_TRACE_MEMORY(&G3);
4884 ANNOTATE_TRACE_MEMORY(&G4);
4885 t.Start();
4886 t.Join();
4888 REGISTER_TEST2(Run, 100, FEATURE|EXCLUDE_FROM_ALL)
4889 } // namespace test100
4892 // test101: TN. Two signals and two waits. {{{1
4893 namespace test101 {
4894 Mutex MU;
4895 CondVar CV;
4896 int GLOB = 0;
4898 int C1 = 0, C2 = 0;
4900 void Signaller() {
4901 usleep(100000);
4902 MU.Lock();
4903 C1 = 1;
4904 CV.Signal();
4905 printf("signal\n");
4906 MU.Unlock();
4908 GLOB = 1;
4910 usleep(500000);
4911 MU.Lock();
4912 C2 = 1;
4913 CV.Signal();
4914 printf("signal\n");
4915 MU.Unlock();
4918 void Waiter() {
4919 MU.Lock();
4920 while(!C1)
4921 CV.Wait(&MU);
4922 printf("wait\n");
4923 MU.Unlock();
4925 MU.Lock();
4926 while(!C2)
4927 CV.Wait(&MU);
4928 printf("wait\n");
4929 MU.Unlock();
4931 GLOB = 2;
4935 void Run() {
4936 printf("test101: negative\n");
4937 MyThreadArray t(Waiter, Signaller);
4938 t.Start();
4939 t.Join();
4940 printf("\tGLOB=%d\n", GLOB);
4942 REGISTER_TEST(Run, 101)
4943 } // namespace test101
4945 // test102: --fast-mode=yes vs. --initialization-bit=yes {{{1
4946 namespace test102 {
4947 const int HG_CACHELINE_SIZE = 64;
4949 Mutex MU;
4951 const int ARRAY_SIZE = HG_CACHELINE_SIZE * 4 / sizeof(int);
4952 int array[ARRAY_SIZE + 1];
4953 int * GLOB = &array[ARRAY_SIZE/2];
4955 // We use sizeof(array) == 4 * HG_CACHELINE_SIZE to be sure that GLOB points
4956 // to memory inside a CacheLineZ which is inside the array's memory range.
4959 void Reader() {
4960 usleep(200000);
4961 CHECK(777 == GLOB[0]);
4962 usleep(400000);
4963 CHECK(777 == GLOB[1]);
4966 void Run() {
4967 MyThreadArray t(Reader);
4968 if (!Tsan_FastMode())
4969 ANNOTATE_EXPECT_RACE_FOR_TSAN(GLOB+0, "test102: TP. FN with --fast-mode=yes");
4970 ANNOTATE_EXPECT_RACE_FOR_TSAN(GLOB+1, "test102: TP");
4971 printf("test102: --fast-mode=yes vs. --initialization-bit=yes\n");
4973 t.Start();
4974 GLOB[0] = 777;
4975 usleep(400000);
4976 GLOB[1] = 777;
4977 t.Join();
4980 REGISTER_TEST2(Run, 102, FEATURE)
4981 } // namespace test102
4983 // test103: Access different memory locations with different LockSets {{{1
4984 namespace test103 {
4985 const int N_MUTEXES = 6;
4986 const int LOCKSET_INTERSECTION_SIZE = 3;
4988 int data[1 << LOCKSET_INTERSECTION_SIZE] = {0};
4989 Mutex MU[N_MUTEXES];
4991 inline int LS_to_idx (int ls) {
4992 return (ls >> (N_MUTEXES - LOCKSET_INTERSECTION_SIZE))
4993 & ((1 << LOCKSET_INTERSECTION_SIZE) - 1);
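// For illustration: with N_MUTEXES = 6 and LOCKSET_INTERSECTION_SIZE = 3,
// LS_to_idx() keeps only the top three of the six lock bits, e.g.
//   ls = 0b101000 (MU[3] and MU[5] held)  ->  (40 >> 3) & 7 == 0b101 == 5.
// Any two locksets that map to the same non-zero index therefore share at
// least one mutex, which is why index 0 is skipped in Worker() below.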
4996 void Worker() {
4997 for (int ls = 0; ls < (1 << N_MUTEXES); ls++) {
4998 if (LS_to_idx(ls) == 0)
4999 continue;
5000 for (int m = 0; m < N_MUTEXES; m++)
5001 if (ls & (1 << m))
5002 MU[m].Lock();
5004 data[LS_to_idx(ls)]++;
5006 for (int m = N_MUTEXES - 1; m >= 0; m--)
5007 if (ls & (1 << m))
5008 MU[m].Unlock();
5012 void Run() {
5013 printf("test103: Access different memory locations with different LockSets\n");
5014 MyThreadArray t(Worker, Worker, Worker, Worker);
5015 t.Start();
5016 t.Join();
5018 REGISTER_TEST2(Run, 103, FEATURE)
5019 } // namespace test103
5021 // test104: TP. Simple race (write vs write). Heap mem. {{{1
5022 namespace test104 {
5023 int *GLOB = NULL;
5024 void Worker() {
5025 *GLOB = 1;
5028 void Parent() {
5029 MyThread t(Worker);
5030 t.Start();
5031 usleep(100000);
5032 *GLOB = 2;
5033 t.Join();
5035 void Run() {
5036 GLOB = (int*)memalign(64, sizeof(int));
5037 *GLOB = 0;
5038 ANNOTATE_EXPECT_RACE(GLOB, "test104. TP.");
5039 ANNOTATE_TRACE_MEMORY(GLOB);
5040 printf("test104: positive\n");
5041 Parent();
5042 printf("\tGLOB=%d\n", *GLOB);
5043 free(GLOB);
5045 REGISTER_TEST(Run, 104);
5046 } // namespace test104
5049 // test105: Checks how stack grows. {{{1
5050 namespace test105 {
5051 int GLOB = 0;
5053 void F1() {
5054 int ar[32] __attribute__((unused));
5055 // ANNOTATE_TRACE_MEMORY(&ar[0]);
5056 // ANNOTATE_TRACE_MEMORY(&ar[31]);
5057 ar[0] = 1;
5058 ar[31] = 1;
5061 void Worker() {
5062 int ar[32] __attribute__((unused));
5063 // ANNOTATE_TRACE_MEMORY(&ar[0]);
5064 // ANNOTATE_TRACE_MEMORY(&ar[31]);
5065 ar[0] = 1;
5066 ar[31] = 1;
5067 F1();
5070 void Run() {
5071 printf("test105: negative\n");
5072 Worker();
5073 MyThread t(Worker);
5074 t.Start();
5075 t.Join();
5076 printf("\tGLOB=%d\n", GLOB);
5078 REGISTER_TEST(Run, 105)
5079 } // namespace test105
5082 // test106: TN. pthread_once. {{{1
5083 namespace test106 {
5084 int *GLOB = NULL;
5085 static pthread_once_t once = PTHREAD_ONCE_INIT;
5086 void Init() {
5087 GLOB = new int;
5088 ANNOTATE_TRACE_MEMORY(GLOB);
5089 *GLOB = 777;
5092 void Worker0() {
5093 pthread_once(&once, Init);
5095 void Worker1() {
5096 usleep(100000);
5097 pthread_once(&once, Init);
5098 CHECK(*GLOB == 777);
5102 void Run() {
5103 printf("test106: negative\n");
5104 MyThreadArray t(Worker0, Worker1, Worker1, Worker1);
5105 t.Start();
5106 t.Join();
5107 printf("\tGLOB=%d\n", *GLOB);
5109 REGISTER_TEST2(Run, 106, FEATURE)
5110 } // namespace test106
5113 // test107: Test for ANNOTATE_EXPECT_RACE {{{1
5114 namespace test107 {
5115 int GLOB = 0;
5116 void Run() {
5117 printf("test107: negative\n");
5118 ANNOTATE_EXPECT_RACE(&GLOB, "No race in fact. Just checking the tool.");
5119 printf("\tGLOB=%d\n", GLOB);
5121 REGISTER_TEST2(Run, 107, FEATURE|EXCLUDE_FROM_ALL)
5122 } // namespace test107
5125 // test108: TN. initialization of static object. {{{1
5126 namespace test108 {
5127 // Here we have a function-level static object.
5128 // Starting from gcc 4 this is thread safe,
5129 // but it is not thread safe with many other compilers.
5131 // Helgrind supports this kind of initialization by
5132 // intercepting __cxa_guard_acquire/__cxa_guard_release
5133 // and ignoring all accesses between them.
5134 // Helgrind also intercepts pthread_once in the same manner.
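// For reference, a rough sketch of the code the compiler generates for a line
// like "static const Foo *foo = new Foo();" (schematic only, not the exact
// gcc output; the guard variable and its first-byte check follow the Itanium
// C++ ABI):
//
//   static __guard g;                          // zero-initialized guard
//   if (*(char *)&g == 0 && __cxa_guard_acquire(&g)) {
//     foo = new Foo();                         // runs in exactly one thread
//     __cxa_guard_release(&g);                 // marks initialization done
//   }
//
// The accesses Helgrind ignores are precisely those between the acquire and
// the release.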
5135 class Foo {
5136 public:
5137 Foo() {
5138 ANNOTATE_TRACE_MEMORY(&a_);
5139 a_ = 42;
5141 void Check() const { CHECK(a_ == 42); }
5142 private:
5143 int a_;
5146 const Foo *GetFoo() {
5147 static const Foo *foo = new Foo();
5148 return foo;
5150 void Worker0() {
5151 GetFoo();
5154 void Worker() {
5155 usleep(200000);
5156 const Foo *foo = GetFoo();
5157 foo->Check();
5161 void Run() {
5162 printf("test108: negative, initialization of static object\n");
5163 MyThreadArray t(Worker0, Worker, Worker);
5164 t.Start();
5165 t.Join();
5167 REGISTER_TEST2(Run, 108, FEATURE)
5168 } // namespace test108
5171 // test109: TN. Checking happens before between parent and child threads. {{{1
5172 namespace test109 {
5173 // Check that the detector correctly connects
5174 // pthread_create with the new thread
5175 // and
5176 // thread exit with pthread_join
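// Spelled out, the two arcs being checked for each thread i are:
//   GLOB[i] = 1;           --Start()/pthread_create-->  (*arg)++ in Worker()
//   (*arg)++ in Worker()   --Join()/pthread_join----->  GLOB[i]++ after Join()
// so none of the three writes to GLOB[i] should be reported as a race.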
5177 const int N = 32;
5178 static int GLOB[N];
5180 void Worker(void *a) {
5181 usleep(10000);
5182 // printf("--Worker : %ld %p\n", (int*)a - GLOB, (void*)pthread_self());
5183 int *arg = (int*)a;
5184 (*arg)++;
5187 void Run() {
5188 printf("test109: negative\n");
5189 MyThread *t[N];
5190 for (int i = 0; i < N; i++) {
5191 t[i] = new MyThread(Worker, &GLOB[i]);
5193 for (int i = 0; i < N; i++) {
5194 ANNOTATE_TRACE_MEMORY(&GLOB[i]);
5195 GLOB[i] = 1;
5196 t[i]->Start();
5197 // printf("--Started: %p\n", (void*)t[i]->tid());
5199 for (int i = 0; i < N; i++) {
5200 // printf("--Joining: %p\n", (void*)t[i]->tid());
5201 t[i]->Join();
5202 // printf("--Joined : %p\n", (void*)t[i]->tid());
5203 GLOB[i]++;
5205 for (int i = 0; i < N; i++) delete t[i];
5207 printf("\tGLOB=%d\n", GLOB[13]);
5209 REGISTER_TEST(Run, 109)
5210 } // namespace test109
5213 // test110: TP. Simple races with stack, global and heap objects. {{{1
5214 namespace test110 {
5215 int GLOB = 0;
5216 static int STATIC;
5218 int *STACK = 0;
5220 int *MALLOC;
5221 int *CALLOC;
5222 int *REALLOC;
5223 int *VALLOC;
5224 int *PVALLOC;
5225 int *MEMALIGN;
5226 union pi_pv_union { int* pi; void* pv; } POSIX_MEMALIGN;
5227 int *MMAP;
5229 int *NEW;
5230 int *NEW_ARR;
5232 void Worker() {
5233 GLOB++;
5234 STATIC++;
5236 (*STACK)++;
5238 (*MALLOC)++;
5239 (*CALLOC)++;
5240 (*REALLOC)++;
5241 (*VALLOC)++;
5242 (*PVALLOC)++;
5243 (*MEMALIGN)++;
5244 (*(POSIX_MEMALIGN.pi))++;
5245 (*MMAP)++;
5247 (*NEW)++;
5248 (*NEW_ARR)++;
5250 void Run() {
5251 int x = 0;
5252 STACK = &x;
5254 MALLOC = (int*)malloc(sizeof(int));
5255 CALLOC = (int*)calloc(1, sizeof(int));
5256 REALLOC = (int*)realloc(NULL, sizeof(int));
5257 VALLOC = (int*)valloc(sizeof(int));
5258 PVALLOC = (int*)valloc(sizeof(int)); // TODO: pvalloc breaks helgrind.
5259 MEMALIGN = (int*)memalign(64, sizeof(int));
5260 CHECK(0 == posix_memalign(&POSIX_MEMALIGN.pv, 64, sizeof(int)));
5261 MMAP = (int*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE,
5262 MAP_PRIVATE | MAP_ANON, -1, 0);
5264 NEW = new int;
5265 NEW_ARR = new int[10];
5268 FAST_MODE_INIT(STACK);
5269 ANNOTATE_EXPECT_RACE(STACK, "real race on stack object");
5270 FAST_MODE_INIT(&GLOB);
5271 ANNOTATE_EXPECT_RACE(&GLOB, "real race on global object");
5272 FAST_MODE_INIT(&STATIC);
5273 ANNOTATE_EXPECT_RACE(&STATIC, "real race on a static global object");
5274 FAST_MODE_INIT(MALLOC);
5275 ANNOTATE_EXPECT_RACE(MALLOC, "real race on a malloc-ed object");
5276 FAST_MODE_INIT(CALLOC);
5277 ANNOTATE_EXPECT_RACE(CALLOC, "real race on a calloc-ed object");
5278 FAST_MODE_INIT(REALLOC);
5279 ANNOTATE_EXPECT_RACE(REALLOC, "real race on a realloc-ed object");
5280 FAST_MODE_INIT(VALLOC);
5281 ANNOTATE_EXPECT_RACE(VALLOC, "real race on a valloc-ed object");
5282 FAST_MODE_INIT(PVALLOC);
5283 ANNOTATE_EXPECT_RACE(PVALLOC, "real race on a pvalloc-ed object");
5284 FAST_MODE_INIT(MEMALIGN);
5285 ANNOTATE_EXPECT_RACE(MEMALIGN, "real race on a memalign-ed object");
5286 FAST_MODE_INIT(POSIX_MEMALIGN.pi);
5287 ANNOTATE_EXPECT_RACE(POSIX_MEMALIGN.pi, "real race on a posix_memalign-ed object");
5288 FAST_MODE_INIT(MMAP);
5289 ANNOTATE_EXPECT_RACE(MMAP, "real race on a mmap-ed object");
5291 FAST_MODE_INIT(NEW);
5292 ANNOTATE_EXPECT_RACE(NEW, "real race on a new-ed object");
5293 FAST_MODE_INIT(NEW_ARR);
5294 ANNOTATE_EXPECT_RACE(NEW_ARR, "real race on a new[]-ed object");
5296 MyThreadArray t(Worker, Worker, Worker);
5297 t.Start();
5298 t.Join();
5299 printf("test110: positive (race on a stack object)\n");
5300 printf("\tSTACK=%d\n", *STACK);
5301 CHECK(GLOB <= 3);
5302 CHECK(STATIC <= 3);
5304 free(MALLOC);
5305 free(CALLOC);
5306 free(REALLOC);
5307 free(VALLOC);
5308 free(PVALLOC);
5309 free(MEMALIGN);
5310 free(POSIX_MEMALIGN.pv);
5311 munmap(MMAP, sizeof(int));
5312 delete NEW;
5313 delete [] NEW_ARR;
5315 REGISTER_TEST(Run, 110)
5316 } // namespace test110
5319 // test111: TN. Unit test for a bug related to stack handling. {{{1
5320 namespace test111 {
5321 char *GLOB = 0;
5322 bool COND = false;
5323 Mutex mu;
5324 const int N = 3000;
5326 void write_to_p(char *p, int val) {
5327 for (int i = 0; i < N; i++)
5328 p[i] = val;
5331 static bool ArgIsTrue(bool *arg) {
5332 // printf("ArgIsTrue: %d tid=%d\n", *arg, (int)pthread_self());
5333 return *arg == true;
5336 void f1() {
5337 char some_stack[N];
5338 write_to_p(some_stack, 1);
5339 mu.LockWhen(Condition<bool>(&ArgIsTrue, &COND));
5340 mu.Unlock();
5343 void f2() {
5344 char some_stack[N];
5345 char some_more_stack[N];
5346 write_to_p(some_stack, 2);
5347 write_to_p(some_more_stack, 2);
5350 void f0() { f2(); }
5352 void Worker1() {
5353 f0();
5354 f1();
5355 f2();
5358 void Worker2() {
5359 usleep(100000);
5360 mu.Lock();
5361 COND = true;
5362 mu.Unlock();
5365 void Run() {
5366 printf("test111: regression test\n");
5367 MyThreadArray t(Worker1, Worker1, Worker2);
5368 // AnnotateSetVerbosity(__FILE__, __LINE__, 3);
5369 t.Start();
5370 t.Join();
5371 // AnnotateSetVerbosity(__FILE__, __LINE__, 1);
5373 REGISTER_TEST2(Run, 111, FEATURE)
5374 } // namespace test111
5376 // test112: STAB. Test for ANNOTATE_PUBLISH_MEMORY_RANGE{{{1
5377 namespace test112 {
5378 char *GLOB = 0;
5379 const int N = 64 * 5;
5380 Mutex mu;
5381 bool ready = false; // under mu
5382 int beg, end; // under mu
5384 Mutex mu1;
5386 void Worker() {
5388 bool is_ready = false;
5389 int b, e;
5390 while (!is_ready) {
5391 mu.Lock();
5392 is_ready = ready;
5393 b = beg;
5394 e = end;
5395 mu.Unlock();
5396 usleep(1000);
5399 mu1.Lock();
5400 for (int i = b; i < e; i++) {
5401 GLOB[i]++;
5403 mu1.Unlock();
5406 void PublishRange(int b, int e) {
5407 MyThreadArray t(Worker, Worker);
5408 ready = false; // runs before other threads
5409 t.Start();
5411 ANNOTATE_NEW_MEMORY(GLOB + b, e - b);
5412 ANNOTATE_TRACE_MEMORY(GLOB + b);
5413 for (int j = b; j < e; j++) {
5414 GLOB[j] = 0;
5416 ANNOTATE_PUBLISH_MEMORY_RANGE(GLOB + b, e - b);
5418 // hand off
5419 mu.Lock();
5420 ready = true;
5421 beg = b;
5422 end = e;
5423 mu.Unlock();
5425 t.Join();
5428 void Run() {
5429 printf("test112: stability (ANNOTATE_PUBLISH_MEMORY_RANGE)\n");
5430 GLOB = new char [N];
5432 PublishRange(0, 10);
5433 PublishRange(3, 5);
5435 PublishRange(12, 13);
5436 PublishRange(10, 14);
5438 PublishRange(15, 17);
5439 PublishRange(16, 18);
5441 // do a few more random publishes.
5442 for (int i = 0; i < 20; i++) {
5443 const int begin = rand() % N;
5444 const int size = (rand() % (N - begin)) + 1;
5445 CHECK(size > 0);
5446 CHECK(begin + size <= N);
5447 PublishRange(begin, begin + size);
5450 printf("GLOB = %d\n", (int)GLOB[0]);
5452 REGISTER_TEST2(Run, 112, STABILITY)
5453 } // namespace test112
5456 // test113: PERF. A lot of lock/unlock calls. Many locks {{{1
5457 namespace test113 {
5458 const int kNumIter = 100000;
5459 const int kNumLocks = 7;
5460 Mutex MU[kNumLocks];
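// The low kNumLocks bits of i select which mutexes are taken on iteration i,
// e.g. i = 5 (binary 0000101) locks MU[0] and MU[2] and later unlocks them in
// reverse order, so the loop below cycles through all 2^7 = 128 lock sets.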
5461 void Run() {
5462 printf("test113: perf\n");
5463 for (int i = 0; i < kNumIter; i++ ) {
5464 for (int j = 0; j < kNumLocks; j++) {
5465 if (i & (1 << j)) MU[j].Lock();
5467 for (int j = kNumLocks - 1; j >= 0; j--) {
5468 if (i & (1 << j)) MU[j].Unlock();
5472 REGISTER_TEST(Run, 113)
5473 } // namespace test113
5476 // test114: STAB. Recursive lock. {{{1
5477 namespace test114 {
5478 int Bar() {
5479 static int bar = 1;
5480 return bar;
5482 int Foo() {
5483 static int foo = Bar();
5484 return foo;
5486 void Worker() {
5487 static int x = Foo();
5488 CHECK(x == 1);
5490 void Run() {
5491 printf("test114: stab\n");
5492 MyThreadArray t(Worker, Worker);
5493 t.Start();
5494 t.Join();
5496 REGISTER_TEST(Run, 114)
5497 } // namespace test114
5500 // test115: TN. sem_open. {{{1
5501 namespace test115 {
5502 int tid = 0;
5503 Mutex mu;
5504 const char *kSemName = "drt-test-sem";
5506 int GLOB = 0;
5508 sem_t *DoSemOpen() {
5509 // TODO: there is some race report inside sem_open
5510 // for which suppressions do not work... (???)
5511 ANNOTATE_IGNORE_WRITES_BEGIN();
5512 sem_t *sem = sem_open(kSemName, O_CREAT, 0600, 3);
5513 ANNOTATE_IGNORE_WRITES_END();
5514 return sem;
5517 void Worker() {
5518 mu.Lock();
5519 int my_tid = tid++;
5520 mu.Unlock();
5522 if (my_tid == 0) {
5523 GLOB = 1;
5526 // if the detector observes a happens-before arc between
5527 // sem_open and sem_wait, it will be silent.
5528 sem_t *sem = DoSemOpen();
5529 usleep(100000);
5530 CHECK(sem != SEM_FAILED);
5531 CHECK(sem_wait(sem) == 0);
5533 if (my_tid > 0) {
5534 CHECK(GLOB == 1);
5538 void Run() {
5539 printf("test115: stab (sem_open())\n");
5541 // just check that sem_open is not completely broken
5542 sem_unlink(kSemName);
5543 sem_t* sem = DoSemOpen();
5544 CHECK(sem != SEM_FAILED);
5545 CHECK(sem_wait(sem) == 0);
5546 sem_unlink(kSemName);
5548 // check that sem_open and sem_wait create a happens-before arc.
5549 MyThreadArray t(Worker, Worker, Worker);
5550 t.Start();
5551 t.Join();
5552 // clean up
5553 sem_unlink(kSemName);
5555 REGISTER_TEST(Run, 115)
5556 } // namespace test115
5559 // test116: TN. some operations with string<> objects. {{{1
5560 namespace test116 {
5562 void Worker() {
5563 string A[10], B[10], C[10];
5564 for (int i = 0; i < 1000; i++) {
5565 for (int j = 0; j < 10; j++) {
5566 string &a = A[j];
5567 string &b = B[j];
5568 string &c = C[j];
5569 a = "sdl;fkjhasdflksj df";
5570 b = "sdf sdf;ljsd ";
5571 c = "'sfdf df";
5572 c = b;
5573 a = c;
5574 b = a;
5575 swap(a,b);
5576 swap(b,c);
5578 for (int j = 0; j < 10; j++) {
5579 string &a = A[j];
5580 string &b = B[j];
5581 string &c = C[j];
5582 a.clear();
5583 b.clear();
5584 c.clear();
5589 void Run() {
5590 printf("test116: negative (strings)\n");
5591 MyThreadArray t(Worker, Worker, Worker);
5592 t.Start();
5593 t.Join();
5595 REGISTER_TEST2(Run, 116, FEATURE|EXCLUDE_FROM_ALL)
5596 } // namespace test116
5598 // test117: TN. Many calls to function-scope static init. {{{1
5599 namespace test117 {
5600 const int N = 50;
5602 int Foo() {
5603 usleep(20000);
5604 return 1;
5607 void Worker(void *a) {
5608 static int foo = Foo();
5609 CHECK(foo == 1);
5612 void Run() {
5613 printf("test117: negative\n");
5614 MyThread *t[N];
5615 for (int i = 0; i < N; i++) {
5616 t[i] = new MyThread(Worker);
5618 for (int i = 0; i < N; i++) {
5619 t[i]->Start();
5621 for (int i = 0; i < N; i++) {
5622 t[i]->Join();
5624 for (int i = 0; i < N; i++) delete t[i];
5626 REGISTER_TEST(Run, 117)
5627 } // namespace test117
5631 // test118 PERF: One signal, multiple waits. {{{1
5632 namespace test118 {
5633 int GLOB = 0;
5634 const int kNumIter = 2000000;
5635 void Signaller() {
5636 usleep(50000);
5637 ANNOTATE_CONDVAR_SIGNAL(&GLOB);
5639 void Waiter() {
5640 for (int i = 0; i < kNumIter; i++) {
5641 ANNOTATE_CONDVAR_WAIT(&GLOB);
5642 if (i == kNumIter / 2)
5643 usleep(100000);
5646 void Run() {
5647 printf("test118: perf\n");
5648 MyThreadArray t(Signaller, Waiter, Signaller, Waiter);
5649 t.Start();
5650 t.Join();
5651 printf("\tGLOB=%d\n", GLOB);
5653 REGISTER_TEST(Run, 118)
5654 } // namespace test118
5657 // test119: TP. Testing that malloc does not introduce any HB arc. {{{1
5658 namespace test119 {
5659 int GLOB = 0;
5660 void Worker1() {
5661 GLOB = 1;
5662 free(malloc(123));
5664 void Worker2() {
5665 usleep(100000);
5666 free(malloc(345));
5667 GLOB = 2;
5669 void Run() {
5670 printf("test119: positive (checking if malloc creates HB arcs)\n");
5671 FAST_MODE_INIT(&GLOB);
5672 if (!(Tsan_PureHappensBefore() && kMallocUsesMutex))
5673 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "true race");
5674 MyThreadArray t(Worker1, Worker2);
5675 t.Start();
5676 t.Join();
5677 printf("\tGLOB=%d\n", GLOB);
5679 REGISTER_TEST(Run, 119)
5680 } // namespace test119
5683 // test120: TP. Thread1: write then read. Thread2: read. {{{1
5684 namespace test120 {
5685 int GLOB = 0;
5687 void Thread1() {
5688 GLOB = 1; // write
5689 CHECK(GLOB); // read
5692 void Thread2() {
5693 usleep(100000);
5694 CHECK(GLOB >= 0); // read
5697 void Run() {
5698 FAST_MODE_INIT(&GLOB);
5699 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "TP (T1: write then read, T2: read)");
5700 printf("test120: positive\n");
5701 MyThreadArray t(Thread1, Thread2);
5702 GLOB = 1;
5703 t.Start();
5704 t.Join();
5705 printf("\tGLOB=%d\n", GLOB);
5707 REGISTER_TEST(Run, 120)
5708 } // namespace test120
5711 // test121: TP. Example of double-checked-locking {{{1
5712 namespace test121 {
5713 struct Foo {
5714 uintptr_t a, b[15];
5715 } __attribute__ ((aligned (64)));
5717 static Mutex mu;
5718 static Foo *foo;
5720 void InitMe() {
5721 if (!foo) {
5722 MutexLock lock(&mu);
5723 if (!foo) {
5724 ANNOTATE_EXPECT_RACE_FOR_TSAN(&foo, "test121. Double-checked locking (ptr)");
5725 foo = new Foo;
5726 if (!Tsan_FastMode())
5727 ANNOTATE_EXPECT_RACE_FOR_TSAN(&foo->a, "test121. Double-checked locking (obj)");
5728 foo->a = 42;
5733 void UseMe() {
5734 InitMe();
5735 CHECK(foo && foo->a == 42);
5738 void Worker1() { UseMe(); }
5739 void Worker2() { UseMe(); }
5740 void Worker3() { UseMe(); }
5743 void Run() {
5744 FAST_MODE_INIT(&foo);
5745 printf("test121: TP. Example of double-checked-locking\n");
5746 MyThreadArray t1(Worker1, Worker2, Worker3);
5747 t1.Start();
5748 t1.Join();
5749 delete foo;
5751 REGISTER_TEST(Run, 121)
5752 } // namespace test121
5754 // test122 TP: Simple test with RWLock {{{1
5755 namespace test122 {
5756 int VAR1 = 0;
5757 int VAR2 = 0;
5758 RWLock mu;
5760 void WriteWhileHoldingReaderLock(int *p) {
5761 usleep(100000);
5762 ReaderLockScoped lock(&mu); // Reader lock for writing. -- bug.
5763 (*p)++;
5766 void CorrectWrite(int *p) {
5767 WriterLockScoped lock(&mu);
5768 (*p)++;
5771 void Thread1() { WriteWhileHoldingReaderLock(&VAR1); }
5772 void Thread2() { CorrectWrite(&VAR1); }
5773 void Thread3() { CorrectWrite(&VAR2); }
5774 void Thread4() { WriteWhileHoldingReaderLock(&VAR2); }
5777 void Run() {
5778 printf("test122: positive (rw-lock)\n");
5779 VAR1 = 0;
5780 VAR2 = 0;
5781 ANNOTATE_TRACE_MEMORY(&VAR1);
5782 ANNOTATE_TRACE_MEMORY(&VAR2);
5783 if (!Tsan_PureHappensBefore()) {
5784 ANNOTATE_EXPECT_RACE_FOR_TSAN(&VAR1, "test122. TP. ReaderLock-ed while writing");
5785 ANNOTATE_EXPECT_RACE_FOR_TSAN(&VAR2, "test122. TP. ReaderLock-ed while writing");
5787 MyThreadArray t(Thread1, Thread2, Thread3, Thread4);
5788 t.Start();
5789 t.Join();
5791 REGISTER_TEST(Run, 122)
5792 } // namespace test122
5795 // test123 TP: accesses of different sizes. {{{1
5796 namespace test123 {
5798 union uint_union {
5799 uint64_t u64[1];
5800 uint32_t u32[2];
5801 uint16_t u16[4];
5802 uint8_t u8[8];
5805 uint_union MEM[8];
5807 // Q. Hey dude, why so many functions?
5808 // A. I need different stack traces for different accesses.
5810 void Wr64_0() { MEM[0].u64[0] = 1; }
5811 void Wr64_1() { MEM[1].u64[0] = 1; }
5812 void Wr64_2() { MEM[2].u64[0] = 1; }
5813 void Wr64_3() { MEM[3].u64[0] = 1; }
5814 void Wr64_4() { MEM[4].u64[0] = 1; }
5815 void Wr64_5() { MEM[5].u64[0] = 1; }
5816 void Wr64_6() { MEM[6].u64[0] = 1; }
5817 void Wr64_7() { MEM[7].u64[0] = 1; }
5819 void Wr32_0() { MEM[0].u32[0] = 1; }
5820 void Wr32_1() { MEM[1].u32[1] = 1; }
5821 void Wr32_2() { MEM[2].u32[0] = 1; }
5822 void Wr32_3() { MEM[3].u32[1] = 1; }
5823 void Wr32_4() { MEM[4].u32[0] = 1; }
5824 void Wr32_5() { MEM[5].u32[1] = 1; }
5825 void Wr32_6() { MEM[6].u32[0] = 1; }
5826 void Wr32_7() { MEM[7].u32[1] = 1; }
5828 void Wr16_0() { MEM[0].u16[0] = 1; }
5829 void Wr16_1() { MEM[1].u16[1] = 1; }
5830 void Wr16_2() { MEM[2].u16[2] = 1; }
5831 void Wr16_3() { MEM[3].u16[3] = 1; }
5832 void Wr16_4() { MEM[4].u16[0] = 1; }
5833 void Wr16_5() { MEM[5].u16[1] = 1; }
5834 void Wr16_6() { MEM[6].u16[2] = 1; }
5835 void Wr16_7() { MEM[7].u16[3] = 1; }
5837 void Wr8_0() { MEM[0].u8[0] = 1; }
5838 void Wr8_1() { MEM[1].u8[1] = 1; }
5839 void Wr8_2() { MEM[2].u8[2] = 1; }
5840 void Wr8_3() { MEM[3].u8[3] = 1; }
5841 void Wr8_4() { MEM[4].u8[4] = 1; }
5842 void Wr8_5() { MEM[5].u8[5] = 1; }
5843 void Wr8_6() { MEM[6].u8[6] = 1; }
5844 void Wr8_7() { MEM[7].u8[7] = 1; }
5846 void WriteAll64() {
5847 Wr64_0();
5848 Wr64_1();
5849 Wr64_2();
5850 Wr64_3();
5851 Wr64_4();
5852 Wr64_5();
5853 Wr64_6();
5854 Wr64_7();
5857 void WriteAll32() {
5858 Wr32_0();
5859 Wr32_1();
5860 Wr32_2();
5861 Wr32_3();
5862 Wr32_4();
5863 Wr32_5();
5864 Wr32_6();
5865 Wr32_7();
5868 void WriteAll16() {
5869 Wr16_0();
5870 Wr16_1();
5871 Wr16_2();
5872 Wr16_3();
5873 Wr16_4();
5874 Wr16_5();
5875 Wr16_6();
5876 Wr16_7();
5879 void WriteAll8() {
5880 Wr8_0();
5881 Wr8_1();
5882 Wr8_2();
5883 Wr8_3();
5884 Wr8_4();
5885 Wr8_5();
5886 Wr8_6();
5887 Wr8_7();
5890 void W00() { WriteAll64(); }
5891 void W01() { WriteAll64(); }
5892 void W02() { WriteAll64(); }
5894 void W10() { WriteAll32(); }
5895 void W11() { WriteAll32(); }
5896 void W12() { WriteAll32(); }
5898 void W20() { WriteAll16(); }
5899 void W21() { WriteAll16(); }
5900 void W22() { WriteAll16(); }
5902 void W30() { WriteAll8(); }
5903 void W31() { WriteAll8(); }
5904 void W32() { WriteAll8(); }
5906 typedef void (*F)(void);
5908 void TestTwoSizes(F f1, F f2) {
5909 // first f1, then f2
5910 ANNOTATE_NEW_MEMORY(&MEM, sizeof(MEM));
5911 memset(&MEM, 0, sizeof(MEM));
5912 MyThreadArray t1(f1, f2);
5913 t1.Start();
5914 t1.Join();
5915 // reverse order
5916 ANNOTATE_NEW_MEMORY(&MEM, sizeof(MEM));
5917 memset(&MEM, 0, sizeof(MEM));
5918 MyThreadArray t2(f2, f1);
5919 t2.Start();
5920 t2.Join();
5923 void Run() {
5924 printf("test123: positive (different sizes)\n");
5925 TestTwoSizes(W00, W10);
5926 // TestTwoSizes(W01, W20);
5927 // TestTwoSizes(W02, W30);
5928 // TestTwoSizes(W11, W21);
5929 // TestTwoSizes(W12, W31);
5930 // TestTwoSizes(W22, W32);
5933 REGISTER_TEST2(Run, 123, FEATURE|EXCLUDE_FROM_ALL)
5934 } // namespace test123
5937 // test124: What happens if we delete an unlocked lock? {{{1
5938 namespace test124 {
5939 // This test does not work with pthreads (you can't call
5940 // pthread_mutex_destroy on a locked lock).
5941 int GLOB = 0;
5942 const int N = 1000;
5943 void Worker() {
5944 Mutex *a_large_local_array_of_mutexes;
5945 a_large_local_array_of_mutexes = new Mutex[N];
5946 for (int i = 0; i < N; i++) {
5947 a_large_local_array_of_mutexes[i].Lock();
5949 delete []a_large_local_array_of_mutexes;
5950 GLOB = 1;
5953 void Run() {
5954 printf("test124: negative\n");
5955 MyThreadArray t(Worker, Worker, Worker);
5956 t.Start();
5957 t.Join();
5958 printf("\tGLOB=%d\n", GLOB);
5960 REGISTER_TEST2(Run, 124, FEATURE|EXCLUDE_FROM_ALL)
5961 } // namespace test124
5964 // test125 TN: Backwards lock (annotated). {{{1
5965 namespace test125 {
5966 // This test uses a "Backwards mutex" locking protocol.
5967 // We take a *reader* lock when writing to per-thread data
5968 // (GLOB[thread_num]) and we take a *writer* lock when we
5969 // are reading from the entire array at once.
5971 // Such a locking protocol is not understood by ThreadSanitizer's
5972 // hybrid state machine. So, you either have to use a pure-happens-before
5973 // detector ("tsan --pure-happens-before") or apply pure happens-before mode
5974 // to this particular lock by using ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&mu).
5976 const int n_threads = 3;
5977 RWLock mu;
5978 int GLOB[n_threads];
5980 int adder_num; // updated atomically.
5982 void Adder() {
5983 int my_num = AtomicIncrement(&adder_num, 1);
5985 ReaderLockScoped lock(&mu);
5986 GLOB[my_num]++;
5989 void Aggregator() {
5990 int sum = 0;
5992 WriterLockScoped lock(&mu);
5993 for (int i = 0; i < n_threads; i++) {
5994 sum += GLOB[i];
5997 printf("sum=%d\n", sum);
6000 void Run() {
6001 printf("test125: negative\n");
6003 ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&mu);
6005 // run Adders, then Aggregator
6007 MyThreadArray t(Adder, Adder, Adder, Aggregator);
6008 t.Start();
6009 t.Join();
6012 // Run Aggregator first.
6013 adder_num = 0;
6015 MyThreadArray t(Aggregator, Adder, Adder, Adder);
6016 t.Start();
6017 t.Join();
6021 REGISTER_TEST(Run, 125)
6022 } // namespace test125
6024 // test126 TN: test for BlockingCounter {{{1
6025 namespace test126 {
6026 BlockingCounter *blocking_counter;
6027 int GLOB = 0;
6028 void Worker() {
6029 CHECK(blocking_counter);
6030 CHECK(GLOB == 0);
6031 blocking_counter->DecrementCount();
6033 void Run() {
6034 printf("test126: negative\n");
6035 MyThreadArray t(Worker, Worker, Worker);
6036 blocking_counter = new BlockingCounter(3);
6037 t.Start();
6038 blocking_counter->Wait();
6039 GLOB = 1;
6040 t.Join();
6041 printf("\tGLOB=%d\n", GLOB);
6043 REGISTER_TEST(Run, 126)
6044 } // namespace test126
6047 // test127. Bad code: unlocking a mutex locked by another thread. {{{1
6048 namespace test127 {
6049 Mutex mu;
6050 void Thread1() {
6051 mu.Lock();
6053 void Thread2() {
6054 usleep(100000);
6055 mu.Unlock();
6057 void Run() {
6058 printf("test127: unlocking a mutex locked by another thread.\n");
6059 MyThreadArray t(Thread1, Thread2);
6060 t.Start();
6061 t.Join();
6063 REGISTER_TEST(Run, 127)
6064 } // namespace test127
6066 // test128. Suppressed code in concurrent accesses {{{1
6067 // Please use --suppressions=unittest.supp flag when running this test.
6068 namespace test128 {
6069 Mutex mu;
6070 int GLOB = 0;
6071 void Worker() {
6072 usleep(100000);
6073 mu.Lock();
6074 GLOB++;
6075 mu.Unlock();
6077 void ThisFunctionShouldBeSuppressed() {
6078 GLOB++;
6080 void Run() {
6081 printf("test128: Suppressed code in concurrent accesses.\n");
6082 MyThreadArray t(Worker, ThisFunctionShouldBeSuppressed);
6083 t.Start();
6084 t.Join();
6086 REGISTER_TEST2(Run, 128, FEATURE | EXCLUDE_FROM_ALL)
6087 } // namespace test128
6089 // test129: TN. Synchronization via ReaderLockWhen(). {{{1
6090 namespace test129 {
6091 int GLOB = 0;
6092 Mutex MU;
6093 bool WeirdCondition(int* param) {
6094 *param = GLOB; // a write into Waiter's memory
6095 return GLOB > 0;
6097 void Waiter() {
6098 int param = 0;
6099 MU.ReaderLockWhen(Condition<int>(WeirdCondition, &param));
6100 MU.ReaderUnlock();
6101 CHECK(GLOB > 0);
6102 CHECK(param > 0);
6104 void Waker() {
6105 usleep(100000); // Make sure the waiter blocks.
6106 MU.Lock();
6107 GLOB++;
6108 MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
6110 void Run() {
6111 printf("test129: Synchronization via ReaderLockWhen()\n");
6112 MyThread mt(Waiter, NULL, "Waiter Thread");
6113 mt.Start();
6114 Waker();
6115 mt.Join();
6116 printf("\tGLOB=%d\n", GLOB);
6118 REGISTER_TEST2(Run, 129, FEATURE);
6119 } // namespace test129
6121 // test130: TN. Per-thread. {{{1
6122 namespace test130 {
6123 #ifndef NO_TLS
6124 // This test verifies that the race detector handles
6125 // thread-local storage (TLS) correctly.
6126 // As of 09-03-30 ThreadSanitizer has a bug:
6127 // - Thread1 starts
6128 // - Thread1 touches per_thread_global
6129 // - Thread1 ends
6130 // - Thread2 starts (and there is no happens-before relation between it and
6131 // Thread1)
6132 // - Thread2 touches per_thread_global
6133 // It may happen that Thread2's per_thread_global ends up at the same address
6134 // as Thread1's. Since there is no happens-before relation between the threads,
6135 // ThreadSanitizer reports a race.
6137 // test131 does the same for stack.
6139 static __thread int per_thread_global[10] = {0};
6141 void RealWorker() { // Touch per_thread_global.
6142 per_thread_global[1]++;
6143 errno++;
6146 void Worker() { // Spawn a few threads that touch per_thread_global.
6147 MyThreadArray t(RealWorker, RealWorker);
6148 t.Start();
6149 t.Join();
6151 void Worker0() { sleep(0); Worker(); }
6152 void Worker1() { sleep(1); Worker(); }
6153 void Worker2() { sleep(2); Worker(); }
6154 void Worker3() { sleep(3); Worker(); }
6156 void Run() {
6157 printf("test130: Per-thread\n");
6158 MyThreadArray t1(Worker0, Worker1, Worker2, Worker3);
6159 t1.Start();
6160 t1.Join();
6161 printf("\tper_thread_global=%d\n", per_thread_global[1]);
6163 REGISTER_TEST(Run, 130)
6164 #endif // NO_TLS
6165 } // namespace test130
6168 // test131: TN. Stack. {{{1
6169 namespace test131 {
6170 // Same as test130, but for stack.
6172 void RealWorker() { // Touch stack.
6173 int stack_var = 0;
6174 stack_var++;
6177 void Worker() { // Spawn a few threads that touch the stack.
6178 MyThreadArray t(RealWorker, RealWorker);
6179 t.Start();
6180 t.Join();
6182 void Worker0() { sleep(0); Worker(); }
6183 void Worker1() { sleep(1); Worker(); }
6184 void Worker2() { sleep(2); Worker(); }
6185 void Worker3() { sleep(3); Worker(); }
6187 void Run() {
6188 printf("test131: stack\n");
6189 MyThreadArray t(Worker0, Worker1, Worker2, Worker3);
6190 t.Start();
6191 t.Join();
6193 REGISTER_TEST(Run, 131)
6194 } // namespace test131
6197 // test132: TP. Simple race (write vs write). Works in fast-mode. {{{1
6198 namespace test132 {
6199 int GLOB = 0;
6200 void Worker() { GLOB = 1; }
6202 void Run1() {
6203 FAST_MODE_INIT(&GLOB);
6204 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test132");
6205 printf("test132: positive; &GLOB=%p\n", &GLOB);
6206 ANNOTATE_TRACE_MEMORY(&GLOB);
6207 GLOB = 7;
6208 MyThreadArray t(Worker, Worker);
6209 t.Start();
6210 t.Join();
6213 void Run() {
6214 Run1();
6216 REGISTER_TEST(Run, 132);
6217 } // namespace test132
6220 // test133: TP. Simple race (write vs write). Works in fast mode. {{{1
6221 namespace test133 {
6222 // Same as test132, but everything is run from a separate thread spawned from
6223 // the main thread.
6224 int GLOB = 0;
6225 void Worker() { GLOB = 1; }
6227 void Run1() {
6228 FAST_MODE_INIT(&GLOB);
6229 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test133");
6230 printf("test133: positive; &GLOB=%p\n", &GLOB);
6231 ANNOTATE_TRACE_MEMORY(&GLOB);
6232 GLOB = 7;
6233 MyThreadArray t(Worker, Worker);
6234 t.Start();
6235 t.Join();
6237 void Run() {
6238 MyThread t(Run1);
6239 t.Start();
6240 t.Join();
6242 REGISTER_TEST(Run, 133);
6243 } // namespace test133
6246 // test134 TN. Swap. Variant of test79. {{{1
6247 namespace test134 {
6248 #if 0
6249 typedef __gnu_cxx::hash_map<int, int> map_t;
6250 #else
6251 typedef std::map<int, int> map_t;
6252 #endif
6253 map_t map;
6254 Mutex mu;
6255 // Here we use swap to pass map between threads.
6256 // The synchronization is correct, but w/o the annotation
6257 // any hybrid detector will complain.
6259 // Swap is very unfriendly to the lock-set (and hybrid) race detectors.
6260 // Since tmp is destructed outside the mutex, we need to have a happens-before
6261 // arc between any prior access to map and here.
6262 // Since the internals of tmp are created outside the mutex and are passed to
6263 // another thread, we need to have an h-b arc between here and any future access.
6264 // These arcs can be created by HAPPENS_{BEFORE,AFTER} annotations, but it is
6265 // much simpler to apply pure-happens-before mode to the mutex mu.
6266 void Swapper() {
6267 map_t tmp;
6268 MutexLock lock(&mu);
6269 ANNOTATE_HAPPENS_AFTER(&map);
6270 // We swap the new empty map 'tmp' with 'map'.
6271 map.swap(tmp);
6272 ANNOTATE_HAPPENS_BEFORE(&map);
6273 // tmp (which is the old version of map) is destroyed here.
6276 void Worker() {
6277 MutexLock lock(&mu);
6278 ANNOTATE_HAPPENS_AFTER(&map);
6279 map[1]++;
6280 ANNOTATE_HAPPENS_BEFORE(&map);
6283 void Run() {
6284 printf("test134: negative (swap)\n");
6285 // ********************** Shorter way: ***********************
6286 // ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&mu);
6287 MyThreadArray t(Worker, Worker, Swapper, Worker, Worker);
6288 t.Start();
6289 t.Join();
6291 REGISTER_TEST(Run, 134)
6292 } // namespace test134
6294 // test135 TN. Stress test for mmap/munmap. {{{1
6295 namespace test135 {
6297 void SubWorker() {
6298 const long SIZE = 65536;
6299 for (int i = 0; i < 32; i++) {
6300 int *ptr = (int*)mmap(NULL, SIZE, PROT_READ | PROT_WRITE,
6301 MAP_PRIVATE | MAP_ANON, -1, 0);
6302 *ptr = 42;
6303 munmap(ptr, SIZE);
6307 void Worker() {
6308 MyThreadArray t(SubWorker, SubWorker, SubWorker, SubWorker);
6309 t.Start();
6310 t.Join();
6313 void Run() {
6314 printf("test135: negative (mmap)\n");
6315 MyThreadArray t(Worker, Worker, Worker, Worker);
6316 t.Start();
6317 t.Join();
6319 REGISTER_TEST(Run, 135)
6320 } // namespace test135
6322 // test136. Unlock twice. {{{1
6323 namespace test136 {
6324 void Run() {
6325 printf("test136: unlock twice\n");
6326 pthread_mutexattr_t attr;
6327 CHECK(0 == pthread_mutexattr_init(&attr));
6328 CHECK(0 == pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK));
6330 pthread_mutex_t mu;
6331 CHECK(0 == pthread_mutex_init(&mu, &attr));
6332 CHECK(0 == pthread_mutex_lock(&mu));
6333 CHECK(0 == pthread_mutex_unlock(&mu));
6334 int ret_unlock = pthread_mutex_unlock(&mu); // unlocking twice.
6335 int ret_destroy = pthread_mutex_destroy(&mu);
6336 printf(" pthread_mutex_unlock returned %d\n", ret_unlock);
6337 printf(" pthread_mutex_destroy returned %d\n", ret_destroy);
6341 REGISTER_TEST(Run, 136)
6342 } // namespace test136
6344 // test137 TP. Races on stack variables. {{{1
6345 namespace test137 {
6346 int GLOB = 0;
6347 ProducerConsumerQueue q(10);
6349 void Worker() {
6350 int stack;
6351 int *tmp = (int*)q.Get();
6352 (*tmp)++;
6353 int *racey = &stack;
6354 q.Put(racey);
6355 (*racey)++;
6356 usleep(150000);
6357 // We may miss the races if we sleep less due to die_memory events...
6360 void Run() {
6361 int tmp = 0;
6362 printf("test137: TP. Races on stack variables.\n");
6363 q.Put(&tmp);
6364 MyThreadArray t(Worker, Worker, Worker, Worker);
6365 t.Start();
6366 t.Join();
6367 q.Get();
6370 REGISTER_TEST2(Run, 137, FEATURE | EXCLUDE_FROM_ALL)
6371 } // namespace test137
6373 // test138 FN. Two closures hit the same thread in ThreadPool. {{{1
6374 namespace test138 {
6375 int GLOB = 0;
6377 void Worker() {
6378 usleep(100000);
6379 GLOB++;
6382 void Run() {
6383 FAST_MODE_INIT(&GLOB);
6384 printf("test138: FN. Two closures hit the same thread in ThreadPool.\n");
6386 // When using thread pools, two concurrent callbacks might be scheduled
6387 // onto the same executor thread. As a result, an unnecessary happens-before
6388 // relation may be introduced between the callbacks.
6389 // If we set the number of executor threads to 1, any known data
6390 // race detector will be silent. However, the same situation may happen
6391 // with any number of executor threads (with some probability).
6392 ThreadPool tp(1);
6393 tp.StartWorkers();
6394 tp.Add(NewCallback(Worker));
6395 tp.Add(NewCallback(Worker));
6398 REGISTER_TEST2(Run, 138, FEATURE)
6399 } // namespace test138
6401 // test139: FN. A true race hidden by reference counting annotation. {{{1
6402 namespace test139 {
6403 int GLOB = 0;
6404 RefCountedClass *obj;
6406 void Worker1() {
6407 GLOB++; // First access.
6408 obj->Unref();
6411 void Worker2() {
6412 usleep(100000);
6413 obj->Unref();
6414 GLOB++; // Second access.
6417 void Run() {
6418 FAST_MODE_INIT(&GLOB);
6419 printf("test139: FN. A true race hidden by reference counting annotation.\n");
6421 obj = new RefCountedClass;
6422 obj->AnnotateUnref();
6423 obj->Ref();
6424 obj->Ref();
6425 MyThreadArray mt(Worker1, Worker2);
6426 mt.Start();
6427 mt.Join();
6430 REGISTER_TEST2(Run, 139, FEATURE)
6431 } // namespace test139
6433 // test140 TN. Swap. Variant of test79 and test134. {{{1
6434 namespace test140 {
6435 #if 0
6436 typedef __gnu_cxx::hash_map<int, int> Container;
6437 #else
6438 typedef std::map<int,int> Container;
6439 #endif
6440 Mutex mu;
6441 static Container container;
6443 // Here we use swap to pass a Container between threads.
6444 // The synchronization is correct, but w/o the annotation
6445 // any hybrid detector will complain.
6447 // Unlike test134, we try to have a minimal set of annotations
6448 // so that extra h-b arcs do not hide other races.
6450 // Swap is very unfriendly to the lock-set (and hybrid) race detectors.
6451 // Since tmp is destructed outside the mutex, we need to have a happens-before
6452 // arc between any prior access to map and here.
6453 // Since the internals of tmp are created outside the mutex and are passed to
6454 // another thread, we need to have an h-b arc between here and any future access.
6456 // We want to be able to annotate swapper so that we don't need to annotate
6457 // anything else.
6458 void Swapper() {
6459 Container tmp;
6460 tmp[1] = tmp[2] = tmp[3] = 0;
6462 MutexLock lock(&mu);
6463 container.swap(tmp);
6464 // we are unpublishing the old container.
6465 ANNOTATE_UNPUBLISH_MEMORY_RANGE(&container, sizeof(container));
6466 // we are publishing the new container.
6467 ANNOTATE_PUBLISH_MEMORY_RANGE(&container, sizeof(container));
6469 tmp[1]++;
6470 tmp[2]++;
6471 // tmp (which is the old version of container) is destroyed here.
6474 void Worker() {
6475 MutexLock lock(&mu);
6476 container[1]++;
6477 int *v = &container[2];
6478 for (int i = 0; i < 10; i++) {
6479 // if uncommented, this will break ANNOTATE_UNPUBLISH_MEMORY_RANGE():
6480 // ANNOTATE_HAPPENS_BEFORE(v);
6481 if (i % 3) {
6482 (*v)++;
6487 void Run() {
6488 printf("test140: negative (swap) %p\n", &container);
6489 MyThreadArray t(Worker, Worker, Swapper, Worker, Worker);
6490 t.Start();
6491 t.Join();
6493 REGISTER_TEST(Run, 140)
6494 } // namespace test140
6496 // test141 FP. unlink/fopen, rmdir/opendir. {{{1
6497 namespace test141 {
6498 int GLOB1 = 0,
6499 GLOB2 = 0;
6500 char *dir_name = NULL,
6501 *filename = NULL;
6503 void Waker1() {
6504 usleep(100000);
6505 GLOB1 = 1; // Write
6506 // unlink deletes the file 'filename',
6507 // which exits the spin-loop in Waiter1().
6508 printf(" Deleting file...\n");
6509 CHECK(unlink(filename) == 0);
6512 void Waiter1() {
6513 FILE *tmp;
6514 while ((tmp = fopen(filename, "r")) != NULL) {
6515 fclose(tmp);
6516 usleep(10000);
6518 printf(" ...file has been deleted\n");
6519 GLOB1 = 2; // Write
6522 void Waker2() {
6523 usleep(100000);
6524 GLOB2 = 1; // Write
6525 // rmdir deletes the directory 'dir_name',
6526 // which exits the spin-loop in Waiter2().
6527 printf(" Deleting directory...\n");
6528 CHECK(rmdir(dir_name) == 0);
6531 void Waiter2() {
6532 DIR *tmp;
6533 while ((tmp = opendir(dir_name)) != NULL) {
6534 closedir(tmp);
6535 usleep(10000);
6537 printf(" ...directory has been deleted\n");
6538 GLOB2 = 2;
6541 void Run() {
6542 FAST_MODE_INIT(&GLOB1);
6543 FAST_MODE_INIT(&GLOB2);
6544 printf("test141: FP. unlink/fopen, rmdir/opendir.\n");
6546 dir_name = strdup("/tmp/tsan-XXXXXX");
6547 IGNORE_RETURN_VALUE(mkdtemp(dir_name));
6549 filename = strdup((std::string() + dir_name + "/XXXXXX").c_str());
6550 const int fd = mkstemp(filename);
6551 CHECK(fd >= 0);
6552 close(fd);
6554 MyThreadArray mta1(Waker1, Waiter1);
6555 mta1.Start();
6556 mta1.Join();
6558 MyThreadArray mta2(Waker2, Waiter2);
6559 mta2.Start();
6560 mta2.Join();
6561 free(filename);
6562 filename = 0;
6563 free(dir_name);
6564 dir_name = 0;
6566 REGISTER_TEST(Run, 141)
6567 } // namespace test141
6570 // Simple FIFO queue annotated with PCQ annotations. {{{1
6571 class FifoMessageQueue {
6572 public:
6573 FifoMessageQueue() { ANNOTATE_PCQ_CREATE(this); }
6574 ~FifoMessageQueue() { ANNOTATE_PCQ_DESTROY(this); }
6575 // Send a message. 'message' should be positive.
6576 void Put(int message) {
6577 CHECK(message);
6578 MutexLock lock(&mu_);
6579 ANNOTATE_PCQ_PUT(this);
6580 q_.push(message);
6582 // Return the message from the queue and pop it
6583 // or return 0 if there are no messages.
6584 int Get() {
6585 MutexLock lock(&mu_);
6586 if (q_.empty()) return 0;
6587 int res = q_.front();
6588 q_.pop();
6589 ANNOTATE_PCQ_GET(this);
6590 return res;
6592 private:
6593 Mutex mu_;
6594 queue<int> q_;
6598 // test142: TN. Check PCQ_* annotations. {{{1
6599 namespace test142 {
6600 // Putter writes to array[i] and sends a message 'i'.
6601 // Getters receive messages and read array[message].
6602 // PCQ_* annotations calm down the hybrid detectors.
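// The intended ordering for each message i is:
//   array[i] = i*i;  q.Put(i)   --happens-before-->   q.Get() == i;  read array[i]
// The PCQ annotations tell a hybrid detector that a Put() is paired with the
// Get() that retrieves the same element, which is exactly this arc.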
6604 const int N = 1000;
6605 int array[N+1];
6607 FifoMessageQueue q;
6609 void Putter() {
6610 for (int i = 1; i <= N; i++) {
6611 array[i] = i*i;
6612 q.Put(i);
6613 usleep(1000);
6617 void Getter() {
6618 int non_zero_received = 0;
6619 for (int i = 1; i <= N; i++) {
6620 int res = q.Get();
6621 if (res > 0) {
6622 CHECK(array[res] == res * res);
6623 non_zero_received++;
6625 usleep(1000);
6627 printf("T=%zd: non_zero_received=%d\n",
6628 (size_t)pthread_self(), non_zero_received);
6631 void Run() {
6632 printf("test142: tests PCQ annotations\n");
6633 MyThreadArray t(Putter, Getter, Getter);
6634 t.Start();
6635 t.Join();
6637 REGISTER_TEST(Run, 142)
6638 } // namespace test142
6641 // test143: TP. Check PCQ_* annotations. {{{1
6642 namespace test143 {
6643 // True positive.
6644 // We have a race on GLOB between Putter and one of the Getters.
6645 // Pure h-b will not see it.
6646 // If FifoMessageQueue was annotated using HAPPENS_BEFORE/AFTER, the race would
6647 // be missed too.
6648 // PCQ_* annotations do not hide this race.
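// Why the PCQ annotations do not hide it: Run() also calls Put() before the
// threads start, so one Getter may receive that earlier message; its
// CHECK(GLOB == 1) is then not ordered after Putter's "GLOB = 1" and races
// with it. A coarse HAPPENS_BEFORE/AFTER on the whole queue would order every
// Get() after every Put() and thus hide exactly this race.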
6649 int GLOB = 0;
6651 FifoMessageQueue q;
6653 void Putter() {
6654 GLOB = 1;
6655 q.Put(1);
6658 void Getter() {
6659 usleep(10000);
6660 q.Get();
6661 CHECK(GLOB == 1); // Race here
6664 void Run() {
6665 q.Put(1);
6666 if (!Tsan_PureHappensBefore()) {
6667 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "true races");
6669 printf("test143: tests PCQ annotations (true positive)\n");
6670 MyThreadArray t(Putter, Getter, Getter);
6671 t.Start();
6672 t.Join();
6674 REGISTER_TEST(Run, 143);
6675 } // namespace test143
6680 // test300: {{{1
6681 namespace test300 {
6682 int GLOB = 0;
6683 void Run() {
6685 REGISTER_TEST2(Run, 300, RACE_DEMO)
6686 } // namespace test300
6688 // test301: Simple race. {{{1
6689 namespace test301 {
6690 Mutex mu1; // This Mutex guards var.
6691 Mutex mu2; // This Mutex is not related to var.
6692 int var; // GUARDED_BY(mu1)
6694 void Thread1() { // Runs in thread named 'test-thread-1'.
6695 MutexLock lock(&mu1); // Correct Mutex.
6696 var = 1;
6699 void Thread2() { // Runs in thread named 'test-thread-2'.
6700 MutexLock lock(&mu2); // Wrong Mutex.
6701 var = 2;
6704 void Run() {
6705 var = 0;
6706 printf("test301: simple race.\n");
6707 MyThread t1(Thread1, NULL, "test-thread-1");
6708 MyThread t2(Thread2, NULL, "test-thread-2");
6709 t1.Start();
6710 t2.Start();
6711 t1.Join();
6712 t2.Join();
6714 REGISTER_TEST2(Run, 301, RACE_DEMO)
6715 } // namespace test301
6717 // test302: Complex race which happens at least twice. {{{1
6718 namespace test302 {
6719 // In this test we have many different accesses to GLOB and only one access
6720 // is not synchronized properly.
6721 int GLOB = 0;
6723 Mutex MU1;
6724 Mutex MU2;
6725 void Worker() {
6726 for(int i = 0; i < 100; i++) {
6727 switch(i % 4) {
6728 case 0:
6729 // This read is protected correctly.
6730 MU1.Lock(); CHECK(GLOB >= 0); MU1.Unlock();
6731 break;
6732 case 1:
6733 // Here we used the wrong lock! This is the reason for the race.
6734 MU2.Lock(); CHECK(GLOB >= 0); MU2.Unlock();
6735 break;
6736 case 2:
6737 // This read is protected correctly.
6738 MU1.Lock(); CHECK(GLOB >= 0); MU1.Unlock();
6739 break;
6740 case 3:
6741 // This write is protected correctly.
6742 MU1.Lock(); GLOB++; MU1.Unlock();
6743 break;
6745 // sleep a bit so that the threads interleave
6746 // and the race happens at least twice.
6747 usleep(100);
6751 void Run() {
6752 printf("test302: Complex race that happens twice.\n");
6753 MyThread t1(Worker), t2(Worker);
6754 t1.Start();
6755 t2.Start();
6756 t1.Join(); t2.Join();
6758 REGISTER_TEST2(Run, 302, RACE_DEMO)
6759 } // namespace test302
6762 // test303: Need to trace the memory to understand the report. {{{1
6763 namespace test303 {
6764 int GLOB = 0;
6766 Mutex MU;
6767 void Worker1() { CHECK(GLOB >= 0); }
6768 void Worker2() { MU.Lock(); GLOB=1; MU.Unlock();}
6770 void Run() {
6771 printf("test303: a race that needs annotations.\n");
6772 ANNOTATE_TRACE_MEMORY(&GLOB);
6773 MyThreadArray t(Worker1, Worker2);
6774 t.Start();
6775 t.Join();
6777 REGISTER_TEST2(Run, 303, RACE_DEMO)
6778 } // namespace test303
6782 // test304: Can not trace the memory, since it is a library object. {{{1
6783 namespace test304 {
6784 string *STR;
6785 Mutex MU;
6787 void Worker1() {
6788 sleep(0);
6789 ANNOTATE_CONDVAR_SIGNAL((void*)0xDEADBEAF);
6790 MU.Lock(); CHECK(STR->length() >= 4); MU.Unlock();
6792 void Worker2() {
6793 sleep(1);
6794 ANNOTATE_CONDVAR_SIGNAL((void*)0xDEADBEAF);
6795 CHECK(STR->length() >= 4); // Unprotected!
6797 void Worker3() {
6798 sleep(2);
6799 ANNOTATE_CONDVAR_SIGNAL((void*)0xDEADBEAF);
6800 MU.Lock(); CHECK(STR->length() >= 4); MU.Unlock();
6802 void Worker4() {
6803 sleep(3);
6804 ANNOTATE_CONDVAR_SIGNAL((void*)0xDEADBEAF);
6805 MU.Lock(); *STR += " + a very very long string"; MU.Unlock();
6808 void Run() {
6809 STR = new string ("The String");
6810 printf("test304: a race where memory tracing does not work.\n");
6811 MyThreadArray t(Worker1, Worker2, Worker3, Worker4);
6812 t.Start();
6813 t.Join();
6815 printf("%s\n", STR->c_str());
6816 delete STR;
6818 REGISTER_TEST2(Run, 304, RACE_DEMO)
6819 } // namespace test304
6823 // test305: A bit more tricky: two locks used inconsistently. {{{1
6824 namespace test305 {
6825 int GLOB = 0;
6827 // In this test GLOB is protected by MU1 and MU2, but inconsistently.
6828 // The TRACES observed by helgrind are:
6829 // TRACE[1]: Access{T2/S2 wr} -> new State{Mod; #LS=2; #SS=1; T2/S2}
6830 // TRACE[2]: Access{T4/S9 wr} -> new State{Mod; #LS=1; #SS=2; T2/S2, T4/S9}
6831 // TRACE[3]: Access{T5/S13 wr} -> new State{Mod; #LS=1; #SS=3; T2/S2, T4/S9, T5/S13}
6832 // TRACE[4]: Access{T6/S19 wr} -> new State{Mod; #LS=0; #SS=4; T2/S2, T4/S9, T5/S13, T6/S19}
6834 // The guilty access is either Worker2() or Worker4(), depending on
6835 // which mutex is supposed to protect GLOB.
6836 Mutex MU1;
6837 Mutex MU2;
6838 void Worker1() { MU1.Lock(); MU2.Lock(); GLOB=1; MU2.Unlock(); MU1.Unlock(); }
6839 void Worker2() { MU1.Lock(); GLOB=2; MU1.Unlock(); }
6840 void Worker3() { MU1.Lock(); MU2.Lock(); GLOB=3; MU2.Unlock(); MU1.Unlock(); }
6841 void Worker4() { MU2.Lock(); GLOB=4; MU2.Unlock(); }
6843 void Run() {
6844 ANNOTATE_TRACE_MEMORY(&GLOB);
6845 printf("test305: simple race.\n");
6846 MyThread t1(Worker1), t2(Worker2), t3(Worker3), t4(Worker4);
6847 t1.Start(); usleep(100);
6848 t2.Start(); usleep(100);
6849 t3.Start(); usleep(100);
6850 t4.Start(); usleep(100);
6851 t1.Join(); t2.Join(); t3.Join(); t4.Join();
6853 REGISTER_TEST2(Run, 305, RACE_DEMO)
6854 } // namespace test305
6856 // test306: Two locks are used to protect a var. {{{1
6857 namespace test306 {
6858 int GLOB = 0;
6859 // Worker1 and Worker2 access the var under both locks.
6860 // Worker3 uses no locks.
6862 Mutex MU1;
6863 Mutex MU2;
6864 void Worker1() { MU1.Lock(); MU2.Lock(); GLOB=1; MU2.Unlock(); MU1.Unlock(); }
6865 void Worker2() { MU1.Lock(); MU2.Lock(); GLOB=3; MU2.Unlock(); MU1.Unlock(); }
6866 void Worker3() { GLOB=4; }
6868 void Run() {
6869 ANNOTATE_TRACE_MEMORY(&GLOB);
6870 printf("test306: simple race.\n");
6871 MyThread t1(Worker1), t2(Worker2), t3(Worker3);
6872 t1.Start(); usleep(100);
6873 t2.Start(); usleep(100);
6874 t3.Start(); usleep(100);
6875 t1.Join(); t2.Join(); t3.Join();
6877 REGISTER_TEST2(Run, 306, RACE_DEMO)
6878 } // namespace test306
6880 // test307: Simple race, code with control flow {{{1
6881 namespace test307 {
6882 int *GLOB = 0;
6883 volatile /*to fool the compiler*/ bool some_condition = true;
6886 void SomeFunc() { }
6888 int FunctionWithControlFlow() {
6889 int unrelated_stuff = 0;
6890 unrelated_stuff++;
6891 SomeFunc(); // "--keep-history=1" will point somewhere here.
6892 if (some_condition) { // Or here
6893 if (some_condition) {
6894 unrelated_stuff++; // Or here.
6895 unrelated_stuff++;
6896 (*GLOB)++; // "--keep-history=2" will point here (experimental).
6899 usleep(100000);
6900 return unrelated_stuff;
6903 void Worker1() { FunctionWithControlFlow(); }
6904 void Worker2() { Worker1(); }
6905 void Worker3() { Worker2(); }
6906 void Worker4() { Worker3(); }
6908 void Run() {
6909 GLOB = new int;
6910 *GLOB = 1;
6911 printf("test307: simple race, code with control flow\n");
6912 MyThreadArray t1(Worker1, Worker2, Worker3, Worker4);
6913 t1.Start();
6914 t1.Join();
6916 REGISTER_TEST2(Run, 307, RACE_DEMO)
6917 } // namespace test307
6919 // test308: Example of double-checked-locking {{{1
6920 namespace test308 {
6921 struct Foo {
6922 int a;
6925 static int is_inited = 0;
6926 static Mutex lock;
6927 static Foo *foo;
6929 void InitMe() {
6930 if (!is_inited) {
6931 lock.Lock();
6932 if (!is_inited) {
6933 foo = new Foo;
6934 foo->a = 42;
6935 is_inited = 1;
6937 lock.Unlock();
6941 void UseMe() {
6942 InitMe();
6943 CHECK(foo && foo->a == 42);
6946 void Worker1() { UseMe(); }
6947 void Worker2() { UseMe(); }
6948 void Worker3() { UseMe(); }
6951 void Run() {
6952 ANNOTATE_TRACE_MEMORY(&is_inited);
6953 printf("test308: Example of double-checked-locking\n");
6954 MyThreadArray t1(Worker1, Worker2, Worker3);
6955 t1.Start();
6956 t1.Join();
6958 REGISTER_TEST2(Run, 308, RACE_DEMO)
6959 } // namespace test308
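// A sketch of how the double-checked locking above can be made race-free with
// C++11 atomics (illustrative only, not one of the registered tests; it
// assumes a C++11 compiler with <atomic>, and the names test308_fixed_sketch
// and GetFoo are made up for this sketch):
#if 0
#include <atomic>
namespace test308_fixed_sketch {
struct Foo { int a; };
static Mutex mu;
static std::atomic<Foo*> foo(NULL);

Foo *GetFoo() {
  Foo *f = foo.load(std::memory_order_acquire);   // fast path: no lock taken
  if (f == NULL) {
    mu.Lock();
    f = foo.load(std::memory_order_acquire);      // re-check under the lock
    if (f == NULL) {
      f = new Foo;
      f->a = 42;
      foo.store(f, std::memory_order_release);    // publish the fully-initialized object
    }
    mu.Unlock();
  }
  return f;
}
}  // namespace test308_fixed_sketch
#endif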
6961 // test309: Simple race on an STL object. {{{1
6962 namespace test309 {
6963 string GLOB;
6965 void Worker1() {
6966 GLOB="Thread1";
6968 void Worker2() {
6969 usleep(100000);
6970 GLOB="Booooooooooo";
6973 void Run() {
6974 printf("test309: simple race on an STL object.\n");
6975 MyThread t1(Worker1), t2(Worker2);
6976 t1.Start();
6977 t2.Start();
6978 t1.Join(); t2.Join();
6980 REGISTER_TEST2(Run, 309, RACE_DEMO)
6981 } // namespace test309
6983 // test310: One more simple race. {{{1
6984 namespace test310 {
6985 int *PTR = NULL; // GUARDED_BY(mu1)
6987 Mutex mu1; // Protects PTR.
6988 Mutex mu2; // Unrelated to PTR.
6989 Mutex mu3; // Unrelated to PTR.
6991 void Writer1() {
6992 MutexLock lock3(&mu3); // This lock is unrelated to PTR.
6993 MutexLock lock1(&mu1); // Protect PTR.
6994 *PTR = 1;
6997 void Writer2() {
6998 MutexLock lock2(&mu2); // This lock is unrelated to PTR.
6999 MutexLock lock1(&mu1); // Protect PTR.
7000 int some_unrelated_stuff = 0;
7001 if (some_unrelated_stuff == 0)
7002 some_unrelated_stuff++;
7003 *PTR = 2;
7007 void Reader() {
7008 MutexLock lock2(&mu2); // Oh, gosh, this is the wrong mutex!
7009 CHECK(*PTR <= 2);
7012 // Some functions to make the stack trace non-trivial.
7013 void DoWrite1() { Writer1(); }
7014 void Thread1() { DoWrite1(); }
7016 void DoWrite2() { Writer2(); }
7017 void Thread2() { DoWrite2(); }
7019 void DoRead() { Reader(); }
7020 void Thread3() { DoRead(); }
7022 void Run() {
7023 printf("test310: simple race.\n");
7024 PTR = new int;
7025 ANNOTATE_TRACE_MEMORY(PTR);
7026 *PTR = 0;
7027 MyThread t1(Thread1, NULL, "writer1"),
7028 t2(Thread2, NULL, "writer2"),
7029 t3(Thread3, NULL, "buggy reader");
7030 t1.Start();
7031 t2.Start();
7032 usleep(100000); // Let the writers go first.
7033 t3.Start();
7035 t1.Join();
7036 t2.Join();
7037 t3.Join();
7039 REGISTER_TEST2(Run, 310, RACE_DEMO)
7040 } // namespace test310
7042 // test311: Yet another simple race. {{{1
7043 namespace test311 {
7044 int *PTR = NULL; // GUARDED_BY(mu1)
7046 Mutex mu1; // Protects PTR.
7047 Mutex mu2; // Unrelated to PTR.
7048 Mutex mu3; // Unrelated to PTR.
7050 void GoodWriter1() {
7051 MutexLock lock3(&mu3); // This lock is unrelated to PTR.
7052 MutexLock lock1(&mu1); // Protect PTR.
7053 *PTR = 1;
7056 void GoodWriter2() {
7057 MutexLock lock2(&mu2); // This lock is unrelated to PTR.
7058 MutexLock lock1(&mu1); // Protect PTR.
7059 *PTR = 2;
7062 void GoodReader() {
7063 MutexLock lock1(&mu1); // Protect PTR.
7064 CHECK(*PTR >= 0);
7067 void BuggyWriter() {
7068 MutexLock lock2(&mu2); // Wrong mutex!
7069 *PTR = 3;
7072 // Some functions to make the stack trace non-trivial.
7073 void DoWrite1() { GoodWriter1(); }
7074 void Thread1() { DoWrite1(); }
7076 void DoWrite2() { GoodWriter2(); }
7077 void Thread2() { DoWrite2(); }
7079 void DoGoodRead() { GoodReader(); }
7080 void Thread3() { DoGoodRead(); }
7082 void DoBadWrite() { BuggyWriter(); }
7083 void Thread4() { DoBadWrite(); }
7085 void Run() {
7086 printf("test311: simple race.\n");
7087 PTR = new int;
7088 ANNOTATE_TRACE_MEMORY(PTR);
7089 *PTR = 0;
7090 MyThread t1(Thread1, NULL, "good writer1"),
7091 t2(Thread2, NULL, "good writer2"),
7092 t3(Thread3, NULL, "good reader"),
7093 t4(Thread4, NULL, "buggy writer");
7094 t1.Start();
7095 t3.Start();
7096 // t2 goes after t3. This way a pure happens-before detector has no chance.
7097 usleep(10000);
7098 t2.Start();
7099 usleep(100000); // Let the good folks go first.
7100 t4.Start();
7102 t1.Join();
7103 t2.Join();
7104 t3.Join();
7105 t4.Join();
7107 REGISTER_TEST2(Run, 311, RACE_DEMO)
7108 } // namespace test311
7110 // test312: A test with a very deep stack. {{{1
7111 namespace test312 {
7112 int GLOB = 0;
7113 void RaceyWrite() { GLOB++; }
7114 void Func1() { RaceyWrite(); }
7115 void Func2() { Func1(); }
7116 void Func3() { Func2(); }
7117 void Func4() { Func3(); }
7118 void Func5() { Func4(); }
7119 void Func6() { Func5(); }
7120 void Func7() { Func6(); }
7121 void Func8() { Func7(); }
7122 void Func9() { Func8(); }
7123 void Func10() { Func9(); }
7124 void Func11() { Func10(); }
7125 void Func12() { Func11(); }
7126 void Func13() { Func12(); }
7127 void Func14() { Func13(); }
7128 void Func15() { Func14(); }
7129 void Func16() { Func15(); }
7130 void Func17() { Func16(); }
7131 void Func18() { Func17(); }
7132 void Func19() { Func18(); }
7133 void Worker() { Func19(); }
7134 void Run() {
7135 printf("test312: simple race with deep stack.\n");
7136 MyThreadArray t(Worker, Worker, Worker);
7137 t.Start();
7138 t.Join();
7140 REGISTER_TEST2(Run, 312, RACE_DEMO)
7141 } // namespace test312
7143 // test313 TP: test for thread graph output {{{1
7144 namespace test313 {
7145 BlockingCounter *blocking_counter;
7146 int GLOB = 0;
7148 // Worker(N) will do 2^N increments of GLOB, each increment in a separate thread
7149 void Worker(long depth) {
7150 CHECK(depth >= 0);
7151 if (depth > 0) {
7152 ThreadPool pool(2);
7153 pool.StartWorkers();
7154 pool.Add(NewCallback(Worker, depth-1));
7155 pool.Add(NewCallback(Worker, depth-1));
7156 } else {
7157 GLOB++; // Race here
7160 void Run() {
7161 printf("test313: positive\n");
7162 Worker(4);
7163 printf("\tGLOB=%d\n", GLOB);
7165 REGISTER_TEST2(Run, 313, RACE_DEMO)
7166 } // namespace test313
7170 // test400: Demo of a simple false positive. {{{1
7171 namespace test400 {
7172 static Mutex mu;
7173 static vector<int> *vec; // GUARDED_BY(mu);
7175 void InitAllBeforeStartingThreads() {
7176 vec = new vector<int>;
7177 vec->push_back(1);
7178 vec->push_back(2);
7179 }
7181 void Thread1() {
7182 MutexLock lock(&mu);
7183 vec->pop_back();
7184 }
7186 void Thread2() {
7187 MutexLock lock(&mu);
7188 vec->pop_back();
7189 }
7191 //---- Sub-optimal code ---------
7192 size_t NumberOfElementsLeft() {
7193 MutexLock lock(&mu);
7194 return vec->size();
7195 }
7197 void WaitForAllThreadsToFinish_InefficientAndTsanUnfriendly() {
7198 while(NumberOfElementsLeft()) {
7199 ; // sleep or print or do nothing.
7200 }
7201 // It is now safe to access vec w/o lock.
7202 // But a hybrid detector (like ThreadSanitizer) can't see it.
7203 // Solutions:
7204 // 1. Use pure happens-before detector (e.g. "tsan --pure-happens-before")
7205 // 2. Call ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&mu)
7206 // in InitAllBeforeStartingThreads() (see the sketch below).
7207 // 3. (preferred) Use WaitForAllThreadsToFinish_Good() (see below).
7208 CHECK(vec->empty());
7209 delete vec;
7210 }
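// A minimal sketch of solution 2 above, kept as a comment so the registered
// test body stays unchanged (it assumes the ANNOTATE_MUTEX_IS_USED_AS_CONDVAR
// macro mentioned above is available from dynamic_annotations.h):
//
//   void InitAllBeforeStartingThreads() {
//     vec = new vector<int>;
//     vec->push_back(1);
//     vec->push_back(2);
//     // Tell hybrid detectors that mu is also used for signalling, so the
//     // unlocked access to vec after the busy-wait is not reported as a race.
//     ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&mu);
//   }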
7212 //----- Better code -----------
7214 bool NoElementsLeft(vector<int> *v) {
7215 return v->empty();
7216 }
7218 void WaitForAllThreadsToFinish_Good() {
7219 mu.LockWhen(Condition<vector<int> >(NoElementsLeft, vec));
7220 mu.Unlock();
7222 // It is now safe to access vec w/o lock.
7223 CHECK(vec->empty());
7224 delete vec;
7225 }
7228 void Run() {
7229 MyThreadArray t(Thread1, Thread2);
7230 InitAllBeforeStartingThreads();
7231 t.Start();
7232 WaitForAllThreadsToFinish_InefficientAndTsanUnfriendly();
7233 // WaitForAllThreadsToFinish_Good();
7234 t.Join();
7235 }
7236 REGISTER_TEST2(Run, 400, RACE_DEMO)
7237 } // namespace test400
7239 // test401: Demo of false positive caused by reference counting. {{{1
7240 namespace test401 {
7241 // A simplified example of reference counting.
7242 // DecRef() does the ref count decrement in a way unfriendly to race detectors.
7243 // DecRefAnnotated() does the same in a friendly way.
7245 static vector<int> *vec;
7246 static int ref_count;
7248 void InitAllBeforeStartingThreads(int number_of_threads) {
7249 vec = new vector<int>;
7250 vec->push_back(1);
7251 ref_count = number_of_threads;
7252 }
7254 // Correct, but unfriendly to race detectors.
7255 int DecRef() {
7256 return AtomicIncrement(&ref_count, -1);
7257 }
7259 // Correct and friendly to race detectors.
7260 int DecRefAnnotated() {
7261 ANNOTATE_CONDVAR_SIGNAL(&ref_count);
7262 int res = AtomicIncrement(&ref_count, -1);
7263 if (res == 0) {
7264 ANNOTATE_CONDVAR_WAIT(&ref_count);
7265 }
7266 return res;
7267 }
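// Why the annotated version avoids the false positive: every thread SIGNALs on
// &ref_count before decrementing, and the thread that takes the count to zero
// WAITs on the same address.  A detector that honours these annotations sees a
// happens-before edge from each earlier DecRefAnnotated() call to the final
// one, so the 'delete vec' below is ordered after all other accesses to vec.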
7269 void ThreadWorker() {
7270 CHECK(ref_count > 0);
7271 CHECK(vec->size() == 1);
7272 if (DecRef() == 0) { // Use DecRefAnnotated() instead!
7273 // No one uses vec now ==> delete it.
7274 delete vec; // A false race may be reported here.
7275 vec = NULL;
7276 }
7277 }
7279 void Run() {
7280 MyThreadArray t(ThreadWorker, ThreadWorker, ThreadWorker);
7281 InitAllBeforeStartingThreads(3 /*number of threads*/);
7282 t.Start();
7283 t.Join();
7284 CHECK(vec == 0);
7285 }
7286 REGISTER_TEST2(Run, 401, RACE_DEMO)
7287 } // namespace test401
7289 // test501: Manually call PRINT_* annotations {{{1
7290 namespace test501 {
7291 int COUNTER = 0;
7292 int GLOB = 0;
7293 Mutex muCounter, muGlob[65];
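// Run() starts 4 Worker_2 threads, each Worker_2 starts 4 Worker_1 threads,
// and each Worker_1 starts 4 Worker threads: 64 leaf workers in total.  Every
// leaf worker takes a unique id from COUNTER and increments GLOB while holding
// both its own muGlob[myId] and the shared muGlob[0].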
7295 void Worker() {
7296 muCounter.Lock();
7297 int myId = ++COUNTER;
7298 muCounter.Unlock();
7300 usleep(100);
7302 muGlob[myId].Lock();
7303 muGlob[0].Lock();
7304 GLOB++;
7305 muGlob[0].Unlock();
7306 muGlob[myId].Unlock();
7307 }
7309 void Worker_1() {
7310 MyThreadArray ta (Worker, Worker, Worker, Worker);
7311 ta.Start();
7312 usleep(500000);
7313 ta.Join ();
7314 }
7316 void Worker_2() {
7317 MyThreadArray ta (Worker_1, Worker_1, Worker_1, Worker_1);
7318 ta.Start();
7319 usleep(300000);
7320 ta.Join ();
7321 }
7323 void Run() {
7324 ANNOTATE_RESET_STATS();
7325 printf("test501: Manually call PRINT_* annotations.\n");
7326 MyThreadArray ta (Worker_2, Worker_2, Worker_2, Worker_2);
7327 ta.Start();
7328 usleep(100000);
7329 ta.Join ();
7330 ANNOTATE_PRINT_MEMORY_USAGE(0);
7331 ANNOTATE_PRINT_STATS();
7332 }
7334 REGISTER_TEST2(Run, 501, FEATURE | EXCLUDE_FROM_ALL)
7335 } // namespace test501
7337 // test502: produce lots of segments without cross-thread relations {{{1
7338 namespace test502 {
7340 /*
7341 * This test produces ~1Gb of memory usage when run with the following options:
7342 *
7343 * --tool=helgrind
7344 * --trace-after-race=0
7345 * --num-callers=2
7346 * --more-context=no
7347 */
7349 Mutex MU;
7350 int GLOB = 0;
7352 void TP() {
7353 for (int i = 0; i < 750000; i++) {
7354 MU.Lock();
7355 GLOB++;
7356 MU.Unlock();
7357 }
7358 }
7360 void Run() {
7361 MyThreadArray t(TP, TP);
7362 printf("test502: produce lots of segments without cross-thread relations\n");
7364 t.Start();
7365 t.Join();
7366 }
7368 REGISTER_TEST2(Run, 502, MEMORY_USAGE | PRINT_STATS | EXCLUDE_FROM_ALL
7369 | PERFORMANCE)
7370 } // namespace test502
7372 // test503: produce lots of segments with simple HB-relations {{{1
7373 // HB cache-miss rate is ~55%
7374 namespace test503 {
7376 // |- | | | | |
7377 // | \| | | | |
7378 // | |- | | | |
7379 // | | \| | | |
7380 // | | |- | | |
7381 // | | | \| | |
7382 // | | | |- | |
7383 // | | | | \| |
7384 // | | | | |- |
7385 // | | | | | \|
7386 // | | | | | |----
7387 //->| | | | | |
7388 // |- | | | | |
7389 // | \| | | | |
7390 // ...
7392 const int N_threads = 32;
7393 const int ARRAY_SIZE = 128;
7394 int GLOB[ARRAY_SIZE];
7395 ProducerConsumerQueue *Q[N_threads];
7396 int GLOB_limit = 100000;
7397 int count = -1;
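// The workers form a ring: thread i blocks on Q[i] and, after incrementing the
// whole GLOB array, passes the token to Q[(i+1) % N_threads].  Each Put/Get
// pair is one simple happens-before relation.  Thread 0 stops the ring by
// putting NULL into every queue once GLOB[0] exceeds GLOB_limit.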
7399 void Worker(){
7400 int myId = AtomicIncrement(&count, 1);
7402 ProducerConsumerQueue &myQ = *Q[myId], &nextQ = *Q[(myId+1) % N_threads];
7404 // this code produces a new SS with each new segment
7405 while (myQ.Get() != NULL) {
7406 for (int i = 0; i < ARRAY_SIZE; i++)
7407 GLOB[i]++;
7409 if (myId == 0 && GLOB[0] > GLOB_limit) {
7410 // Stop all threads
7411 for (int i = 0; i < N_threads; i++)
7412 Q[i]->Put(NULL);
7413 } else
7414 nextQ.Put(GLOB);
7415 }
7416 }
7418 void Run() {
7419 printf("test503: produce lots of segments with simple HB-relations\n");
7420 for (int i = 0; i < N_threads; i++)
7421 Q[i] = new ProducerConsumerQueue(1);
7422 Q[0]->Put(GLOB);
7424 {
7425 ThreadPool pool(N_threads);
7426 pool.StartWorkers();
7427 for (int i = 0; i < N_threads; i++) {
7428 pool.Add(NewCallback(Worker));
7429 }
7430 } // all folks are joined here.
7432 for (int i = 0; i < N_threads; i++)
7433 delete Q[i];
7434 }
7436 REGISTER_TEST2(Run, 503, MEMORY_USAGE | PRINT_STATS
7437 | PERFORMANCE | EXCLUDE_FROM_ALL)
7438 } // namespace test503
7440 // test504: force massive cache fetch-wback (50% misses, mostly CacheLineZ) {{{1
7441 namespace test504 {
7443 const int N_THREADS = 2,
7444 HG_CACHELINE_COUNT = 1 << 16,
7445 HG_CACHELINE_SIZE = 1 << 6,
7446 HG_CACHE_SIZE = HG_CACHELINE_COUNT * HG_CACHELINE_SIZE;
7448 // int elements give ~4x the speed of the byte version; an array
7449 // 4x the cache size then gives a total multiplier of 16x over the
7450 // cache size, so we can neglect the memory that is still cached
7451 // at the end.
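// With the constants above, HG_CACHE_SIZE = 65536 * 64 bytes = 4 MiB and the
// array is 4 * HG_CACHE_SIZE ints = 64 MiB, i.e. 16x the modelled cache, so
// nearly every line touched has to be fetched and written back.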
7452 const int ARRAY_SIZE = 4 * HG_CACHE_SIZE,
7453 ITERATIONS = 30;
7454 int array[ARRAY_SIZE];
7456 int count = 0;
7457 Mutex count_mu;
7459 void Worker() {
7460 count_mu.Lock();
7461 int myId = ++count;
7462 count_mu.Unlock();
7464 // all threads write to different memory locations,
7465 // so no synchronization mechanisms are needed
7466 int lower_bound = ARRAY_SIZE * (myId-1) / N_THREADS,
7467 upper_bound = ARRAY_SIZE * ( myId ) / N_THREADS;
7468 for (int j = 0; j < ITERATIONS; j++)
7469 for (int i = lower_bound; i < upper_bound;
7470 i += HG_CACHELINE_SIZE / sizeof(array[0])) {
7471 array[i] = i; // each array-write generates a cache miss
7472 }
7473 }
7475 void Run() {
7476 printf("test504: force massive CacheLineZ fetch-wback\n");
7477 MyThreadArray t(Worker, Worker);
7478 t.Start();
7479 t.Join();
7480 }
7482 REGISTER_TEST2(Run, 504, PERFORMANCE | PRINT_STATS | EXCLUDE_FROM_ALL)
7483 } // namespace test504
7485 // test505: force massive cache fetch-wback (60% misses) {{{1
7486 // modification of test504 - int64_t accesses and lots of mutexes
7487 // so it produces lots of CacheLineF misses (30-50% of CacheLineZ misses)
7488 namespace test505 {
7490 const int N_THREADS = 2,
7491 HG_CACHELINE_COUNT = 1 << 16,
7492 HG_CACHELINE_SIZE = 1 << 6,
7493 HG_CACHE_SIZE = HG_CACHELINE_COUNT * HG_CACHELINE_SIZE;
7495 const int ARRAY_SIZE = 4 * HG_CACHE_SIZE,
7496 ITERATIONS = 3;
7497 int64_t array[ARRAY_SIZE];
7499 int count = 0;
7500 Mutex count_mu;
7502 void Worker() {
7503 const int N_MUTEXES = 5;
7504 Mutex mu[N_MUTEXES];
7505 count_mu.Lock();
7506 int myId = ++count;
7507 count_mu.Unlock();
7509 // all threads write to different memory locations,
7510 // so no synchronization mechanisms are needed
7511 int lower_bound = ARRAY_SIZE * (myId-1) / N_THREADS,
7512 upper_bound = ARRAY_SIZE * ( myId ) / N_THREADS;
7513 for (int j = 0; j < ITERATIONS; j++)
7514 for (int mutex_id = 0; mutex_id < N_MUTEXES; mutex_id++) {
7515 Mutex *m = & mu[mutex_id];
7516 m->Lock();
7517 for (int i = lower_bound + mutex_id, cnt = 0;
7518 i < upper_bound;
7519 i += HG_CACHELINE_SIZE / sizeof(array[0]), cnt++) {
7520 array[i] = i; // each array-write generates a cache miss
7521 }
7522 m->Unlock();
7523 }
7524 }
7526 void Run() {
7527 printf("test505: force massive CacheLineF fetch-wback\n");
7528 MyThreadArray t(Worker, Worker);
7529 t.Start();
7530 t.Join();
7531 }
7533 REGISTER_TEST2(Run, 505, PERFORMANCE | PRINT_STATS | EXCLUDE_FROM_ALL)
7534 } // namespace test505
7536 // test506: massive HB's using Barriers {{{1
7537 // HB cache miss is ~40%
7538 // segments consume 10x more memory than SSs
7539 // modification of test39
7540 namespace test506 {
7541 #ifndef NO_BARRIER
7542 // Same as test17 but uses Barrier class (pthread_barrier_t).
7543 int GLOB = 0;
7544 const int N_threads = 64,
7545 ITERATIONS = 1000;
7546 Barrier *barrier[ITERATIONS];
7547 Mutex MU;
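// In every iteration all 64 threads increment GLOB under MU and then meet at a
// fresh Barrier, so each of the 1000 iterations creates happens-before edges
// between every pair of threads -- hence the massive number of HB relations.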
7549 void Worker() {
7550 for (int i = 0; i < ITERATIONS; i++) {
7551 MU.Lock();
7552 GLOB++;
7553 MU.Unlock();
7554 barrier[i]->Block();
7555 }
7556 }
7557 void Run() {
7558 printf("test506: massive HB's using Barriers\n");
7559 for (int i = 0; i < ITERATIONS; i++) {
7560 barrier[i] = new Barrier(N_threads);
7561 }
7562 {
7563 ThreadPool pool(N_threads);
7564 pool.StartWorkers();
7565 for (int i = 0; i < N_threads; i++) {
7566 pool.Add(NewCallback(Worker));
7567 }
7568 } // all folks are joined here.
7569 CHECK(GLOB == N_threads * ITERATIONS);
7570 for (int i = 0; i < ITERATIONS; i++) {
7571 delete barrier[i];
7572 }
7573 }
7574 REGISTER_TEST2(Run, 506, PERFORMANCE | PRINT_STATS | EXCLUDE_FROM_ALL);
7575 #endif // NO_BARRIER
7576 } // namespace test506
7578 // test507: vgHelgrind_initIterAtFM/stackClear benchmark {{{1
7579 // vgHelgrind_initIterAtFM/stackClear consume ~8.5%/5.5% CPU
7580 namespace test507 {
7581 const int N_THREADS = 1,
7582 BUFFER_SIZE = 1,
7583 ITERATIONS = 1 << 20;
7585 void Foo() {
7586 struct T {
7587 char temp;
7588 T() {
7589 ANNOTATE_RWLOCK_CREATE(&temp);
7590 }
7591 ~T() {
7592 ANNOTATE_RWLOCK_DESTROY(&temp);
7593 }
7594 } s[BUFFER_SIZE];
7595 s->temp = '\0';
7596 }
7598 void Worker() {
7599 for (int j = 0; j < ITERATIONS; j++) {
7600 Foo();
7601 }
7602 }
7604 void Run() {
7605 printf("test507: vgHelgrind_initIterAtFM/stackClear benchmark\n");
7606 {
7607 ThreadPool pool(N_THREADS);
7608 pool.StartWorkers();
7609 for (int i = 0; i < N_THREADS; i++) {
7610 pool.Add(NewCallback(Worker));
7611 }
7612 } // all folks are joined here.
7613 }
7614 REGISTER_TEST2(Run, 507, EXCLUDE_FROM_ALL);
7615 } // namespace test507
7617 // test508: cmp_WordVecs_for_FM benchmark {{{1
7618 // 50+% of CPU consumption by cmp_WordVecs_for_FM
7619 namespace test508 {
7620 const int N_THREADS = 1,
7621 BUFFER_SIZE = 1 << 10,
7622 ITERATIONS = 1 << 9;
7624 void Foo() {
7625 struct T {
7626 char temp;
7627 T() {
7628 ANNOTATE_RWLOCK_CREATE(&temp);
7629 }
7630 ~T() {
7631 ANNOTATE_RWLOCK_DESTROY(&temp);
7632 }
7633 } s[BUFFER_SIZE];
7634 s->temp = '\0';
7635 }
7637 void Worker() {
7638 for (int j = 0; j < ITERATIONS; j++) {
7639 Foo();
7640 }
7641 }
7643 void Run() {
7644 printf("test508: cmp_WordVecs_for_FM benchmark\n");
7645 {
7646 ThreadPool pool(N_THREADS);
7647 pool.StartWorkers();
7648 for (int i = 0; i < N_THREADS; i++) {
7649 pool.Add(NewCallback(Worker));
7650 }
7651 } // all folks are joined here.
7652 }
7653 REGISTER_TEST2(Run, 508, EXCLUDE_FROM_ALL);
7654 } // namespace test508
7656 // test509: avl_find_node benchmark {{{1
7657 // 10+% of CPU consumption by avl_find_node
7658 namespace test509 {
7659 const int N_THREADS = 16,
7660 ITERATIONS = 1 << 8;
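// Each worker locks 256 freshly created mutexes (building up a large set of
// held locks) and then unlocks and deletes them in reverse order, which is
// presumably what drives the avl_find_node lookups mentioned above.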
7662 void Worker() {
7663 std::vector<Mutex*> mu_list;
7664 for (int i = 0; i < ITERATIONS; i++) {
7665 Mutex * mu = new Mutex();
7666 mu_list.push_back(mu);
7667 mu->Lock();
7668 }
7669 for (int i = ITERATIONS - 1; i >= 0; i--) {
7670 Mutex * mu = mu_list[i];
7671 mu->Unlock();
7672 delete mu;
7673 }
7674 }
7676 void Run() {
7677 printf("test509: avl_find_node benchmark\n");
7678 {
7679 ThreadPool pool(N_THREADS);
7680 pool.StartWorkers();
7681 for (int i = 0; i < N_THREADS; i++) {
7682 pool.Add(NewCallback(Worker));
7683 }
7684 } // all folks are joined here.
7685 }
7686 REGISTER_TEST2(Run, 509, EXCLUDE_FROM_ALL);
7687 } // namespace test509
7689 // test510: SS-recycle test {{{1
7690 // this test shows the case where only ~1% of SS are recycled
7691 namespace test510 {
7692 const int N_THREADS = 16,
7693 ITERATIONS = 1 << 10;
7694 int GLOB = 0;
7696 void Worker() {
7697 usleep(100000);
7698 for (int i = 0; i < ITERATIONS; i++) {
7699 ANNOTATE_CONDVAR_SIGNAL((void*)0xDeadBeef);
7700 GLOB++;
7701 usleep(10);
7702 }
7703 }
7705 void Run() {
7706 //ANNOTATE_BENIGN_RACE(&GLOB, "Test");
7707 printf("test510: SS-recycle test\n");
7708 {
7709 ThreadPool pool(N_THREADS);
7710 pool.StartWorkers();
7711 for (int i = 0; i < N_THREADS; i++) {
7712 pool.Add(NewCallback(Worker));
7713 }
7714 } // all folks are joined here.
7715 }
7716 REGISTER_TEST2(Run, 510, MEMORY_USAGE | PRINT_STATS | EXCLUDE_FROM_ALL);
7717 } // namespace test510
7719 // test511: Segment refcounting test ('1' refcounting) {{{1
7720 namespace test511 {
7721 int GLOB = 0;
7723 void Run () {
7724 for (int i = 0; i < 300; i++) {
7725 ANNOTATE_CONDVAR_SIGNAL(&GLOB);
7726 usleep(1000);
7727 GLOB++;
7728 ANNOTATE_CONDVAR_WAIT(&GLOB);
7729 if (i % 100 == 0)
7730 ANNOTATE_PRINT_MEMORY_USAGE(0);
7731 }
7732 }
7733 REGISTER_TEST2(Run, 511, MEMORY_USAGE | PRINT_STATS | EXCLUDE_FROM_ALL);
7734 } // namespace test511
7736 // test512: Segment refcounting test ('S' refcounting) {{{1
7737 namespace test512 {
7738 int GLOB = 0;
7739 sem_t SEM;
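// Same idea as test511, but the happens-before edges come from a real
// semaphore (sem_post/sem_wait on SEM) instead of the CONDVAR annotations
// used there.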
7741 void Run () {
7742 sem_init(&SEM, 0, 0);
7743 for (int i = 0; i < 300; i++) {
7744 sem_post(&SEM);
7745 usleep(1000);
7746 GLOB++;
7747 sem_wait(&SEM);
7748 /*if (i % 100 == 0)
7749 ANNOTATE_PRINT_MEMORY_USAGE(0);*/
7750 }
7751 sem_destroy(&SEM);
7752 }
7753 REGISTER_TEST2(Run, 512, MEMORY_USAGE | PRINT_STATS | EXCLUDE_FROM_ALL);
7754 } // namespace test512
7756 // test513: --fast-mode benchmark {{{1
7757 namespace test513 {
7759 const int N_THREADS = 2,
7760 HG_CACHELINE_SIZE = 1 << 6,
7761 ARRAY_SIZE = HG_CACHELINE_SIZE * 512,
7762 MUTEX_ID_BITS = 8;
7763 // MUTEX_ID_MASK = (1 << MUTEX_ID_BITS) - 1;
7765 // Each thread has its own cache lines and works on them intensively
7766 const int ITERATIONS = 1024;
7767 int array[N_THREADS][ARRAY_SIZE];
7769 int count = 0;
7770 Mutex count_mu;
7771 Mutex mutex_arr[N_THREADS][MUTEX_ID_BITS];
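// Apart from the initial id handshake under count_mu, each worker writes only
// its own row of 'array' and locks only mutexes from its own row of
// 'mutex_arr' (a subset derived from j), so locksets vary between segments
// while all data accesses stay strictly thread-local.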
7773 void Worker() {
7774 count_mu.Lock();
7775 int myId = count++;
7776 count_mu.Unlock();
7778 // all threads write to different memory locations
7779 for (int j = 0; j < ITERATIONS; j++) {
7780 int mutex_mask = j & MUTEX_ID_BITS;
7781 for (int m = 0; m < MUTEX_ID_BITS; m++)
7782 if (mutex_mask & (1 << m))
7783 mutex_arr[myId][m].Lock();
7785 for (int i = 0; i < ARRAY_SIZE; i++) {
7786 array[myId][i] = i;
7787 }
7789 for (int m = 0; m < MUTEX_ID_BITS; m++)
7790 if (mutex_mask & (1 << m))
7791 mutex_arr[myId][m].Unlock();
7792 }
7793 }
7795 void Run() {
7796 printf("test513: --fast-mode benchmark\n");
7797 {
7798 ThreadPool pool(N_THREADS);
7799 pool.StartWorkers();
7800 for (int i = 0; i < N_THREADS; i++) {
7801 pool.Add(NewCallback(Worker));
7802 }
7803 } // all folks are joined here.
7804 }
7806 REGISTER_TEST2(Run, 513, PERFORMANCE | PRINT_STATS | EXCLUDE_FROM_ALL)
7807 } // namespace test513
7809 // End {{{1
7810 // vim:shiftwidth=2:softtabstop=2:expandtab:foldmethod=marker