drd/tests/tsan_unittest.cpp
1 /*
2 This file is part of Valgrind, a dynamic binary instrumentation
3 framework.
5 Copyright (C) 2008-2008 Google Inc
6 opensource@google.com
8 This program is free software; you can redistribute it and/or
9 modify it under the terms of the GNU General Public License as
10 published by the Free Software Foundation; either version 2 of the
11 License, or (at your option) any later version.
13 This program is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
21 02111-1307, USA.
23 The GNU General Public License is contained in the file COPYING.
26 // Author: Konstantin Serebryany <opensource@google.com>
28 // This file contains a set of unit tests for a data race detection tool.
32 // This test can be compiled with pthreads (default) or
33 // with any other library that supports threads, locks, cond vars, etc.
34 //
35 // To compile with pthreads:
36 // g++ racecheck_unittest.cc dynamic_annotations.cc
37 // -lpthread -g -DDYNAMIC_ANNOTATIONS=1
38 //
39 // To compile with different library:
40 // 1. cp thread_wrappers_pthread.h thread_wrappers_yourlib.h
41 // 2. edit thread_wrappers_yourlib.h
42 // 3. add '-DTHREAD_WRAPPERS="thread_wrappers_yourlib.h"' to your compilation.
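//
//    For example (illustrative only; the library name and extra flags below are
//    hypothetical and should be adjusted to your threading library):
//      g++ racecheck_unittest.cc dynamic_annotations.cc \
//          -DTHREAD_WRAPPERS='"thread_wrappers_yourlib.h"' -g -lyourlib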
46 // This test must not include any other file specific to the threading library;
47 // everything should be inside THREAD_WRAPPERS.
48 #ifndef THREAD_WRAPPERS
49 # define THREAD_WRAPPERS "thread_wrappers_pthread.h"
50 #endif
51 #include THREAD_WRAPPERS
53 #ifndef NEEDS_SEPERATE_RW_LOCK
54 #define RWLock Mutex // Mutex does work as an rw-lock.
55 #define WriterLockScoped MutexLock
56 #define ReaderLockScoped ReaderMutexLock
57 #endif // !NEEDS_SEPERATE_RW_LOCK
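// With these fallback aliases, tests that use reader/writer scoped locking
// still build when the wrapper library provides only a plain Mutex type.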
60 // Helgrind memory usage testing stuff.
61 // If these are not defined in dynamic_annotations.h/.cc, the fallbacks below make them no-ops.
62 #ifndef ANNOTATE_RESET_STATS
63 #define ANNOTATE_RESET_STATS() do { } while(0)
64 #endif
65 #ifndef ANNOTATE_PRINT_STATS
66 #define ANNOTATE_PRINT_STATS() do { } while(0)
67 #endif
68 #ifndef ANNOTATE_PRINT_MEMORY_USAGE
69 #define ANNOTATE_PRINT_MEMORY_USAGE(a) do { } while(0)
70 #endif
73 // A function that allows suppressing gcc's warnings about
74 // unused return values in a portable way.
75 template <typename T>
76 static inline void IGNORE_RETURN_VALUE(T v)
77 { }
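// Illustrative use (not required by the tests themselves): wrap calls whose
// result is deliberately ignored, e.g.
//   IGNORE_RETURN_VALUE(write(fd, buf, sizeof(buf)));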
79 #include <vector>
80 #include <string>
81 #include <map>
82 #include <queue>
83 #include <algorithm>
84 #include <cstring> // strlen(), index(), rindex()
85 #include <ctime>
86 #include <sys/time.h>
87 #include <sys/types.h>
88 #include <sys/stat.h>
89 #include <fcntl.h>
90 #include <sys/mman.h> // mmap
91 #include <errno.h>
92 #include <stdint.h> // uintptr_t
93 #include <stdlib.h>
94 #include <dirent.h>
96 #ifndef VGO_darwin
97 #include <malloc.h>
98 #endif
100 #ifdef VGO_solaris
101 #include <strings.h> // index(), rindex()
102 #endif
104 // The tests are
105 // - Stability tests (marked STAB)
106 // - Performance tests (marked PERF)
107 // - Feature tests
108 // - TN (true negative) : no race exists and the tool is silent.
109 // - TP (true positive) : a race exists and is reported.
110 // - FN (false negative): a race exists but is not reported.
111 // - FP (false positive): no race exists but the tool reports it.
113 // The feature tests are marked according to the behavior of helgrind 3.3.0.
115 // TP and FP tests are annotated with ANNOTATE_EXPECT_RACE,
116 // so no error reports should be seen when running under helgrind.
118 // When some of the FP cases are fixed in helgrind we'll need
119 // to update this test.
121 // Each test resides in its own namespace.
122 // Namespaces are named test01, test02, ...
123 // Please, *DO NOT* change the logic of existing tests nor rename them.
124 // Create a new test instead.
126 // Some tests use sleep()/usleep().
127 // This is not synchronization, just a simple way to trigger
128 // some specific behaviour of the race detector's scheduler.
130 // Globals and utilities used by several tests. {{{1
131 CondVar CV;
132 int COND = 0;
135 typedef void (*void_func_void_t)(void);
136 enum TEST_FLAG {
137 FEATURE = 1 << 0,
138 STABILITY = 1 << 1,
139 PERFORMANCE = 1 << 2,
140 EXCLUDE_FROM_ALL = 1 << 3,
141 NEEDS_ANNOTATIONS = 1 << 4,
142 RACE_DEMO = 1 << 5,
143 MEMORY_USAGE = 1 << 6,
144 PRINT_STATS = 1 << 7
147 // Put everything into stderr.
148 Mutex printf_mu;
149 #define printf(args...) \
150 do{ \
151 printf_mu.Lock();\
152 fprintf(stderr, args);\
153 printf_mu.Unlock(); \
154 }while(0)
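// From here on, every printf() in this file expands to the wrapper above, so
// test output is serialized by printf_mu and always goes to stderr.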
156 long GetTimeInMs() {
157 struct timeval tv;
158 gettimeofday(&tv, NULL);
159 return (tv.tv_sec * 1000L) + (tv.tv_usec / 1000L);
162 struct Test{
163 void_func_void_t f_;
164 int flags_;
165 Test(void_func_void_t f, int flags)
166 : f_(f)
167 , flags_(flags)
169 Test() : f_(0), flags_(0) {}
170 void Run() {
171 ANNOTATE_RESET_STATS();
172 if (flags_ & PERFORMANCE) {
173 long start = GetTimeInMs();
174 f_();
175 long end = GetTimeInMs();
176 printf ("Time: %4ldms\n", end-start);
177 } else
178 f_();
179 if (flags_ & PRINT_STATS)
180 ANNOTATE_PRINT_STATS();
181 if (flags_ & MEMORY_USAGE)
182 ANNOTATE_PRINT_MEMORY_USAGE(0);
185 std::map<int, Test> TheMapOfTests;
187 #define NOINLINE __attribute__ ((noinline))
188 extern "C" void NOINLINE AnnotateSetVerbosity(const char *, int, int) {};
191 struct TestAdder {
192 TestAdder(void_func_void_t f, int id, int flags = FEATURE) {
193 // AnnotateSetVerbosity(__FILE__, __LINE__, 0);
194 CHECK(TheMapOfTests.count(id) == 0);
195 TheMapOfTests[id] = Test(f, flags);
199 #define REGISTER_TEST(f, id) TestAdder add_test_##id (f, id);
200 #define REGISTER_TEST2(f, id, flags) TestAdder add_test_##id (f, id, flags);
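// A test registers itself at namespace scope, e.g. (taken from the tests below):
//   REGISTER_TEST(Run, 4);                              // plain feature test
//   REGISTER_TEST2(Run, 3, FEATURE|NEEDS_ANNOTATIONS);  // test needing annotations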
202 static bool ArgIsOne(int *arg) { return *arg == 1; };
203 static bool ArgIsZero(int *arg) { return *arg == 0; };
204 static bool ArgIsTrue(bool *arg) { return *arg == true; };
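// These predicates are passed to Condition() in the LockWhen()/Await() tests,
// e.g. MU.LockWhen(Condition(&ArgIsOne, &COND)) in test03 below.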
206 // Call ANNOTATE_EXPECT_RACE only if the 'machine' environment variable is defined.
207 // Useful for testing against several different machines.
208 // Supported machines so far:
209 // MSM_HYBRID1 -- aka MSMProp1
210 // MSM_HYBRID1_INIT_STATE -- aka MSMProp1 with --initialization-state=yes
211 // MSM_THREAD_SANITIZER -- ThreadSanitizer's state machine
212 #define ANNOTATE_EXPECT_RACE_FOR_MACHINE(mem, descr, machine) \
213 while(getenv(machine)) {\
214 ANNOTATE_EXPECT_RACE(mem, descr); \
215 break;\
218 #define ANNOTATE_EXPECT_RACE_FOR_TSAN(mem, descr) \
219 ANNOTATE_EXPECT_RACE_FOR_MACHINE(mem, descr, "MSM_THREAD_SANITIZER")
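// Typical use in the TP tests below:
//   ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test01. TP.");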
221 inline bool Tsan_PureHappensBefore() {
222 return true;
225 inline bool Tsan_FastMode() {
226 return getenv("TSAN_FAST_MODE") != NULL;
229 // Initialize *(mem) to 0 if Tsan_FastMode.
230 #define FAST_MODE_INIT(mem) do { if (Tsan_FastMode()) { *(mem) = 0; } } while(0)
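// Most TP tests call FAST_MODE_INIT(&GLOB) just before annotating the expected
// race, e.g. in test01::Run() below.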
232 #ifndef MAIN_INIT_ACTION
233 #define MAIN_INIT_ACTION
234 #endif
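// MAIN_INIT_ACTION gives a THREAD_WRAPPERS implementation a hook to inject
// set-up code at the top of main(); by default it expands to nothing.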
238 int main(int argc, char** argv) { // {{{1
239 MAIN_INIT_ACTION;
240 printf("FLAGS [phb=%i, fm=%i]\n", Tsan_PureHappensBefore(), Tsan_FastMode());
241 if (argc == 2 && !strcmp(argv[1], "benchmark")) {
242 for (std::map<int,Test>::iterator it = TheMapOfTests.begin();
243 it != TheMapOfTests.end(); ++it) {
244 if(!(it->second.flags_ & PERFORMANCE)) continue;
245 it->second.Run();
247 } else if (argc == 2 && !strcmp(argv[1], "demo")) {
248 for (std::map<int,Test>::iterator it = TheMapOfTests.begin();
249 it != TheMapOfTests.end(); ++it) {
250 if(!(it->second.flags_ & RACE_DEMO)) continue;
251 it->second.Run();
253 } else if (argc > 1) {
254 // The tests to run are listed as command-line arguments.
255 for (int i = 1; i < argc; i++) {
256 int f_num = atoi(argv[i]);
257 CHECK(TheMapOfTests.count(f_num));
258 TheMapOfTests[f_num].Run();
260 } else {
261 bool run_tests_with_annotations = false;
262 if (getenv("DRT_ALLOW_ANNOTATIONS")) {
263 run_tests_with_annotations = true;
265 for (std::map<int,Test>::iterator it = TheMapOfTests.begin();
266 it != TheMapOfTests.end();
267 ++it) {
268 if(it->second.flags_ & EXCLUDE_FROM_ALL) continue;
269 if(it->second.flags_ & RACE_DEMO) continue;
270 if((it->second.flags_ & NEEDS_ANNOTATIONS)
271 && run_tests_with_annotations == false) continue;
272 it->second.Run();
277 #ifdef THREAD_WRAPPERS_PTHREAD_H
278 #endif
281 // An array of threads. Create/start/join all elements at once. {{{1
282 class MyThreadArray {
283 public:
284 static const int kSize = 5;
285 typedef void (*F) (void);
286 MyThreadArray(F f1, F f2 = NULL, F f3 = NULL, F f4 = NULL, F f5 = NULL) {
287 ar_[0] = new MyThread(f1);
288 ar_[1] = f2 ? new MyThread(f2) : NULL;
289 ar_[2] = f3 ? new MyThread(f3) : NULL;
290 ar_[3] = f4 ? new MyThread(f4) : NULL;
291 ar_[4] = f5 ? new MyThread(f5) : NULL;
293 void Start() {
294 for(int i = 0; i < kSize; i++) {
295 if(ar_[i]) {
296 ar_[i]->Start();
297 usleep(10);
302 void Join() {
303 for(int i = 0; i < kSize; i++) {
304 if(ar_[i]) {
305 ar_[i]->Join();
310 ~MyThreadArray() {
311 for(int i = 0; i < kSize; i++) {
312 delete ar_[i];
315 private:
316 MyThread *ar_[kSize];
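// Typical use throughout the tests below:
//   MyThreadArray t(Putter, Getter);
//   t.Start();
//   t.Join();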
321 // test00: {{{1
322 namespace test00 {
323 int GLOB = 0;
324 void Run() {
325 printf("test00: negative\n");
326 printf("\tGLOB=%d\n", GLOB);
328 REGISTER_TEST(Run, 00)
329 } // namespace test00
332 // test01: TP. Simple race (write vs write). {{{1
333 namespace test01 {
334 int GLOB = 0;
335 void Worker() {
336 GLOB = 1;
339 void Parent() {
340 MyThread t(Worker);
341 t.Start();
342 const timespec delay = { 0, 100 * 1000 * 1000 };
343 nanosleep(&delay, 0);
344 GLOB = 2;
345 t.Join();
347 void Run() {
348 FAST_MODE_INIT(&GLOB);
349 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test01. TP.");
350 ANNOTATE_TRACE_MEMORY(&GLOB);
351 printf("test01: positive\n");
352 Parent();
353 const int tmp = GLOB;
354 printf("\tGLOB=%d\n", tmp);
356 REGISTER_TEST(Run, 1);
357 } // namespace test01
360 // test02: TN. Synchronization via CondVar. {{{1
361 namespace test02 {
362 int GLOB = 0;
363 // Two write accesses to GLOB are synchronized because
364 // the pair of CV.Signal() and CV.Wait() establishes a happens-before relation.
366 // Waiter: Waker:
367 // 1. COND = 0
368 // 2. Start(Waker)
369 // 3. MU.Lock() a. write(GLOB)
370 // b. MU.Lock()
371 // c. COND = 1
372 // /--- d. CV.Signal()
373 // 4. while(COND) / e. MU.Unlock()
374 // CV.Wait(MU) <---/
375 // 5. MU.Unlock()
376 // 6. write(GLOB)
377 Mutex MU;
379 void Waker() {
380 usleep(100000); // Make sure the waiter blocks.
381 GLOB = 1;
383 MU.Lock();
384 COND = 1;
385 CV.Signal();
386 MU.Unlock();
389 void Waiter() {
390 ThreadPool pool(1);
391 pool.StartWorkers();
392 COND = 0;
393 pool.Add(NewCallback(Waker));
394 MU.Lock();
395 while(COND != 1)
396 CV.Wait(&MU);
397 MU.Unlock();
398 GLOB = 2;
400 void Run() {
401 printf("test02: negative\n");
402 Waiter();
403 printf("\tGLOB=%d\n", GLOB);
405 REGISTER_TEST(Run, 2);
406 } // namespace test02
409 // test03: TN. Synchronization via LockWhen, signaller gets there first. {{{1
410 namespace test03 {
411 int GLOB = 0;
412 // Two write accesses to GLOB are synchronized via conditional critical section.
413 // Note that LockWhen() happens first (we use usleep() in the Waker to make sure)!
415 // Waiter: Waker:
416 // 1. COND = 0
417 // 2. Start(Waker)
418 // a. write(GLOB)
419 // b. MU.Lock()
420 // c. COND = 1
421 // /--- d. MU.Unlock()
422 // 3. MU.LockWhen(COND==1) <---/
423 // 4. MU.Unlock()
424 // 5. write(GLOB)
425 Mutex MU;
427 void Waker() {
428 usleep(100000); // Make sure the waiter blocks.
429 GLOB = 1;
431 MU.Lock();
432 COND = 1; // We are done! Tell the Waiter.
433 MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
435 void Waiter() {
436 ThreadPool pool(1);
437 pool.StartWorkers();
438 COND = 0;
439 pool.Add(NewCallback(Waker));
440 MU.LockWhen(Condition(&ArgIsOne, &COND)); // calls ANNOTATE_CONDVAR_WAIT
441 MU.Unlock(); // Waker is done!
443 GLOB = 2;
445 void Run() {
446 printf("test03: negative\n");
447 Waiter();
448 printf("\tGLOB=%d\n", GLOB);
450 REGISTER_TEST2(Run, 3, FEATURE|NEEDS_ANNOTATIONS);
451 } // namespace test03
453 // test04: TN. Synchronization via PCQ. {{{1
454 namespace test04 {
455 int GLOB = 0;
456 ProducerConsumerQueue Q(INT_MAX);
457 // Two write accesses to GLOB are separated by PCQ Put/Get.
459 // Putter: Getter:
460 // 1. write(GLOB)
461 // 2. Q.Put() ---------\ .
462 // \-------> a. Q.Get()
463 // b. write(GLOB)
466 void Putter() {
467 GLOB = 1;
468 Q.Put(NULL);
471 void Getter() {
472 Q.Get();
473 GLOB = 2;
476 void Run() {
477 printf("test04: negative\n");
478 MyThreadArray t(Putter, Getter);
479 t.Start();
480 t.Join();
481 printf("\tGLOB=%d\n", GLOB);
483 REGISTER_TEST(Run, 4);
484 } // namespace test04
487 // test05: FP. Synchronization via CondVar, but waiter does not block. {{{1
488 // Since CondVar::Wait() is not called, we get a false positive.
489 namespace test05 {
490 int GLOB = 0;
491 // Two write accesses to GLOB are synchronized via CondVar.
492 // But the race detector cannot see it.
493 // See this for details:
494 // http://www.valgrind.org/docs/manual/hg-manual.html#hg-manual.effective-use.
496 // Waiter: Waker:
497 // 1. COND = 0
498 // 2. Start(Waker)
499 // 3. MU.Lock() a. write(GLOB)
500 // b. MU.Lock()
501 // c. COND = 1
502 // d. CV.Signal()
503 // 4. while(COND) e. MU.Unlock()
504 // CV.Wait(MU) <<< not called
505 // 5. MU.Unlock()
506 // 6. write(GLOB)
507 Mutex MU;
509 void Waker() {
510 GLOB = 1;
511 MU.Lock();
512 COND = 1;
513 CV.Signal();
514 MU.Unlock();
517 void Waiter() {
518 ThreadPool pool(1);
519 pool.StartWorkers();
520 COND = 0;
521 pool.Add(NewCallback(Waker));
522 usleep(100000); // Make sure the signaller gets there first.
523 MU.Lock();
524 while(COND != 1)
525 CV.Wait(&MU);
526 MU.Unlock();
527 GLOB = 2;
529 void Run() {
530 FAST_MODE_INIT(&GLOB);
531 if (!Tsan_PureHappensBefore())
532 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test05. FP. Unavoidable in hybrid scheme.");
533 printf("test05: unavoidable false positive\n");
534 Waiter();
535 printf("\tGLOB=%d\n", GLOB);
537 REGISTER_TEST(Run, 5);
538 } // namespace test05
541 // test06: TN. Synchronization via CondVar, but Waker gets there first. {{{1
542 namespace test06 {
543 int GLOB = 0;
544 // Same as test05 but we annotated the Wait() loop.
546 // Waiter: Waker:
547 // 1. COND = 0
548 // 2. Start(Waker)
549 // 3. MU.Lock() a. write(GLOB)
550 // b. MU.Lock()
551 // c. COND = 1
552 // /------- d. CV.Signal()
553 // 4. while(COND) / e. MU.Unlock()
554 // CV.Wait(MU) <<< not called /
555 // 6. ANNOTATE_CONDVAR_WAIT(CV, MU) <----/
556 // 5. MU.Unlock()
557 // 6. write(GLOB)
559 Mutex MU;
561 void Waker() {
562 GLOB = 1;
563 MU.Lock();
564 COND = 1;
565 CV.Signal();
566 MU.Unlock();
569 void Waiter() {
570 ThreadPool pool(1);
571 pool.StartWorkers();
572 COND = 0;
573 pool.Add(NewCallback(Waker));
574 usleep(100000); // Make sure the signaller gets there first.
575 MU.Lock();
576 while(COND != 1)
577 CV.Wait(&MU);
578 ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU);
580 MU.Unlock();
581 GLOB = 2;
583 void Run() {
584 printf("test06: negative\n");
585 Waiter();
586 printf("\tGLOB=%d\n", GLOB);
588 REGISTER_TEST2(Run, 6, FEATURE|NEEDS_ANNOTATIONS);
589 } // namespace test06
592 // test07: TN. Synchronization via LockWhen(), Signaller is observed first. {{{1
593 namespace test07 {
594 int GLOB = 0;
595 bool COND = 0;
596 // Two write accesses to GLOB are synchronized via conditional critical section.
597 // LockWhen() is observed after COND has been set (due to sleep).
598 // Unlock() calls ANNOTATE_CONDVAR_SIGNAL().
600 // Waiter: Signaller:
601 // 1. COND = 0
602 // 2. Start(Signaller)
603 // a. write(GLOB)
604 // b. MU.Lock()
605 // c. COND = 1
606 // /--- d. MU.Unlock calls ANNOTATE_CONDVAR_SIGNAL
607 // 3. MU.LockWhen(COND==1) <---/
608 // 4. MU.Unlock()
609 // 5. write(GLOB)
611 Mutex MU;
612 void Signaller() {
613 GLOB = 1;
614 MU.Lock();
615 COND = true; // We are done! Tell the Waiter.
616 MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
618 void Waiter() {
619 COND = false;
620 MyThread t(Signaller);
621 t.Start();
622 usleep(100000); // Make sure the signaller gets there first.
624 MU.LockWhen(Condition(&ArgIsTrue, &COND)); // calls ANNOTATE_CONDVAR_WAIT
625 MU.Unlock(); // Signaller is done!
627 GLOB = 2; // If LockWhen didn't catch the signal, a race may be reported here.
628 t.Join();
630 void Run() {
631 printf("test07: negative\n");
632 Waiter();
633 printf("\tGLOB=%d\n", GLOB);
635 REGISTER_TEST2(Run, 7, FEATURE|NEEDS_ANNOTATIONS);
636 } // namespace test07
638 // test08: TN. Synchronization via thread start/join. {{{1
639 namespace test08 {
640 int GLOB = 0;
641 // Three accesses to GLOB are separated by thread start/join.
643 // Parent: Worker:
644 // 1. write(GLOB)
645 // 2. Start(Worker) ------------>
646 // a. write(GLOB)
647 // 3. Join(Worker) <------------
648 // 4. write(GLOB)
649 void Worker() {
650 GLOB = 2;
653 void Parent() {
654 MyThread t(Worker);
655 GLOB = 1;
656 t.Start();
657 t.Join();
658 GLOB = 3;
660 void Run() {
661 printf("test08: negative\n");
662 Parent();
663 printf("\tGLOB=%d\n", GLOB);
665 REGISTER_TEST(Run, 8);
666 } // namespace test08
669 // test09: TP. Simple race (read vs write). {{{1
670 namespace test09 {
671 int GLOB = 0;
672 // A simple data race between writer and reader.
673 // Write happens after read (enforced by sleep).
674 // Usually, easily detectable by a race detector.
675 void Writer() {
676 usleep(100000);
677 GLOB = 3;
679 void Reader() {
680 CHECK(GLOB != -777);
683 void Run() {
684 ANNOTATE_TRACE_MEMORY(&GLOB);
685 FAST_MODE_INIT(&GLOB);
686 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test09. TP.");
687 printf("test09: positive\n");
688 MyThreadArray t(Writer, Reader);
689 t.Start();
690 t.Join();
691 printf("\tGLOB=%d\n", GLOB);
693 REGISTER_TEST(Run, 9);
694 } // namespace test09
697 // test10: FN. Simple race (write vs read). {{{1
698 namespace test10 {
699 int GLOB = 0;
700 // A simple data race between writer and reader.
701 // Write happens before Read (enforced by sleep),
702 // otherwise this test is the same as test09.
704 // Writer: Reader:
705 // 1. write(GLOB) a. sleep(long enough so that GLOB
706 // is most likely initialized by Writer)
707 // b. read(GLOB)
710 // Eraser algorithm does not detect the race here,
711 // see Section 2.2 of http://citeseer.ist.psu.edu/savage97eraser.html.
713 void Writer() {
714 GLOB = 3;
716 void Reader() {
717 usleep(100000);
718 CHECK(GLOB != -777);
721 void Run() {
722 FAST_MODE_INIT(&GLOB);
723 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test10. TP. FN in MSMHelgrind.");
724 printf("test10: positive\n");
725 MyThreadArray t(Writer, Reader);
726 t.Start();
727 t.Join();
728 printf("\tGLOB=%d\n", GLOB);
730 REGISTER_TEST(Run, 10);
731 } // namespace test10
734 // test11: FP. Synchronization via CondVar, 2 workers. {{{1
735 // This test is properly synchronized, but currently (Dec 2007)
736 // helgrind reports a false positive.
738 // Parent: Worker1, Worker2:
739 // 1. Start(workers) a. read(GLOB)
740 // 2. MU.Lock() b. MU.Lock()
741 // 3. while(COND != 2) /-------- c. CV.Signal()
742 // CV.Wait(&MU) <-------/ d. MU.Unlock()
743 // 4. MU.Unlock()
744 // 5. write(GLOB)
746 namespace test11 {
747 int GLOB = 0;
748 Mutex MU;
749 void Worker() {
750 usleep(200000);
751 CHECK(GLOB != 777);
753 MU.Lock();
754 COND++;
755 CV.Signal();
756 MU.Unlock();
759 void Parent() {
760 COND = 0;
762 MyThreadArray t(Worker, Worker);
763 t.Start();
765 MU.Lock();
766 while(COND != 2) {
767 CV.Wait(&MU);
769 MU.Unlock();
771 GLOB = 2;
773 t.Join();
776 void Run() {
777 // ANNOTATE_EXPECT_RACE(&GLOB, "test11. FP. Fixed by MSMProp1.");
778 printf("test11: negative\n");
779 Parent();
780 printf("\tGLOB=%d\n", GLOB);
782 REGISTER_TEST(Run, 11);
783 } // namespace test11
786 // test12: FP. Synchronization via Mutex, then via PCQ. {{{1
787 namespace test12 {
788 int GLOB = 0;
789 // This test is properly synchronized, but currently (Dec 2007)
790 // helgrind reports a false positive.
792 // First, we write to GLOB under MU, then we synchronize via PCQ,
793 // which is essentially a semaphore.
795 // Putter: Getter:
796 // 1. MU.Lock() a. MU.Lock()
797 // 2. write(GLOB) <---- MU ----> b. write(GLOB)
798 // 3. MU.Unlock() c. MU.Unlock()
799 // 4. Q.Put() ---------------> d. Q.Get()
800 // e. write(GLOB)
802 ProducerConsumerQueue Q(INT_MAX);
803 Mutex MU;
805 void Putter() {
806 MU.Lock();
807 GLOB++;
808 MU.Unlock();
810 Q.Put(NULL);
813 void Getter() {
814 MU.Lock();
815 GLOB++;
816 MU.Unlock();
818 Q.Get();
819 GLOB++;
822 void Run() {
823 // ANNOTATE_EXPECT_RACE(&GLOB, "test12. FP. Fixed by MSMProp1.");
824 printf("test12: negative\n");
825 MyThreadArray t(Putter, Getter);
826 t.Start();
827 t.Join();
828 printf("\tGLOB=%d\n", GLOB);
830 REGISTER_TEST(Run, 12);
831 } // namespace test12
834 // test13: FP. Synchronization via Mutex, then via LockWhen. {{{1
835 namespace test13 {
836 int GLOB = 0;
837 // This test is essentially the same as test12, but uses LockWhen
838 // instead of PCQ.
840 // Waker: Waiter:
841 // 1. MU.Lock() a. MU.Lock()
842 // 2. write(GLOB) <---------- MU ----------> b. write(GLOB)
843 // 3. MU.Unlock() c. MU.Unlock()
844 // 4. MU.Lock() .
845 // 5. COND = 1 .
846 // 6. ANNOTATE_CONDVAR_SIGNAL -------\ .
847 // 7. MU.Unlock() \ .
848 // \----> d. MU.LockWhen(COND == 1)
849 // e. MU.Unlock()
850 // f. write(GLOB)
851 Mutex MU;
853 void Waker() {
854 MU.Lock();
855 GLOB++;
856 MU.Unlock();
858 MU.Lock();
859 COND = 1;
860 ANNOTATE_CONDVAR_SIGNAL(&MU);
861 MU.Unlock();
864 void Waiter() {
865 MU.Lock();
866 GLOB++;
867 MU.Unlock();
869 MU.LockWhen(Condition(&ArgIsOne, &COND));
870 MU.Unlock();
871 GLOB++;
874 void Run() {
875 // ANNOTATE_EXPECT_RACE(&GLOB, "test13. FP. Fixed by MSMProp1.");
876 printf("test13: negative\n");
877 COND = 0;
879 MyThreadArray t(Waker, Waiter);
880 t.Start();
881 t.Join();
883 printf("\tGLOB=%d\n", GLOB);
885 REGISTER_TEST2(Run, 13, FEATURE|NEEDS_ANNOTATIONS);
886 } // namespace test13
889 // test14: FP. Synchronization via PCQ, reads, 2 workers. {{{1
890 namespace test14 {
891 int GLOB = 0;
892 // This test is properly synchronized, but currently (Dec 2007)
893 // helgrind reports a false positive.
895 // This test is similar to test11, but uses PCQ (semaphore).
897 // Putter2: Putter1: Getter:
898 // 1. read(GLOB) a. read(GLOB)
899 // 2. Q2.Put() ----\ b. Q1.Put() -----\ .
900 // \ \--------> A. Q1.Get()
901 // \----------------------------------> B. Q2.Get()
902 // C. write(GLOB)
903 ProducerConsumerQueue Q1(INT_MAX), Q2(INT_MAX);
905 void Putter1() {
906 CHECK(GLOB != 777);
907 Q1.Put(NULL);
909 void Putter2() {
910 CHECK(GLOB != 777);
911 Q2.Put(NULL);
913 void Getter() {
914 Q1.Get();
915 Q2.Get();
916 GLOB++;
918 void Run() {
919 // ANNOTATE_EXPECT_RACE(&GLOB, "test14. FP. Fixed by MSMProp1.");
920 printf("test14: negative\n");
921 MyThreadArray t(Getter, Putter1, Putter2);
922 t.Start();
923 t.Join();
924 printf("\tGLOB=%d\n", GLOB);
926 REGISTER_TEST(Run, 14);
927 } // namespace test14
930 // test15: TN. Synchronization via LockWhen. One waker and 2 waiters. {{{1
931 namespace test15 {
932 // Waker: Waiter1, Waiter2:
933 // 1. write(GLOB)
934 // 2. MU.Lock()
935 // 3. COND = 1
936 // 4. ANNOTATE_CONDVAR_SIGNAL ------------> a. MU.LockWhen(COND == 1)
937 // 5. MU.Unlock() b. MU.Unlock()
938 // c. read(GLOB)
940 int GLOB = 0;
941 Mutex MU;
943 void Waker() {
944 GLOB = 2;
946 MU.Lock();
947 COND = 1;
948 ANNOTATE_CONDVAR_SIGNAL(&MU);
949 MU.Unlock();
952 void Waiter() {
953 MU.LockWhen(Condition(&ArgIsOne, &COND));
954 MU.Unlock();
955 CHECK(GLOB != 777);
959 void Run() {
960 COND = 0;
961 printf("test15: negative\n");
962 MyThreadArray t(Waker, Waiter, Waiter);
963 t.Start();
964 t.Join();
965 printf("\tGLOB=%d\n", GLOB);
967 REGISTER_TEST(Run, 15);
968 } // namespace test15
971 // test16: FP. Barrier (emulated by CV), 2 threads. {{{1
972 namespace test16 {
973 // Worker1: Worker2:
974 // 1. MU.Lock() a. MU.Lock()
975 // 2. write(GLOB) <------------ MU ----------> b. write(GLOB)
976 // 3. MU.Unlock() c. MU.Unlock()
977 // 4. MU2.Lock() d. MU2.Lock()
978 // 5. COND-- e. COND--
979 // 6. ANNOTATE_CONDVAR_SIGNAL(MU2) ---->V .
980 // 7. MU2.Await(COND == 0) <------------+------ f. ANNOTATE_CONDVAR_SIGNAL(MU2)
981 // 8. MU2.Unlock() V-----> g. MU2.Await(COND == 0)
982 // 9. read(GLOB) h. MU2.Unlock()
983 // i. read(GLOB)
986 // TODO: This way we may create too many edges in the happens-before graph.
987 // Arndt Mühlenfeld in his PhD (TODO: link) suggests creating special nodes in
988 // the happens-before graph to reduce the total number of edges.
989 // See figure 3.14.
992 int GLOB = 0;
993 Mutex MU;
994 Mutex MU2;
996 void Worker() {
997 MU.Lock();
998 GLOB++;
999 MU.Unlock();
1001 MU2.Lock();
1002 COND--;
1003 ANNOTATE_CONDVAR_SIGNAL(&MU2);
1004 MU2.Await(Condition(&ArgIsZero, &COND));
1005 MU2.Unlock();
1007 CHECK(GLOB == 2);
1010 void Run() {
1011 // ANNOTATE_EXPECT_RACE(&GLOB, "test16. FP. Fixed by MSMProp1 + Barrier support.");
1012 COND = 2;
1013 printf("test16: negative\n");
1014 MyThreadArray t(Worker, Worker);
1015 t.Start();
1016 t.Join();
1017 printf("\tGLOB=%d\n", GLOB);
1019 REGISTER_TEST2(Run, 16, FEATURE|NEEDS_ANNOTATIONS);
1020 } // namespace test16
1023 // test17: FP. Barrier (emulated by CV), 3 threads. {{{1
1024 namespace test17 {
1025 // Same as test16, but with 3 threads.
1026 int GLOB = 0;
1027 Mutex MU;
1028 Mutex MU2;
1030 void Worker() {
1031 MU.Lock();
1032 GLOB++;
1033 MU.Unlock();
1035 MU2.Lock();
1036 COND--;
1037 ANNOTATE_CONDVAR_SIGNAL(&MU2);
1038 MU2.Await(Condition(&ArgIsZero, &COND));
1039 MU2.Unlock();
1041 CHECK(GLOB == 3);
1044 void Run() {
1045 // ANNOTATE_EXPECT_RACE(&GLOB, "test17. FP. Fixed by MSMProp1 + Barrier support.");
1046 COND = 3;
1047 printf("test17: negative\n");
1048 MyThreadArray t(Worker, Worker, Worker);
1049 t.Start();
1050 t.Join();
1051 printf("\tGLOB=%d\n", GLOB);
1053 REGISTER_TEST2(Run, 17, FEATURE|NEEDS_ANNOTATIONS);
1054 } // namespace test17
1057 // test18: TN. Synchronization via Await(), signaller gets there first. {{{1
1058 namespace test18 {
1059 int GLOB = 0;
1060 Mutex MU;
1061 // Same as test03, but uses Mutex::Await() instead of Mutex::LockWhen().
1063 void Waker() {
1064 usleep(100000); // Make sure the waiter blocks.
1065 GLOB = 1;
1067 MU.Lock();
1068 COND = 1; // We are done! Tell the Waiter.
1069 MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
1071 void Waiter() {
1072 ThreadPool pool(1);
1073 pool.StartWorkers();
1074 COND = 0;
1075 pool.Add(NewCallback(Waker));
1077 MU.Lock();
1078 MU.Await(Condition(&ArgIsOne, &COND)); // calls ANNOTATE_CONDVAR_WAIT
1079 MU.Unlock(); // Waker is done!
1081 GLOB = 2;
1083 void Run() {
1084 printf("test18: negative\n");
1085 Waiter();
1086 printf("\tGLOB=%d\n", GLOB);
1088 REGISTER_TEST2(Run, 18, FEATURE|NEEDS_ANNOTATIONS);
1089 } // namespace test18
1091 // test19: TN. Synchronization via AwaitWithTimeout(). {{{1
1092 namespace test19 {
1093 int GLOB = 0;
1094 // Same as test18, but with AwaitWithTimeout. Do not timeout.
1095 Mutex MU;
1096 void Waker() {
1097 usleep(100000); // Make sure the waiter blocks.
1098 GLOB = 1;
1100 MU.Lock();
1101 COND = 1; // We are done! Tell the Waiter.
1102 MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
1104 void Waiter() {
1105 ThreadPool pool(1);
1106 pool.StartWorkers();
1107 COND = 0;
1108 pool.Add(NewCallback(Waker));
1110 MU.Lock();
1111 CHECK(MU.AwaitWithTimeout(Condition(&ArgIsOne, &COND), INT_MAX));
1112 MU.Unlock();
1114 GLOB = 2;
1116 void Run() {
1117 printf("test19: negative\n");
1118 Waiter();
1119 printf("\tGLOB=%d\n", GLOB);
1121 REGISTER_TEST2(Run, 19, FEATURE|NEEDS_ANNOTATIONS);
1122 } // namespace test19
1124 // test20: TP. Incorrect synchronization via AwaitWithTimeout(), timeout. {{{1
1125 namespace test20 {
1126 int GLOB = 0;
1127 Mutex MU;
1128 // True race. We time out in AwaitWithTimeout().
1129 void Waker() {
1130 GLOB = 1;
1131 usleep(100 * 1000);
1133 void Waiter() {
1134 ThreadPool pool(1);
1135 pool.StartWorkers();
1136 COND = 0;
1137 pool.Add(NewCallback(Waker));
1139 MU.Lock();
1140 CHECK(!MU.AwaitWithTimeout(Condition(&ArgIsOne, &COND), 100));
1141 MU.Unlock();
1143 GLOB = 2;
1145 void Run() {
1146 FAST_MODE_INIT(&GLOB);
1147 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test20. TP.");
1148 printf("test20: positive\n");
1149 Waiter();
1150 printf("\tGLOB=%d\n", GLOB);
1152 REGISTER_TEST2(Run, 20, FEATURE|NEEDS_ANNOTATIONS);
1153 } // namespace test20
1155 // test21: TP. Incorrect synchronization via LockWhenWithTimeout(). {{{1
1156 namespace test21 {
1157 int GLOB = 0;
1158 // True race. We timeout in LockWhenWithTimeout().
1159 Mutex MU;
1160 void Waker() {
1161 GLOB = 1;
1162 usleep(100 * 1000);
1164 void Waiter() {
1165 ThreadPool pool(1);
1166 pool.StartWorkers();
1167 COND = 0;
1168 pool.Add(NewCallback(Waker));
1170 CHECK(!MU.LockWhenWithTimeout(Condition(&ArgIsOne, &COND), 100));
1171 MU.Unlock();
1173 GLOB = 2;
1175 void Run() {
1176 FAST_MODE_INIT(&GLOB);
1177 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test21. TP.");
1178 printf("test21: positive\n");
1179 Waiter();
1180 printf("\tGLOB=%d\n", GLOB);
1182 REGISTER_TEST2(Run, 21, FEATURE|NEEDS_ANNOTATIONS);
1183 } // namespace test21
1185 // test22: TP. Incorrect synchronization via CondVar::WaitWithTimeout(). {{{1
1186 namespace test22 {
1187 int GLOB = 0;
1188 Mutex MU;
1189 // True race. We timeout in CondVar::WaitWithTimeout().
1190 void Waker() {
1191 GLOB = 1;
1192 usleep(100 * 1000);
1194 void Waiter() {
1195 ThreadPool pool(1);
1196 pool.StartWorkers();
1197 COND = 0;
1198 pool.Add(NewCallback(Waker));
1200 int64_t ms_left_to_wait = 100;
1201 int64_t deadline_ms = GetCurrentTimeMillis() + ms_left_to_wait;
1202 MU.Lock();
1203 while(COND != 1 && ms_left_to_wait > 0) {
1204 CV.WaitWithTimeout(&MU, ms_left_to_wait);
1205 ms_left_to_wait = deadline_ms - GetCurrentTimeMillis();
1207 MU.Unlock();
1209 GLOB = 2;
1211 void Run() {
1212 FAST_MODE_INIT(&GLOB);
1213 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test22. TP.");
1214 printf("test22: positive\n");
1215 Waiter();
1216 printf("\tGLOB=%d\n", GLOB);
1218 REGISTER_TEST(Run, 22);
1219 } // namespace test22
1221 // test23: TN. TryLock, ReaderLock, ReaderTryLock. {{{1
1222 namespace test23 {
1223 // Correct synchronization with TryLock, Lock, ReaderTryLock, ReaderLock.
1224 int GLOB = 0;
1225 Mutex MU;
1226 void Worker_TryLock() {
1227 for (int i = 0; i < 20; i++) {
1228 while (true) {
1229 if (MU.TryLock()) {
1230 GLOB++;
1231 MU.Unlock();
1232 break;
1234 usleep(1000);
1239 void Worker_ReaderTryLock() {
1240 for (int i = 0; i < 20; i++) {
1241 while (true) {
1242 if (MU.ReaderTryLock()) {
1243 CHECK(GLOB != 777);
1244 MU.ReaderUnlock();
1245 break;
1247 usleep(1000);
1252 void Worker_ReaderLock() {
1253 for (int i = 0; i < 20; i++) {
1254 MU.ReaderLock();
1255 CHECK(GLOB != 777);
1256 MU.ReaderUnlock();
1257 usleep(1000);
1261 void Worker_Lock() {
1262 for (int i = 0; i < 20; i++) {
1263 MU.Lock();
1264 GLOB++;
1265 MU.Unlock();
1266 usleep(1000);
1270 void Run() {
1271 printf("test23: negative\n");
1272 MyThreadArray t(Worker_TryLock,
1273 Worker_ReaderTryLock,
1274 Worker_ReaderLock,
1275 Worker_Lock
1277 t.Start();
1278 t.Join();
1279 printf("\tGLOB=%d\n", GLOB);
1281 REGISTER_TEST(Run, 23);
1282 } // namespace test23
1284 // test24: TN. Synchronization via ReaderLockWhen(). {{{1
1285 namespace test24 {
1286 int GLOB = 0;
1287 Mutex MU;
1288 // Same as test03, but uses ReaderLockWhen().
1290 void Waker() {
1291 usleep(100000); // Make sure the waiter blocks.
1292 GLOB = 1;
1294 MU.Lock();
1295 COND = 1; // We are done! Tell the Waiter.
1296 MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
1298 void Waiter() {
1299 ThreadPool pool(1);
1300 pool.StartWorkers();
1301 COND = 0;
1302 pool.Add(NewCallback(Waker));
1303 MU.ReaderLockWhen(Condition(&ArgIsOne, &COND));
1304 MU.ReaderUnlock();
1306 GLOB = 2;
1308 void Run() {
1309 printf("test24: negative\n");
1310 Waiter();
1311 printf("\tGLOB=%d\n", GLOB);
1313 REGISTER_TEST2(Run, 24, FEATURE|NEEDS_ANNOTATIONS);
1314 } // namespace test24
1316 // test25: TN. Synchronization via ReaderLockWhenWithTimeout(). {{{1
1317 namespace test25 {
1318 int GLOB = 0;
1319 Mutex MU;
1320 // Same as test24, but uses ReaderLockWhenWithTimeout().
1321 // We do not timeout.
1323 void Waker() {
1324 usleep(100000); // Make sure the waiter blocks.
1325 GLOB = 1;
1327 MU.Lock();
1328 COND = 1; // We are done! Tell the Waiter.
1329 MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
1331 void Waiter() {
1332 ThreadPool pool(1);
1333 pool.StartWorkers();
1334 COND = 0;
1335 pool.Add(NewCallback(Waker));
1336 CHECK(MU.ReaderLockWhenWithTimeout(Condition(&ArgIsOne, &COND), INT_MAX));
1337 MU.ReaderUnlock();
1339 GLOB = 2;
1341 void Run() {
1342 printf("test25: negative\n");
1343 Waiter();
1344 printf("\tGLOB=%d\n", GLOB);
1346 REGISTER_TEST2(Run, 25, FEATURE|NEEDS_ANNOTATIONS);
1347 } // namespace test25
1349 // test26: TP. Incorrect synchronization via ReaderLockWhenWithTimeout(). {{{1
1350 namespace test26 {
1351 int GLOB = 0;
1352 Mutex MU;
1353 // Same as test25, but we timeout and incorrectly assume happens-before.
1355 void Waker() {
1356 GLOB = 1;
1357 usleep(10000);
1359 void Waiter() {
1360 ThreadPool pool(1);
1361 pool.StartWorkers();
1362 COND = 0;
1363 pool.Add(NewCallback(Waker));
1364 CHECK(!MU.ReaderLockWhenWithTimeout(Condition(&ArgIsOne, &COND), 100));
1365 MU.ReaderUnlock();
1367 GLOB = 2;
1369 void Run() {
1370 FAST_MODE_INIT(&GLOB);
1371 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test26. TP");
1372 printf("test26: positive\n");
1373 Waiter();
1374 printf("\tGLOB=%d\n", GLOB);
1376 REGISTER_TEST2(Run, 26, FEATURE|NEEDS_ANNOTATIONS);
1377 } // namespace test26
1380 // test27: TN. Simple synchronization via SpinLock. {{{1
1381 namespace test27 {
1382 #ifndef NO_SPINLOCK
1383 int GLOB = 0;
1384 SpinLock MU;
1385 void Worker() {
1386 MU.Lock();
1387 GLOB++;
1388 MU.Unlock();
1389 usleep(10000);
1392 void Run() {
1393 printf("test27: negative\n");
1394 MyThreadArray t(Worker, Worker, Worker, Worker);
1395 t.Start();
1396 t.Join();
1397 printf("\tGLOB=%d\n", GLOB);
1399 REGISTER_TEST2(Run, 27, FEATURE|NEEDS_ANNOTATIONS);
1400 #endif // NO_SPINLOCK
1401 } // namespace test27
1404 // test28: TN. Synchronization via Mutex, then PCQ. 3 threads {{{1
1405 namespace test28 {
1406 // Putter1: Getter: Putter2:
1407 // 1. MU.Lock() A. MU.Lock()
1408 // 2. write(GLOB) B. write(GLOB)
1409 // 3. MU.Unlock() C. MU.Unlock()
1410 // 4. Q.Put() ---------\ /------- D. Q.Put()
1411 // 5. MU.Lock() \-------> a. Q.Get() / E. MU.Lock()
1412 // 6. read(GLOB) b. Q.Get() <---------/ F. read(GLOB)
1413 // 7. MU.Unlock() (sleep) G. MU.Unlock()
1414 // c. read(GLOB)
1415 ProducerConsumerQueue Q(INT_MAX);
1416 int GLOB = 0;
1417 Mutex MU;
1419 void Putter() {
1420 MU.Lock();
1421 GLOB++;
1422 MU.Unlock();
1424 Q.Put(NULL);
1426 MU.Lock();
1427 CHECK(GLOB != 777);
1428 MU.Unlock();
1431 void Getter() {
1432 Q.Get();
1433 Q.Get();
1434 usleep(100000);
1435 CHECK(GLOB == 2);
1438 void Run() {
1439 printf("test28: negative\n");
1440 MyThreadArray t(Getter, Putter, Putter);
1441 t.Start();
1442 t.Join();
1443 printf("\tGLOB=%d\n", GLOB);
1445 REGISTER_TEST(Run, 28);
1446 } // namespace test28
1449 // test29: TN. Synchronization via Mutex, then PCQ. 4 threads. {{{1
1450 namespace test29 {
1451 // Similar to test28, but has two Getters and two PCQs.
1452 ProducerConsumerQueue *Q1, *Q2;
1453 Mutex MU;
1454 int GLOB = 0;
1456 void Putter(ProducerConsumerQueue *q) {
1457 MU.Lock();
1458 GLOB++;
1459 MU.Unlock();
1461 q->Put(NULL);
1462 q->Put(NULL);
1464 MU.Lock();
1465 CHECK(GLOB != 777);
1466 MU.Unlock();
1470 void Putter1() { Putter(Q1); }
1471 void Putter2() { Putter(Q2); }
1473 void Getter() {
1474 Q1->Get();
1475 Q2->Get();
1476 usleep(100000);
1477 CHECK(GLOB == 2);
1478 usleep(48000); // TODO: remove this when FP in test32 is fixed.
1481 void Run() {
1482 printf("test29: negative\n");
1483 Q1 = new ProducerConsumerQueue(INT_MAX);
1484 Q2 = new ProducerConsumerQueue(INT_MAX);
1485 MyThreadArray t(Getter, Getter, Putter1, Putter2);
1486 t.Start();
1487 t.Join();
1488 printf("\tGLOB=%d\n", GLOB);
1489 delete Q1;
1490 delete Q2;
1492 REGISTER_TEST(Run, 29);
1493 } // namespace test29
1496 // test30: TN. Synchronization via 'safe' race. Writer vs multiple Readers. {{{1
1497 namespace test30 {
1498 // This test shows a very risky kind of synchronization which is very easy
1499 // to get wrong. Actually, I am not sure I've got it right.
1501 // Writer: Reader1, Reader2, ..., ReaderN:
1502 // 1. write(GLOB[i]: i >= BOUNDARY) a. n = BOUNDARY
1503 // 2. HAPPENS_BEFORE(BOUNDARY+1) -------> b. HAPPENS_AFTER(n)
1504 // 3. BOUNDARY++; c. read(GLOB[i]: i < n)
1506 // Here we have a 'safe' race on accesses to BOUNDARY and
1507 // no actual races on accesses to GLOB[]:
1508 // Writer writes to GLOB[i] where i>=BOUNDARY and then increments BOUNDARY.
1509 // Readers read BOUNDARY and read GLOB[i] where i<BOUNDARY.
1511 // I am not completely sure that this scheme guarantees no race between
1512 // accesses to GLOB since compilers and CPUs
1513 // are free to rearrange memory operations.
1514 // I am actually sure that this scheme is wrong unless we use
1515 // some smart memory fencing...
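// (Illustrative note, not part of the original test: making this pattern safe
// would require release/acquire ordering on BOUNDARY, e.g. a C++11
// std::atomic<int> written with memory_order_release and read with
// memory_order_acquire; the plain 'volatile int' used here gives no such
// ordering guarantee.)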
1518 const int N = 48;
1519 static int GLOB[N];
1520 volatile int BOUNDARY = 0;
1522 void Writer() {
1523 for (int i = 0; i < N; i++) {
1524 CHECK(BOUNDARY == i);
1525 for (int j = i; j < N; j++) {
1526 GLOB[j] = j;
1528 ANNOTATE_HAPPENS_BEFORE(reinterpret_cast<void*>(BOUNDARY+1));
1529 BOUNDARY++;
1530 usleep(1000);
1534 void Reader() {
1535 int n;
1536 do {
1537 n = BOUNDARY;
1538 if (n == 0) continue;
1539 ANNOTATE_HAPPENS_AFTER(reinterpret_cast<void*>(n));
1540 for (int i = 0; i < n; i++) {
1541 CHECK(GLOB[i] == i);
1543 usleep(100);
1544 } while(n < N);
1547 void Run() {
1548 FAST_MODE_INIT(&BOUNDARY);
1549 ANNOTATE_EXPECT_RACE((void*)(&BOUNDARY), "test30. Sync via 'safe' race.");
1550 printf("test30: negative\n");
1551 MyThreadArray t(Writer, Reader, Reader, Reader);
1552 t.Start();
1553 t.Join();
1554 printf("\tGLOB=%d\n", GLOB[N-1]);
1556 REGISTER_TEST2(Run, 30, FEATURE|NEEDS_ANNOTATIONS);
1557 } // namespace test30
1560 // test31: TN. Synchronization via 'safe' race. Writer vs Writer. {{{1
1561 namespace test31 {
1562 // This test is similar to test30, but
1563 // it has one Writer instead of multiple Readers.
1565 // Writer1: Writer2
1566 // 1. write(GLOB[i]: i >= BOUNDARY) a. n = BOUNDARY
1567 // 2. HAPPENS_BEFORE(BOUNDARY+1) -------> b. HAPPENS_AFTER(n)
1568 // 3. BOUNDARY++; c. write(GLOB[i]: i < n)
1571 const int N = 48;
1572 static int GLOB[N];
1573 volatile int BOUNDARY = 0;
1575 void Writer1() {
1576 for (int i = 0; i < N; i++) {
1577 CHECK(BOUNDARY == i);
1578 for (int j = i; j < N; j++) {
1579 GLOB[j] = j;
1581 ANNOTATE_HAPPENS_BEFORE(reinterpret_cast<void*>(BOUNDARY+1));
1582 BOUNDARY++;
1583 usleep(1000);
1587 void Writer2() {
1588 int n;
1589 do {
1590 n = BOUNDARY;
1591 if (n == 0) continue;
1592 ANNOTATE_HAPPENS_AFTER(reinterpret_cast<void*>(n));
1593 for (int i = 0; i < n; i++) {
1594 if(GLOB[i] == i) {
1595 GLOB[i]++;
1598 usleep(100);
1599 } while(n < N);
1602 void Run() {
1603 FAST_MODE_INIT(&BOUNDARY);
1604 ANNOTATE_EXPECT_RACE((void*)(&BOUNDARY), "test31. Sync via 'safe' race.");
1605 printf("test31: negative\n");
1606 MyThreadArray t(Writer1, Writer2);
1607 t.Start();
1608 t.Join();
1609 printf("\tGLOB=%d\n", GLOB[N-1]);
1611 REGISTER_TEST2(Run, 31, FEATURE|NEEDS_ANNOTATIONS);
1612 } // namespace test31
1615 // test32: FP. Synchronization via thread create/join. W/R. {{{1
1616 namespace test32 {
1617 // This test is well synchronized but helgrind 3.3.0 reports a race.
1619 // Parent: Writer: Reader:
1620 // 1. Start(Reader) -----------------------\ .
1621 // \ .
1622 // 2. Start(Writer) ---\ \ .
1623 // \---> a. MU.Lock() \--> A. sleep(long enough)
1624 // b. write(GLOB)
1625 // /---- c. MU.Unlock()
1626 // 3. Join(Writer) <---/
1627 // B. MU.Lock()
1628 // C. read(GLOB)
1629 // /------------ D. MU.Unlock()
1630 // 4. Join(Reader) <----------------/
1631 // 5. write(GLOB)
1634 // The call to usleep() in Reader is not part of the synchronization;
1635 // it is required to trigger the false positive in helgrind 3.3.0.
1637 int GLOB = 0;
1638 Mutex MU;
1640 void Writer() {
1641 MU.Lock();
1642 GLOB = 1;
1643 MU.Unlock();
1646 void Reader() {
1647 usleep(480000);
1648 MU.Lock();
1649 CHECK(GLOB != 777);
1650 MU.Unlock();
1653 void Parent() {
1654 MyThread r(Reader);
1655 MyThread w(Writer);
1656 r.Start();
1657 w.Start();
1659 w.Join(); // 'w' joins first.
1660 r.Join();
1662 GLOB = 2;
1665 void Run() {
1666 // ANNOTATE_EXPECT_RACE(&GLOB, "test32. FP. Fixed by MSMProp1.");
1667 printf("test32: negative\n");
1668 Parent();
1669 printf("\tGLOB=%d\n", GLOB);
1672 REGISTER_TEST(Run, 32);
1673 } // namespace test32
1676 // test33: STAB. Stress test for the number of thread sets (TSETs). {{{1
1677 namespace test33 {
1678 int GLOB = 0;
1679 // Here we access N memory locations from within log(N) threads.
1680 // We do it in such a way that helgrind creates nearly all possible TSETs.
1681 // Then we join all threads and start again (N_iter times).
1682 const int N_iter = 48;
1683 const int Nlog = 15;
1684 const int N = 1 << Nlog;
1685 static int ARR[N];
1686 Mutex MU;
1688 void Worker() {
1689 MU.Lock();
1690 int n = ++GLOB;
1691 MU.Unlock();
1693 n %= Nlog;
1694 for (int i = 0; i < N; i++) {
1695 // ARR[i] is accessed by threads from i-th subset
1696 if (i & (1 << n)) {
1697 CHECK(ARR[i] == 0);
1702 void Run() {
1703 printf("test33:\n");
1705 std::vector<MyThread*> vec(Nlog);
1707 for (int j = 0; j < N_iter; j++) {
1708 // Create and start Nlog threads
1709 for (int i = 0; i < Nlog; i++) {
1710 vec[i] = new MyThread(Worker);
1712 for (int i = 0; i < Nlog; i++) {
1713 vec[i]->Start();
1715 // Join all threads.
1716 for (int i = 0; i < Nlog; i++) {
1717 vec[i]->Join();
1718 delete vec[i];
1720 printf("------------------\n");
1723 printf("\tGLOB=%d; ARR[1]=%d; ARR[7]=%d; ARR[N-1]=%d\n",
1724 GLOB, ARR[1], ARR[7], ARR[N-1]);
1726 REGISTER_TEST2(Run, 33, STABILITY|EXCLUDE_FROM_ALL);
1727 } // namespace test33
1730 // test34: STAB. Stress test for the number of locks sets (LSETs). {{{1
1731 namespace test34 {
1732 // Similar to test33, but for lock sets.
1733 int GLOB = 0;
1734 const int N_iter = 48;
1735 const int Nlog = 10;
1736 const int N = 1 << Nlog;
1737 static int ARR[N];
1738 static Mutex *MUs[Nlog];
1740 void Worker() {
1741 for (int i = 0; i < N; i++) {
1742 // ARR[i] is protected by MUs from i-th subset of all MUs
1743 for (int j = 0; j < Nlog; j++) if (i & (1 << j)) MUs[j]->Lock();
1744 CHECK(ARR[i] == 0);
1745 for (int j = 0; j < Nlog; j++) if (i & (1 << j)) MUs[j]->Unlock();
1749 void Run() {
1750 printf("test34:\n");
1751 for (int iter = 0; iter < N_iter; iter++) {
1752 for (int i = 0; i < Nlog; i++) {
1753 MUs[i] = new Mutex;
1755 MyThreadArray t(Worker, Worker);
1756 t.Start();
1757 t.Join();
1758 for (int i = 0; i < Nlog; i++) {
1759 delete MUs[i];
1761 printf("------------------\n");
1763 printf("\tGLOB=%d\n", GLOB);
1765 REGISTER_TEST2(Run, 34, STABILITY|EXCLUDE_FROM_ALL);
1766 } // namespace test34
1769 // test35: PERF. Lots of mutexes and lots of call to free(). {{{1
1770 namespace test35 {
1771 // Helgrind 3.3.0 is very slow in shadow_mem_make_NoAccess(). Fixed locally.
1772 // With the fix, helgrind runs this test in about a minute.
1773 // Without the fix -- about 5 minutes. (on c2d 2.4GHz).
1775 // TODO: need to figure out the best way for performance testing.
1776 int **ARR;
1777 const int N_mu = 25000;
1778 const int N_free = 48000;
1780 void Worker() {
1781 for (int i = 0; i < N_free; i++)
1782 CHECK(777 == *ARR[i]);
1785 void Run() {
1786 printf("test35:\n");
1787 std::vector<Mutex*> mus;
1789 ARR = new int *[N_free];
1790 for (int i = 0; i < N_free; i++) {
1791 const int c = N_free / N_mu;
1792 if ((i % c) == 0) {
1793 mus.push_back(new Mutex);
1794 mus.back()->Lock();
1795 mus.back()->Unlock();
1797 ARR[i] = new int(777);
1800 // Need to put all ARR[i] into shared state in order
1801 // to trigger the performance bug.
1802 MyThreadArray t(Worker, Worker);
1803 t.Start();
1804 t.Join();
1806 for (int i = 0; i < N_free; i++) delete ARR[i];
1807 delete [] ARR;
1809 for (size_t i = 0; i < mus.size(); i++) {
1810 delete mus[i];
1813 REGISTER_TEST2(Run, 35, PERFORMANCE|EXCLUDE_FROM_ALL);
1814 } // namespace test35
1817 // test36: TN. Synchronization via Mutex, then PCQ. 3 threads. W/W {{{1
1818 namespace test36 {
1819 // variation of test28 (W/W instead of W/R)
1821 // Putter1: Getter: Putter2:
1822 // 1. MU.Lock(); A. MU.Lock()
1823 // 2. write(GLOB) B. write(GLOB)
1824 // 3. MU.Unlock() C. MU.Unlock()
1825 // 4. Q.Put() ---------\ /------- D. Q.Put()
1826 // 5. MU1.Lock() \-------> a. Q.Get() / E. MU1.Lock()
1827 // 6. MU.Lock() b. Q.Get() <---------/ F. MU.Lock()
1828 // 7. write(GLOB) G. write(GLOB)
1829 // 8. MU.Unlock() H. MU.Unlock()
1830 // 9. MU1.Unlock() (sleep) I. MU1.Unlock()
1831 // c. MU1.Lock()
1832 // d. write(GLOB)
1833 // e. MU1.Unlock()
1834 ProducerConsumerQueue Q(INT_MAX);
1835 int GLOB = 0;
1836 Mutex MU, MU1;
1838 void Putter() {
1839 MU.Lock();
1840 GLOB++;
1841 MU.Unlock();
1843 Q.Put(NULL);
1845 MU1.Lock();
1846 MU.Lock();
1847 GLOB++;
1848 MU.Unlock();
1849 MU1.Unlock();
1852 void Getter() {
1853 Q.Get();
1854 Q.Get();
1855 usleep(100000);
1856 MU1.Lock();
1857 GLOB++;
1858 MU1.Unlock();
1861 void Run() {
1862 printf("test36: negative \n");
1863 MyThreadArray t(Getter, Putter, Putter);
1864 t.Start();
1865 t.Join();
1866 printf("\tGLOB=%d\n", GLOB);
1868 REGISTER_TEST(Run, 36);
1869 } // namespace test36
1872 // test37: TN. Simple synchronization (write vs read). {{{1
1873 namespace test37 {
1874 int GLOB = 0;
1875 Mutex MU;
1876 // Similar to test10, but properly locked.
1877 // Writer: Reader:
1878 // 1. MU.Lock()
1879 // 2. write
1880 // 3. MU.Unlock()
1881 // a. MU.Lock()
1882 // b. read
1883 // c. MU.Unlock();
1885 void Writer() {
1886 MU.Lock();
1887 GLOB = 3;
1888 MU.Unlock();
1890 void Reader() {
1891 usleep(100000);
1892 MU.Lock();
1893 CHECK(GLOB != -777);
1894 MU.Unlock();
1897 void Run() {
1898 printf("test37: negative\n");
1899 MyThreadArray t(Writer, Reader);
1900 t.Start();
1901 t.Join();
1902 printf("\tGLOB=%d\n", GLOB);
1904 REGISTER_TEST(Run, 37);
1905 } // namespace test37
1908 // test38: TN. Synchronization via Mutexes and PCQ. 4 threads. W/W {{{1
1909 namespace test38 {
1910 // Fusion of test29 and test36.
1912 // Putter1: Putter2: Getter1: Getter2:
1913 // MU1.Lock() MU1.Lock()
1914 // write(GLOB) write(GLOB)
1915 // MU1.Unlock() MU1.Unlock()
1916 // Q1.Put() Q2.Put()
1917 // Q1.Put() Q2.Put()
1918 // MU1.Lock() MU1.Lock()
1919 // MU2.Lock() MU2.Lock()
1920 // write(GLOB) write(GLOB)
1921 // MU2.Unlock() MU2.Unlock()
1922 // MU1.Unlock() MU1.Unlock() sleep sleep
1923 // Q1.Get() Q1.Get()
1924 // Q2.Get() Q2.Get()
1925 // MU2.Lock() MU2.Lock()
1926 // write(GLOB) write(GLOB)
1927 // MU2.Unlock() MU2.Unlock()
1931 ProducerConsumerQueue *Q1, *Q2;
1932 int GLOB = 0;
1933 Mutex MU, MU1, MU2;
1935 void Putter(ProducerConsumerQueue *q) {
1936 MU1.Lock();
1937 GLOB++;
1938 MU1.Unlock();
1940 q->Put(NULL);
1941 q->Put(NULL);
1943 MU1.Lock();
1944 MU2.Lock();
1945 GLOB++;
1946 MU2.Unlock();
1947 MU1.Unlock();
1951 void Putter1() { Putter(Q1); }
1952 void Putter2() { Putter(Q2); }
1954 void Getter() {
1955 usleep(100000);
1956 Q1->Get();
1957 Q2->Get();
1959 MU2.Lock();
1960 GLOB++;
1961 MU2.Unlock();
1963 usleep(48000); // TODO: remove this when FP in test32 is fixed.
1966 void Run() {
1967 printf("test38: negative\n");
1968 Q1 = new ProducerConsumerQueue(INT_MAX);
1969 Q2 = new ProducerConsumerQueue(INT_MAX);
1970 MyThreadArray t(Getter, Getter, Putter1, Putter2);
1971 t.Start();
1972 t.Join();
1973 printf("\tGLOB=%d\n", GLOB);
1974 delete Q1;
1975 delete Q2;
1977 REGISTER_TEST(Run, 38);
1978 } // namespace test38
1980 // test39: FP. Barrier. {{{1
1981 namespace test39 {
1982 #ifndef NO_BARRIER
1983 // Same as test17 but uses Barrier class (pthread_barrier_t).
1984 int GLOB = 0;
1985 const int N_threads = 3;
1986 Barrier barrier(N_threads);
1987 Mutex MU;
1989 void Worker() {
1990 MU.Lock();
1991 GLOB++;
1992 MU.Unlock();
1993 barrier.Block();
1994 CHECK(GLOB == N_threads);
1996 void Run() {
1997 ANNOTATE_TRACE_MEMORY(&GLOB);
1998 // ANNOTATE_EXPECT_RACE(&GLOB, "test39. FP. Fixed by MSMProp1. Barrier.");
1999 printf("test39: negative\n");
2001 ThreadPool pool(N_threads);
2002 pool.StartWorkers();
2003 for (int i = 0; i < N_threads; i++) {
2004 pool.Add(NewCallback(Worker));
2006 } // all folks are joined here.
2007 printf("\tGLOB=%d\n", GLOB);
2009 REGISTER_TEST(Run, 39);
2010 #endif // NO_BARRIER
2011 } // namespace test39
2014 // test40: FP. Synchronization via Mutexes and PCQ. 4 threads. W/W {{{1
2015 namespace test40 {
2016 // Similar to test38 but with different order of events (due to sleep).
2018 // Putter1: Putter2: Getter1: Getter2:
2019 // MU1.Lock() MU1.Lock()
2020 // write(GLOB) write(GLOB)
2021 // MU1.Unlock() MU1.Unlock()
2022 // Q1.Put() Q2.Put()
2023 // Q1.Put() Q2.Put()
2024 // Q1.Get() Q1.Get()
2025 // Q2.Get() Q2.Get()
2026 // MU2.Lock() MU2.Lock()
2027 // write(GLOB) write(GLOB)
2028 // MU2.Unlock() MU2.Unlock()
2030 // MU1.Lock() MU1.Lock()
2031 // MU2.Lock() MU2.Lock()
2032 // write(GLOB) write(GLOB)
2033 // MU2.Unlock() MU2.Unlock()
2034 // MU1.Unlock() MU1.Unlock()
2037 ProducerConsumerQueue *Q1, *Q2;
2038 int GLOB = 0;
2039 Mutex MU, MU1, MU2;
2041 void Putter(ProducerConsumerQueue *q) {
2042 MU1.Lock();
2043 GLOB++;
2044 MU1.Unlock();
2046 q->Put(NULL);
2047 q->Put(NULL);
2048 usleep(100000);
2050 MU1.Lock();
2051 MU2.Lock();
2052 GLOB++;
2053 MU2.Unlock();
2054 MU1.Unlock();
2058 void Putter1() { Putter(Q1); }
2059 void Putter2() { Putter(Q2); }
2061 void Getter() {
2062 Q1->Get();
2063 Q2->Get();
2065 MU2.Lock();
2066 GLOB++;
2067 MU2.Unlock();
2069 usleep(48000); // TODO: remove this when FP in test32 is fixed.
2072 void Run() {
2073 // ANNOTATE_EXPECT_RACE(&GLOB, "test40. FP. Fixed by MSMProp1. Complex Stuff.");
2074 printf("test40: negative\n");
2075 Q1 = new ProducerConsumerQueue(INT_MAX);
2076 Q2 = new ProducerConsumerQueue(INT_MAX);
2077 MyThreadArray t(Getter, Getter, Putter1, Putter2);
2078 t.Start();
2079 t.Join();
2080 printf("\tGLOB=%d\n", GLOB);
2081 delete Q1;
2082 delete Q2;
2084 REGISTER_TEST(Run, 40);
2085 } // namespace test40
2087 // test41: TN. Test for race that appears when loading a dynamic symbol. {{{1
2088 namespace test41 {
2089 void Worker() {
2090 ANNOTATE_NO_OP(NULL); // An empty function, loaded from dll.
2092 void Run() {
2093 printf("test41: negative\n");
2094 MyThreadArray t(Worker, Worker, Worker);
2095 t.Start();
2096 t.Join();
2098 REGISTER_TEST2(Run, 41, FEATURE|NEEDS_ANNOTATIONS);
2099 } // namespace test41
2102 // test42: TN. Using the same cond var several times. {{{1
2103 namespace test42 {
2104 int GLOB = 0;
2105 int COND = 0;
2106 int N_threads = 3;
2107 Mutex MU;
2109 void Worker1() {
2110 GLOB=1;
2112 MU.Lock();
2113 COND = 1;
2114 CV.Signal();
2115 MU.Unlock();
2117 MU.Lock();
2118 while (COND != 0)
2119 CV.Wait(&MU);
2120 ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU);
2121 MU.Unlock();
2123 GLOB=3;
2127 void Worker2() {
2129 MU.Lock();
2130 while (COND != 1)
2131 CV.Wait(&MU);
2132 ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU);
2133 MU.Unlock();
2135 GLOB=2;
2137 MU.Lock();
2138 COND = 0;
2139 CV.Signal();
2140 MU.Unlock();
2144 void Run() {
2145 // ANNOTATE_EXPECT_RACE(&GLOB, "test42. TN. debugging.");
2146 printf("test42: negative\n");
2147 MyThreadArray t(Worker1, Worker2);
2148 t.Start();
2149 t.Join();
2150 printf("\tGLOB=%d\n", GLOB);
2152 REGISTER_TEST2(Run, 42, FEATURE|NEEDS_ANNOTATIONS);
2153 } // namespace test42
2157 // test43: TN. {{{1
2158 namespace test43 {
2160 // Putter: Getter:
2161 // 1. write
2162 // 2. Q.Put() --\ .
2163 // 3. read \--> a. Q.Get()
2164 // b. read
2165 int GLOB = 0;
2166 ProducerConsumerQueue Q(INT_MAX);
2167 void Putter() {
2168 GLOB = 1;
2169 Q.Put(NULL);
2170 CHECK(GLOB == 1);
2172 void Getter() {
2173 Q.Get();
2174 usleep(100000);
2175 CHECK(GLOB == 1);
2177 void Run() {
2178 printf("test43: negative\n");
2179 MyThreadArray t(Putter, Getter);
2180 t.Start();
2181 t.Join();
2182 printf("\tGLOB=%d\n", GLOB);
2184 REGISTER_TEST(Run, 43)
2185 } // namespace test43
2188 // test44: FP. {{{1
2189 namespace test44 {
2191 // Putter: Getter:
2192 // 1. read
2193 // 2. Q.Put() --\ .
2194 // 3. MU.Lock() \--> a. Q.Get()
2195 // 4. write
2196 // 5. MU.Unlock()
2197 // b. MU.Lock()
2198 // c. write
2199 // d. MU.Unlock();
2200 int GLOB = 0;
2201 Mutex MU;
2202 ProducerConsumerQueue Q(INT_MAX);
2203 void Putter() {
2204 CHECK(GLOB == 0);
2205 Q.Put(NULL);
2206 MU.Lock();
2207 GLOB = 1;
2208 MU.Unlock();
2210 void Getter() {
2211 Q.Get();
2212 usleep(100000);
2213 MU.Lock();
2214 GLOB = 1;
2215 MU.Unlock();
2217 void Run() {
2218 // ANNOTATE_EXPECT_RACE(&GLOB, "test44. FP. Fixed by MSMProp1.");
2219 printf("test44: negative\n");
2220 MyThreadArray t(Putter, Getter);
2221 t.Start();
2222 t.Join();
2223 printf("\tGLOB=%d\n", GLOB);
2225 REGISTER_TEST(Run, 44)
2226 } // namespace test44
2229 // test45: TN. {{{1
2230 namespace test45 {
2232 // Putter: Getter:
2233 // 1. read
2234 // 2. Q.Put() --\ .
2235 // 3. MU.Lock() \--> a. Q.Get()
2236 // 4. write
2237 // 5. MU.Unlock()
2238 // b. MU.Lock()
2239 // c. read
2240 // d. MU.Unlock();
2241 int GLOB = 0;
2242 Mutex MU;
2243 ProducerConsumerQueue Q(INT_MAX);
2244 void Putter() {
2245 CHECK(GLOB == 0);
2246 Q.Put(NULL);
2247 MU.Lock();
2248 GLOB++;
2249 MU.Unlock();
2251 void Getter() {
2252 Q.Get();
2253 usleep(100000);
2254 MU.Lock();
2255 CHECK(GLOB <= 1);
2256 MU.Unlock();
2258 void Run() {
2259 printf("test45: negative\n");
2260 MyThreadArray t(Putter, Getter);
2261 t.Start();
2262 t.Join();
2263 printf("\tGLOB=%d\n", GLOB);
2265 REGISTER_TEST(Run, 45)
2266 } // namespace test45
2269 // test46: FN. {{{1
2270 namespace test46 {
2272 // First: Second:
2273 // 1. write
2274 // 2. MU.Lock()
2275 // 3. write
2276 // 4. MU.Unlock() (sleep)
2277 // a. MU.Lock()
2278 // b. write
2279 // c. MU.Unlock();
2280 int GLOB = 0;
2281 Mutex MU;
2282 void First() {
2283 GLOB++;
2284 MU.Lock();
2285 GLOB++;
2286 MU.Unlock();
2288 void Second() {
2289 usleep(480000);
2290 MU.Lock();
2291 GLOB++;
2292 MU.Unlock();
2294 // Just a print.
2295 // If we move it to Run() we will get a report in MSMHelgrind
2296 // due to its false positive (test32).
2297 MU.Lock();
2298 printf("\tGLOB=%d\n", GLOB);
2299 MU.Unlock();
2301 void Run() {
2302 ANNOTATE_TRACE_MEMORY(&GLOB);
2303 MyThreadArray t(First, Second);
2304 t.Start();
2305 t.Join();
2307 REGISTER_TEST(Run, 46)
2308 } // namespace test46
2311 // test47: TP. Not detected by pure happens-before detectors. {{{1
2312 namespace test47 {
2313 // A true race that cannot be detected by a pure happens-before
2314 // race detector.
2316 // First: Second:
2317 // 1. write
2318 // 2. MU.Lock()
2319 // 3. MU.Unlock() (sleep)
2320 // a. MU.Lock()
2321 // b. MU.Unlock();
2322 // c. write
2323 int GLOB = 0;
2324 Mutex MU;
2325 void First() {
2326 GLOB=1;
2327 MU.Lock();
2328 MU.Unlock();
2330 void Second() {
2331 usleep(480000);
2332 MU.Lock();
2333 MU.Unlock();
2334 GLOB++;
2336 void Run() {
2337 FAST_MODE_INIT(&GLOB);
2338 if (!Tsan_PureHappensBefore())
2339 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test47. TP. Not detected by pure HB.");
2340 printf("test47: positive\n");
2341 MyThreadArray t(First, Second);
2342 t.Start();
2343 t.Join();
2344 printf("\tGLOB=%d\n", GLOB);
2346 REGISTER_TEST(Run, 47)
2347 } // namespace test47
2350 // test48: FN. Simple race (single write vs multiple reads). {{{1
2351 namespace test48 {
2352 int GLOB = 0;
2353 // same as test10 but with single writer and multiple readers
2354 // A simple data race between a single writer and multiple readers.
2355 // Write happens before the reads (enforced by sleep),
2358 // Writer: Readers:
2359 // 1. write(GLOB) a. sleep(long enough so that GLOB
2360 // is most likely initialized by Writer)
2361 // b. read(GLOB)
2364 // Eraser algorithm does not detect the race here,
2365 // see Section 2.2 of http://citeseer.ist.psu.edu/savage97eraser.html.
2367 void Writer() {
2368 GLOB = 3;
2370 void Reader() {
2371 usleep(100000);
2372 CHECK(GLOB != -777);
2375 void Run() {
2376 FAST_MODE_INIT(&GLOB);
2377 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test48. TP. FN in MSMHelgrind.");
2378 printf("test48: positive\n");
2379 MyThreadArray t(Writer, Reader,Reader,Reader);
2380 t.Start();
2381 t.Join();
2382 printf("\tGLOB=%d\n", GLOB);
2384 REGISTER_TEST(Run, 48)
2385 } // namespace test48
2388 // test49: FN. Simple race (single write vs multiple reads). {{{1
2389 namespace test49 {
2390 int GLOB = 0;
2391 // same as test10 but with multiple read operations done by a single reader
2392 // A simple data race between writer and readers.
2393 // Write happens before Read (enforced by sleep(1)),
2395 // Writer: Reader:
2396 // 1. write(GLOB) a. sleep(long enough so that GLOB
2397 // is most likely initialized by Writer)
2398 // b. read(GLOB)
2399 // c. read(GLOB)
2400 // d. read(GLOB)
2401 // e. read(GLOB)
2404 // Eraser algorithm does not detect the race here,
2405 // see Section 2.2 of http://citeseer.ist.psu.edu/savage97eraser.html.
2407 void Writer() {
2408 GLOB = 3;
2410 void Reader() {
2411 usleep(100000);
2412 CHECK(GLOB != -777);
2413 CHECK(GLOB != -777);
2414 CHECK(GLOB != -777);
2415 CHECK(GLOB != -777);
2418 void Run() {
2419 FAST_MODE_INIT(&GLOB);
2420 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test49. TP. FN in MSMHelgrind.");
2421 printf("test49: positive\n");
2422 MyThreadArray t(Writer, Reader);
2423 t.Start();
2424 t.Join();
2425 printf("\tGLOB=%d\n", GLOB);
2427 REGISTER_TEST(Run, 49);
2428 } // namespace test49
2431 // test50: TP. Synchronization via CondVar. {{{1
2432 namespace test50 {
2433 int GLOB = 0;
2434 Mutex MU;
2435 // The last two write accesses to GLOB are not synchronized
2437 // Waiter: Waker:
2438 // 1. COND = 0
2439 // 2. Start(Waker)
2440 // 3. MU.Lock() a. write(GLOB)
2441 // b. MU.Lock()
2442 // c. COND = 1
2443 // /--- d. CV.Signal()
2444 // 4. while(COND != 1) / e. MU.Unlock()
2445 // CV.Wait(MU) <---/
2446 // 5. MU.Unlock()
2447 // 6. write(GLOB) f. MU.Lock()
2448 // g. write(GLOB)
2449 // h. MU.Unlock()
2452 void Waker() {
2453 usleep(100000); // Make sure the waiter blocks.
2455 GLOB = 1;
2457 MU.Lock();
2458 COND = 1;
2459 CV.Signal();
2460 MU.Unlock();
2462 usleep(100000);
2463 MU.Lock();
2464 GLOB = 3;
2465 MU.Unlock();
2468 void Waiter() {
2469 ThreadPool pool(1);
2470 pool.StartWorkers();
2471 COND = 0;
2472 pool.Add(NewCallback(Waker));
2474 MU.Lock();
2475 while(COND != 1)
2476 CV.Wait(&MU);
2477 ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU);
2478 MU.Unlock();
2480 GLOB = 2;
2482 void Run() {
2483 FAST_MODE_INIT(&GLOB);
2484 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test50. TP.");
2485 printf("test50: positive\n");
2486 Waiter();
2487 printf("\tGLOB=%d\n", GLOB);
2489 REGISTER_TEST2(Run, 50, FEATURE|NEEDS_ANNOTATIONS);
2490 } // namespace test50
2493 // test51: TP. Synchronization via CondVar: problem with several signals. {{{1
2494 namespace test51 {
2495 int GLOB = 0;
2496 int COND = 0;
2497 Mutex MU;
2500 // scheduler-dependent results because of several signals;
2501 // the second signal will be lost
2503 // Waiter: Waker:
2504 // 1. Start(Waker)
2505 // 2. MU.Lock()
2506 // 3. while(COND)
2507 // CV.Wait(MU)<-\ .
2508 // 4. MU.Unlock() \ .
2509 // 5. write(GLOB) \ a. write(GLOB)
2510 // \ b. MU.Lock()
2511 // \ c. COND = 1
2512 // \--- d. CV.Signal()
2513 // e. MU.Unlock()
2515 // f. write(GLOB)
2517 // g. MU.Lock()
2518 // h. COND = 1
2519 // LOST<---- i. CV.Signal()
2520 // j. MU.Unlock()
2522 void Waker() {
2524 usleep(10000); // Make sure the waiter blocks.
2526 GLOB = 1;
2528 MU.Lock();
2529 COND = 1;
2530 CV.Signal();
2531 MU.Unlock();
2533 usleep(10000); // Make sure the waiter is signalled.
2535 GLOB = 2;
2537 MU.Lock();
2538 COND = 1;
2539 CV.Signal(); //Lost Signal
2540 MU.Unlock();
2543 void Waiter() {
2545 ThreadPool pool(1);
2546 pool.StartWorkers();
2547 pool.Add(NewCallback(Waker));
2549 MU.Lock();
2550 while(COND != 1)
2551 CV.Wait(&MU);
2552 MU.Unlock();
2555 GLOB = 3;
2557 void Run() {
2558 FAST_MODE_INIT(&GLOB);
2559 ANNOTATE_EXPECT_RACE(&GLOB, "test51. TP.");
2560 printf("test51: positive\n");
2561 Waiter();
2562 printf("\tGLOB=%d\n", GLOB);
2564 REGISTER_TEST(Run, 51);
2565 } // namespace test51
2568 // test52: TP. Synchronization via CondVar: problem with several signals. {{{1
2569 namespace test52 {
2570 int GLOB = 0;
2571 int COND = 0;
2572 Mutex MU;
2574 // same as test51 but the first signal will be lost
2575 // scheduler-dependent results because of several signals
2577 // Waiter: Waker:
2578 // 1. Start(Waker)
2579 // a. write(GLOB)
2580 // b. MU.Lock()
2581 // c. COND = 1
2582 // LOST<---- d. CV.Signal()
2583 // e. MU.Unlock()
2585 // 2. MU.Lock()
2586 // 3. while(COND)
2587 // CV.Wait(MU)<-\ .
2588 // 4. MU.Unlock() \ f. write(GLOB)
2589 // 5. write(GLOB) \ .
2590 // \ g. MU.Lock()
2591 // \ h. COND = 1
2592 // \--- i. CV.Signal()
2593 // j. MU.Unlock()
2595 void Waker() {
2597 GLOB = 1;
2599 MU.Lock();
2600 COND = 1;
2601 CV.Signal(); //lost signal
2602 MU.Unlock();
2604 usleep(20000); // Make sure the waiter blocks
2606 GLOB = 2;
2608 MU.Lock();
2609 COND = 1;
2610 CV.Signal();
2611 MU.Unlock();
2614 void Waiter() {
2615 ThreadPool pool(1);
2616 pool.StartWorkers();
2617 pool.Add(NewCallback(Waker));
2619 usleep(10000); // Make sure the first signal will be lost
2621 MU.Lock();
2622 while(COND != 1)
2623 CV.Wait(&MU);
2624 MU.Unlock();
2626 GLOB = 3;
2628 void Run() {
2629 FAST_MODE_INIT(&GLOB);
2630 ANNOTATE_EXPECT_RACE(&GLOB, "test52. TP.");
2631 printf("test52: positive\n");
2632 Waiter();
2633 printf("\tGLOB=%d\n", GLOB);
2635 REGISTER_TEST(Run, 52);
2636 } // namespace test52
2639 // test53: FP. Synchronization via implicit semaphore. {{{1
2640 namespace test53 {
2641 // Correctly synchronized test, but the common lockset is empty.
2642 // The variable FLAG works as an implicit semaphore.
2643 // MSMHelgrind still does not complain since it does not maintain the lockset
2644 // in the exclusive state. But MSMProp1 does complain.
2645 // See also test54.
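// (The common lockset is empty because the Initializer writes GLOB under MU1
// while the Users update it under MU2; the real ordering comes from the FLAG
// handshake, which a lockset algorithm does not model.)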
2648 // Initializer: Users
2649 // 1. MU1.Lock()
2650 // 2. write(GLOB)
2651 // 3. FLAG = true
2652 // 4. MU1.Unlock()
2653 // a. MU1.Lock()
2654 // b. f = FLAG;
2655 // c. MU1.Unlock()
2656 // d. if (!f) goto a.
2657 // e. MU2.Lock()
2658 // f. write(GLOB)
2659 // g. MU2.Unlock()
2662 int GLOB = 0;
2663 bool FLAG = false;
2664 Mutex MU1, MU2;
2666 void Initializer() {
2667 MU1.Lock();
2668 GLOB = 1000;
2669 FLAG = true;
2670 MU1.Unlock();
2671 usleep(100000); // just in case
2674 void User() {
2675 bool f = false;
2676 while(!f) {
2677 MU1.Lock();
2678 f = FLAG;
2679 MU1.Unlock();
2680 usleep(10000);
2682 // at this point Initializer will not access GLOB again
2683 MU2.Lock();
2684 CHECK(GLOB >= 1000);
2685 GLOB++;
2686 MU2.Unlock();
2689 void Run() {
2690 FAST_MODE_INIT(&GLOB);
2691 if (!Tsan_PureHappensBefore())
2692 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test53. FP. Implicit semaphore");
2693 printf("test53: FP. false positive, Implicit semaphore\n");
2694 MyThreadArray t(Initializer, User, User);
2695 t.Start();
2696 t.Join();
2697 printf("\tGLOB=%d\n", GLOB);
2699 REGISTER_TEST(Run, 53)
2700 } // namespace test53
2703 // test54: TN. Synchronization via implicit semaphore. Annotated {{{1
2704 namespace test54 {
2705 // Same as test53, but annotated.
2706 int GLOB = 0;
2707 bool FLAG = false;
2708 Mutex MU1, MU2;
2710 void Initializer() {
2711 MU1.Lock();
2712 GLOB = 1000;
2713 FLAG = true;
2714 ANNOTATE_CONDVAR_SIGNAL(&GLOB);
2715 MU1.Unlock();
2716 usleep(100000); // just in case
2719 void User() {
2720 bool f = false;
2721 while(!f) {
2722 MU1.Lock();
2723 f = FLAG;
2724 MU1.Unlock();
2725 usleep(10000);
2727 // at this point Initializer will not access GLOB again
2728 ANNOTATE_CONDVAR_WAIT(&GLOB);
2729 MU2.Lock();
2730 CHECK(GLOB >= 1000);
2731 GLOB++;
2732 MU2.Unlock();
2735 void Run() {
2736 printf("test54: negative\n");
2737 MyThreadArray t(Initializer, User, User);
2738 t.Start();
2739 t.Join();
2740 printf("\tGLOB=%d\n", GLOB);
2742 REGISTER_TEST2(Run, 54, FEATURE|NEEDS_ANNOTATIONS)
2743 } // namespace test54
2746 // test55: FP. Synchronization with TryLock. Not easy for race detectors {{{1
2747 namespace test55 {
2748 // "Correct" synchronization with TryLock and Lock.
2750 // This scheme is actually very risky.
2751 // It is covered in detail in this video:
2752 // http://youtube.com/watch?v=mrvAqvtWYb4 (slide 36, near 50-th minute).
2753 int GLOB = 0;
2754 Mutex MU;
2756 void Worker_Lock() {
2757 GLOB = 1;
2758 MU.Lock();
2761 void Worker_TryLock() {
2762 while (true) {
2763 if (!MU.TryLock()) {
2764 MU.Unlock();
2765 break;
2767 else
2768 MU.Unlock();
2769 usleep(100);
2771 GLOB = 2;
2774 void Run() {
2775 printf("test55:\n");
2776 MyThreadArray t(Worker_Lock, Worker_TryLock);
2777 t.Start();
2778 t.Join();
2779 printf("\tGLOB=%d\n", GLOB);
2781 REGISTER_TEST2(Run, 55, FEATURE|EXCLUDE_FROM_ALL);
2782 } // namespace test55
2786 // test56: TP. Use of ANNOTATE_BENIGN_RACE. {{{1
2787 namespace test56 {
2788 // For whatever reason the user wants to treat
2789 // a race on GLOB as a benign race.
2790 int GLOB = 0;
2791 int GLOB2 = 0;
2793 void Worker() {
2794 GLOB++;
2797 void Run() {
2798 ANNOTATE_BENIGN_RACE(&GLOB, "test56. Use of ANNOTATE_BENIGN_RACE.");
2799 ANNOTATE_BENIGN_RACE(&GLOB2, "No race. The tool should be silent");
2800 printf("test56: positive\n");
2801 MyThreadArray t(Worker, Worker, Worker, Worker);
2802 t.Start();
2803 t.Join();
2804 printf("\tGLOB=%d\n", GLOB);
2806 REGISTER_TEST2(Run, 56, FEATURE|NEEDS_ANNOTATIONS)
2807 } // namespace test56
2810 // test57: TN: Correct use of atomics. {{{1
2811 namespace test57 {
2812 int GLOB = 0;
2813 void Writer() {
2814 for (int i = 0; i < 10; i++) {
2815 AtomicIncrement(&GLOB, 1);
2816 usleep(1000);
2819 void Reader() {
2820 while (GLOB < 20) usleep(1000);
2822 void Run() {
2823 printf("test57: negative\n");
2824 MyThreadArray t(Writer, Writer, Reader, Reader);
2825 t.Start();
2826 t.Join();
2827 CHECK(GLOB == 20);
2828 printf("\tGLOB=%d\n", GLOB);
2830 REGISTER_TEST(Run, 57)
2831 } // namespace test57
2834 // test58: TN. User defined synchronization. {{{1
2835 namespace test58 {
2836 int GLOB1 = 1;
2837 int GLOB2 = 2;
2838 int FLAG1 = 0;
2839 int FLAG2 = 0;
2841 // Correctly synchronized test, but the common lockset is empty.
2842 // The variables FLAG1 and FLAG2 are used for synchronization and as
2843 // temporary variables for swapping two global values.
2844 // Such synchronization is rarely used (Excluded from all tests??).
2846 void Worker2() {
2847 FLAG1=GLOB2;
2849 while(!FLAG2)
2851 GLOB2=FLAG2;
2854 void Worker1() {
2855 FLAG2=GLOB1;
2857 while(!FLAG1)
2859 GLOB1=FLAG1;
2862 void Run() {
2863 printf("test58:\n");
2864 MyThreadArray t(Worker1, Worker2);
2865 t.Start();
2866 t.Join();
2867 printf("\tGLOB1=%d\n", GLOB1);
2868 printf("\tGLOB2=%d\n", GLOB2);
2870 REGISTER_TEST2(Run, 58, FEATURE|EXCLUDE_FROM_ALL)
2871 } // namespace test58
2875 // test59: TN. User defined synchronization. Annotated {{{1
2876 namespace test59 {
2877 int COND1 = 0;
2878 int COND2 = 0;
2879 int GLOB1 = 1;
2880 int GLOB2 = 2;
2881 int FLAG1 = 0;
2882 int FLAG2 = 0;
2883 // same as test 58 but annotated
2885 void Worker2() {
2886 FLAG1=GLOB2;
2887 ANNOTATE_CONDVAR_SIGNAL(&COND2);
2888 while(!FLAG2) usleep(1);
2889 ANNOTATE_CONDVAR_WAIT(&COND1);
2890 GLOB2=FLAG2;
2893 void Worker1() {
2894 FLAG2=GLOB1;
2895 ANNOTATE_CONDVAR_SIGNAL(&COND1);
2896 while(!FLAG1) usleep(1);
2897 ANNOTATE_CONDVAR_WAIT(&COND2);
2898 GLOB1=FLAG1;
2901 void Run() {
2902 printf("test59: negative\n");
2903 ANNOTATE_BENIGN_RACE(&FLAG1, "synchronization via 'safe' race");
2904 ANNOTATE_BENIGN_RACE(&FLAG2, "synchronization via 'safe' race");
2905 MyThreadArray t(Worker1, Worker2);
2906 t.Start();
2907 t.Join();
2908 printf("\tGLOB1=%d\n", GLOB1);
2909 printf("\tGLOB2=%d\n", GLOB2);
2911 REGISTER_TEST2(Run, 59, FEATURE|NEEDS_ANNOTATIONS)
2912 } // namespace test59
2915 // test60: TN. Correct synchronization using signal-wait {{{1
2916 namespace test60 {
2917 int COND1 = 0;
2918 int COND2 = 0;
2919 int GLOB1 = 1;
2920 int GLOB2 = 2;
2921 int FLAG2 = 0;
2922 int FLAG1 = 0;
2923 Mutex MU;
2924 // same as test 59 but synchronized with signal-wait.
2926 void Worker2() {
2927 FLAG1=GLOB2;
2929 MU.Lock();
2930 COND1 = 1;
2931 CV.Signal();
2932 MU.Unlock();
2934 MU.Lock();
2935 while(COND2 != 1)
2936 CV.Wait(&MU);
2937 ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU);
2938 MU.Unlock();
2940 GLOB2=FLAG2;
2943 void Worker1() {
2944 FLAG2=GLOB1;
2946 MU.Lock();
2947 COND2 = 1;
2948 CV.Signal();
2949 MU.Unlock();
2951 MU.Lock();
2952 while(COND1 != 1)
2953 CV.Wait(&MU);
2954 ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU);
2955 MU.Unlock();
2957 GLOB1=FLAG1;
2960 void Run() {
2961 printf("test60: negative\n");
2962 MyThreadArray t(Worker1, Worker2);
2963 t.Start();
2964 t.Join();
2965 printf("\tGLOB1=%d\n", GLOB1);
2966 printf("\tGLOB2=%d\n", GLOB2);
2968 REGISTER_TEST2(Run, 60, FEATURE|NEEDS_ANNOTATIONS)
2969 } // namespace test60
2972 // test61: TN. Synchronization via Mutex as in happens-before, annotated. {{{1
2973 namespace test61 {
2974 Mutex MU;
2975 int GLOB = 0;
2976 int *P1 = NULL, *P2 = NULL;
2978 // In this test Mutex lock/unlock operations introduce happens-before relation.
2979 // We annotate the code so that MU is treated as in pure happens-before detector.
2982 void Putter() {
2983 ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&MU);
2984 MU.Lock();
2985 if (P1 == NULL) {
2986 P1 = &GLOB;
2987 *P1 = 1;
2989 MU.Unlock();
2992 void Getter() {
2993 bool done = false;
2994 while (!done) {
2995 MU.Lock();
2996 if (P1) {
2997 done = true;
2998 P2 = P1;
2999 P1 = NULL;
3001 MU.Unlock();
3003 *P2 = 2;
3007 void Run() {
3008 printf("test61: negative\n");
3009 MyThreadArray t(Putter, Getter);
3010 t.Start();
3011 t.Join();
3012 printf("\tGLOB=%d\n", GLOB);
3014 REGISTER_TEST2(Run, 61, FEATURE|NEEDS_ANNOTATIONS)
3015 } // namespace test61
3018 // test62: STAB. Create as many segments as possible. {{{1
3019 namespace test62 {
3020 // Helgrind 3.3.0 will fail as it has a hard limit of < 2^24 segments.
3021 // A better scheme is to implement garbage collection for segments.
3022 ProducerConsumerQueue Q(INT_MAX);
3023 const int N = 1 << 22;
3025 void Putter() {
3026 for (int i = 0; i < N; i++){
3027 if ((i % (N / 8)) == 0) {
3028 printf("i=%d\n", i);
3030 Q.Put(NULL);
3034 void Getter() {
3035 for (int i = 0; i < N; i++)
3036 Q.Get();
3039 void Run() {
3040 printf("test62:\n");
3041 MyThreadArray t(Putter, Getter);
3042 t.Start();
3043 t.Join();
3045 REGISTER_TEST2(Run, 62, STABILITY|EXCLUDE_FROM_ALL)
3046 } // namespace test62
3049 // test63: STAB. Create as many segments as possible and do it fast. {{{1
3050 namespace test63 {
3051 // Helgrind 3.3.0 will fail as it has a hard limit of < 2^24 segments.
3052 // A better scheme is to implement garbage collection for segments.
3053 const int N = 1 << 24;
3054 int C = 0;
3056 void Putter() {
3057 for (int i = 0; i < N; i++){
3058 if ((i % (N / 8)) == 0) {
3059 printf("i=%d\n", i);
3061 ANNOTATE_CONDVAR_SIGNAL(&C);
3065 void Getter() {
3068 void Run() {
3069 printf("test63:\n");
3070 MyThreadArray t(Putter, Getter);
3071 t.Start();
3072 t.Join();
3074 REGISTER_TEST2(Run, 63, STABILITY|EXCLUDE_FROM_ALL)
3075 } // namespace test63
3078 // test64: TP. T2 happens-before T3, but T1 is independent. Reads in T1/T2. {{{1
3079 namespace test64 {
3080 // True race between T1 and T3:
3082 // T1: T2: T3:
3083 // 1. read(GLOB) (sleep)
3084 // a. read(GLOB)
3085 // b. Q.Put() -----> A. Q.Get()
3086 // B. write(GLOB)
3090 int GLOB = 0;
3091 ProducerConsumerQueue Q(INT_MAX);
3093 void T1() {
3094 CHECK(GLOB == 0);
3097 void T2() {
3098 usleep(100000);
3099 CHECK(GLOB == 0);
3100 Q.Put(NULL);
3103 void T3() {
3104 Q.Get();
3105 GLOB = 1;
3109 void Run() {
3110 FAST_MODE_INIT(&GLOB);
3111 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test64: TP.");
3112 printf("test64: positive\n");
3113 MyThreadArray t(T1, T2, T3);
3114 t.Start();
3115 t.Join();
3116 printf("\tGLOB=%d\n", GLOB);
3118 REGISTER_TEST(Run, 64)
3119 } // namespace test64
3122 // test65: TP. T2 happens-before T3, but T1 is independent. Writes in T1/T2. {{{1
3123 namespace test65 {
3124 // Similar to test64.
3125 // True race between T1 and T3:
3127 // T1: T2: T3:
3128 // 1. MU.Lock()
3129 // 2. write(GLOB)
3130 // 3. MU.Unlock() (sleep)
3131 // a. MU.Lock()
3132 // b. write(GLOB)
3133 // c. MU.Unlock()
3134 // d. Q.Put() -----> A. Q.Get()
3135 // B. write(GLOB)
3139 int GLOB = 0;
3140 Mutex MU;
3141 ProducerConsumerQueue Q(INT_MAX);
3143 void T1() {
3144 MU.Lock();
3145 GLOB++;
3146 MU.Unlock();
3149 void T2() {
3150 usleep(100000);
3151 MU.Lock();
3152 GLOB++;
3153 MU.Unlock();
3154 Q.Put(NULL);
3157 void T3() {
3158 Q.Get();
3159 GLOB = 1;
3163 void Run() {
3164 FAST_MODE_INIT(&GLOB);
3165 if (!Tsan_PureHappensBefore())
3166 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test65. TP.");
3167 printf("test65: positive\n");
3168 MyThreadArray t(T1, T2, T3);
3169 t.Start();
3170 t.Join();
3171 printf("\tGLOB=%d\n", GLOB);
3173 REGISTER_TEST(Run, 65)
3174 } // namespace test65
3177 // test66: TN. Two separate pairs of signaller/waiter using the same CV. {{{1
3178 namespace test66 {
3179 int GLOB1 = 0;
3180 int GLOB2 = 0;
3181 int C1 = 0;
3182 int C2 = 0;
3183 Mutex MU;
3185 void Signaller1() {
3186 GLOB1 = 1;
3187 MU.Lock();
3188 C1 = 1;
3189 CV.Signal();
3190 MU.Unlock();
3193 void Signaller2() {
3194 GLOB2 = 1;
3195 usleep(100000);
3196 MU.Lock();
3197 C2 = 1;
3198 CV.Signal();
3199 MU.Unlock();
3202 void Waiter1() {
3203 MU.Lock();
3204 while (C1 != 1) CV.Wait(&MU);
3205 ANNOTATE_CONDVAR_WAIT(&CV);
3206 MU.Unlock();
3207 GLOB1 = 2;
3210 void Waiter2() {
3211 MU.Lock();
3212 while (C2 != 1) CV.Wait(&MU);
3213 ANNOTATE_CONDVAR_WAIT(&CV);
3214 MU.Unlock();
3215 GLOB2 = 2;
3218 void Run() {
3219 printf("test66: negative\n");
3220 MyThreadArray t(Signaller1, Signaller2, Waiter1, Waiter2);
3221 t.Start();
3222 t.Join();
3223 printf("\tGLOB=%d/%d\n", GLOB1, GLOB2);
3225 REGISTER_TEST2(Run, 66, FEATURE|NEEDS_ANNOTATIONS)
3226 } // namespace test66
3229 // test67: FN. Race between Signaller1 and Waiter2 {{{1
3230 namespace test67 {
3231 // Similar to test66, but there is a real race here.
3233 // Here we create a happens-before arc between Signaller1 and Waiter2
3234 // even though there should be no such arc.
3235 // However, it's probably impossible (or just very hard) to avoid it.
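// (Both signaller/waiter pairs share the single CV, so a detector that attaches
// the happens-before arc to the CV itself cannot tell which signal a given wait
// consumed; Waiter2 therefore appears to synchronize with Signaller1 as well,
// which hides the race.)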
3236 int GLOB = 0;
3237 int C1 = 0;
3238 int C2 = 0;
3239 Mutex MU;
3241 void Signaller1() {
3242 GLOB = 1;
3243 MU.Lock();
3244 C1 = 1;
3245 CV.Signal();
3246 MU.Unlock();
3249 void Signaller2() {
3250 usleep(100000);
3251 MU.Lock();
3252 C2 = 1;
3253 CV.Signal();
3254 MU.Unlock();
3257 void Waiter1() {
3258 MU.Lock();
3259 while (C1 != 1) CV.Wait(&MU);
3260 ANNOTATE_CONDVAR_WAIT(&CV);
3261 MU.Unlock();
3264 void Waiter2() {
3265 MU.Lock();
3266 while (C2 != 1) CV.Wait(&MU);
3267 ANNOTATE_CONDVAR_WAIT(&CV);
3268 MU.Unlock();
3269 GLOB = 2;
3272 void Run() {
3273 FAST_MODE_INIT(&GLOB);
3274 ANNOTATE_EXPECT_RACE(&GLOB, "test67. FN. Race between Signaller1 and Waiter2");
3275 printf("test67: positive\n");
3276 MyThreadArray t(Signaller1, Signaller2, Waiter1, Waiter2);
3277 t.Start();
3278 t.Join();
3279 printf("\tGLOB=%d\n", GLOB);
3281 REGISTER_TEST2(Run, 67, FEATURE|NEEDS_ANNOTATIONS|EXCLUDE_FROM_ALL)
3282 } // namespace test67
3285 // test68: TP. Writes are protected by MU, reads are not. {{{1
3286 namespace test68 {
3287 // In this test, all writes to GLOB are protected by a mutex
3288 // but some reads go unprotected.
3289 // This is certainly a race, but in some cases such code could occur in
3290 // a correct program. For example, the unprotected reads may be used
3291 // for showing statistics and are not required to be precise.
3292 int GLOB = 0;
3293 int COND = 0;
3294 const int N_writers = 3;
3295 Mutex MU, MU1;
3297 void Writer() {
3298 for (int i = 0; i < 100; i++) {
3299 MU.Lock();
3300 GLOB++;
3301 MU.Unlock();
3304 // we are done
3305 MU1.Lock();
3306 COND++;
3307 MU1.Unlock();
3310 void Reader() {
3311 bool cont = true;
3312 while (cont) {
3313 CHECK(GLOB >= 0);
3315 // are we done?
3316 MU1.Lock();
3317 if (COND == N_writers)
3318 cont = false;
3319 MU1.Unlock();
3320 usleep(100);
3324 void Run() {
3325 FAST_MODE_INIT(&GLOB);
3326 ANNOTATE_EXPECT_RACE(&GLOB, "TP. Writes are protected, reads are not.");
3327 printf("test68: positive\n");
3328 MyThreadArray t(Reader, Writer, Writer, Writer);
3329 t.Start();
3330 t.Join();
3331 printf("\tGLOB=%d\n", GLOB);
3333 REGISTER_TEST(Run, 68)
3334 } // namespace test68
3337 // test69: {{{1
3338 namespace test69 {
3339 // This is the same as test68, but annotated.
3340 // We do not want to annotate GLOB as a benign race
3341 // because we want to allow racy reads only in certain places.
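// (The ANNOTATE_IGNORE_READS_BEGIN/END pair in Reader() below silences only the
// reads between the two annotations, whereas ANNOTATE_BENIGN_RACE would silence
// every access to GLOB.)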
3343 // TODO:
3344 int GLOB = 0;
3345 int COND = 0;
3346 const int N_writers = 3;
3347 int FAKE_MU = 0;
3348 Mutex MU, MU1;
3350 void Writer() {
3351 for (int i = 0; i < 10; i++) {
3352 MU.Lock();
3353 GLOB++;
3354 MU.Unlock();
3357 // we are done
3358 MU1.Lock();
3359 COND++;
3360 MU1.Unlock();
3363 void Reader() {
3364 bool cont = true;
3365 while (cont) {
3366 ANNOTATE_IGNORE_READS_BEGIN();
3367 CHECK(GLOB >= 0);
3368 ANNOTATE_IGNORE_READS_END();
3370 // are we done?
3371 MU1.Lock();
3372 if (COND == N_writers)
3373 cont = false;
3374 MU1.Unlock();
3375 usleep(100);
3379 void Run() {
3380 printf("test69: negative\n");
3381 MyThreadArray t(Reader, Writer, Writer, Writer);
3382 t.Start();
3383 t.Join();
3384 printf("\tGLOB=%d\n", GLOB);
3386 REGISTER_TEST(Run, 69)
3387 } // namespace test69
3389 // test70: STAB. Check that TRACE_MEMORY works. {{{1
3390 namespace test70 {
3391 int GLOB = 0;
3392 void Run() {
3393 printf("test70: negative\n");
3394 ANNOTATE_TRACE_MEMORY(&GLOB);
3395 GLOB = 1;
3396 printf("\tGLOB=%d\n", GLOB);
3398 REGISTER_TEST(Run, 70)
3399 } // namespace test70
3403 // test71: TN. strlen, index. {{{1
3404 namespace test71 {
3405 // This test is a reproducer for a benign race in strlen (as well as index, etc).
3406 // Some implementations of strlen may read up to 7 bytes past the end of the string
3407 // thus touching memory which may not belong to this string.
3408 // Such a race is benign because the data read past the end of the string is not used.
3410 // Here, we allocate an 8-byte aligned string str and initialize the first 5 bytes.
3411 // Then one thread calls strlen(str) (as well as index & rindex)
3412 // and another thread initializes str[5]..str[7].
3414 // This can be fixed in Helgrind by intercepting strlen and replacing it
3415 // with a simpler implementation.
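// A minimal sketch of such a simpler replacement (illustrative only, not used
// by this test): it reads one byte at a time and therefore never touches
// memory past the terminating '\0'.
static inline size_t simple_strlen_sketch(const char *s) {
  size_t n = 0;
  while (s[n] != '\0')  // stops exactly at the terminator
    n++;
  return n;
}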
3417 char *str;
3418 void WorkerX() {
3419 usleep(100000);
3420 CHECK(strlen(str) == 4);
3421 CHECK(index(str, 'X') == str);
3422 CHECK(index(str, 'x') == str+1);
3423 CHECK(index(str, 'Y') == NULL);
3424 CHECK(rindex(str, 'X') == str+2);
3425 CHECK(rindex(str, 'x') == str+3);
3426 CHECK(rindex(str, 'Y') == NULL);
3428 void WorkerY() {
3429 str[5] = 'Y';
3430 str[6] = 'Y';
3431 str[7] = '\0';
3434 void Run() {
3435 str = new char[8];
3436 str[0] = 'X';
3437 str[1] = 'x';
3438 str[2] = 'X';
3439 str[3] = 'x';
3440 str[4] = '\0';
3442 printf("test71: negative (strlen & index)\n");
3443 MyThread t1(WorkerY);
3444 MyThread t2(WorkerX);
3445 t1.Start();
3446 t2.Start();
3447 t1.Join();
3448 t2.Join();
3449 printf("\tstrX=%s; strY=%s\n", str, str+5);
3451 REGISTER_TEST(Run, 71)
3452 } // namespace test71
3455 // test72: STAB. Stress test for the number of segment sets (SSETs). {{{1
3456 namespace test72 {
3457 #ifndef NO_BARRIER
3458 // Variation of test33.
3459 // Instead of creating Nlog*N_iter threads,
3460 // we create Nlog threads and do N_iter barriers.
3461 int GLOB = 0;
3462 const int N_iter = 30;
3463 const int Nlog = 16;
3464 const int N = 1 << Nlog;
3465 static int64_t ARR1[N];
3466 static int64_t ARR2[N];
3467 Barrier *barriers[N_iter];
3468 Mutex MU;
3470 void Worker() {
3471 MU.Lock();
3472 int n = ++GLOB;
3473 MU.Unlock();
3475 n %= Nlog;
3477 long t0 = clock();
3478 long t __attribute__((unused)) = t0;
3480 for (int it = 0; it < N_iter; it++) {
3481 if(n == 0) {
3482 //printf("Iter: %d; %ld %ld\n", it, clock() - t, clock() - t0);
3483 t = clock();
3485 // Iterate N_iter times, block on barrier after each iteration.
3486 // This way Helgrind will create new segments after each barrier.
3488 for (int x = 0; x < 2; x++) {
3489 // run the inner loop twice.
3490 // When a memory location is accessed a second time, it is likely
3491 // that the state (SVal) will be unchanged.
3492 // The memory machine may optimize this case.
3493 for (int i = 0; i < N; i++) {
3494 // ARR1[i] and ARR2[N-1-i] are accessed by threads from i-th subset
3495 if (i & (1 << n)) {
3496 CHECK(ARR1[i] == 0);
3497 CHECK(ARR2[N-1-i] == 0);
3501 barriers[it]->Block();
3506 void Run() {
3507 printf("test72:\n");
3509 std::vector<MyThread*> vec(Nlog);
3511 for (int i = 0; i < N_iter; i++)
3512 barriers[i] = new Barrier(Nlog);
3514 // Create and start Nlog threads
3515 for (int i = 0; i < Nlog; i++) {
3516 vec[i] = new MyThread(Worker);
3517 vec[i]->Start();
3520 // Join all threads.
3521 for (int i = 0; i < Nlog; i++) {
3522 vec[i]->Join();
3523 delete vec[i];
3525 for (int i = 0; i < N_iter; i++)
3526 delete barriers[i];
3528 /*printf("\tGLOB=%d; ARR[1]=%d; ARR[7]=%d; ARR[N-1]=%d\n",
3529 GLOB, (int)ARR1[1], (int)ARR1[7], (int)ARR1[N-1]);*/
3531 REGISTER_TEST2(Run, 72, STABILITY|PERFORMANCE|EXCLUDE_FROM_ALL);
3532 #endif // NO_BARRIER
3533 } // namespace test72
3536 // test73: STAB. Stress test for the number of (SSETs), different access sizes. {{{1
3537 namespace test73 {
3538 #ifndef NO_BARRIER
3539 // Variation of test72.
3540 // We perform accesses of different sizes to the same location.
3541 int GLOB = 0;
3542 const int N_iter = 2;
3543 const int Nlog = 16;
3544 const int N = 1 << Nlog;
3545 union uint64_union {
3546 uint64_t u64[1];
3547 uint32_t u32[2];
3548 uint16_t u16[4];
3549 uint8_t u8 [8];
3551 static uint64_union ARR1[N];
3552 union uint32_union {
3553 uint32_t u32[1];
3554 uint16_t u16[2];
3555 uint8_t u8 [4];
3557 static uint32_union ARR2[N];
3558 Barrier *barriers[N_iter];
3559 Mutex MU;
3561 void Worker() {
3562 MU.Lock();
3563 int n = ++GLOB;
3564 MU.Unlock();
3566 n %= Nlog;
3568 for (int it = 0; it < N_iter; it++) {
3569 // Iterate N_iter times, block on barrier after each iteration.
3570 // This way Helgrind will create new segments after each barrier.
3572 for (int x = 0; x < 4; x++) {
3573 for (int i = 0; i < N; i++) {
3574 // ARR1[i] are accessed by threads from i-th subset
3575 if (i & (1 << n)) {
3576 for (int off = 0; off < (1 << x); off++) {
3577 switch(x) {
3578 case 0: CHECK(ARR1[i].u64[off] == 0); break;
3579 case 1: CHECK(ARR1[i].u32[off] == 0); break;
3580 case 2: CHECK(ARR1[i].u16[off] == 0); break;
3581 case 3: CHECK(ARR1[i].u8 [off] == 0); break;
3583 switch(x) {
3584 case 1: CHECK(ARR2[i].u32[off] == 0); break;
3585 case 2: CHECK(ARR2[i].u16[off] == 0); break;
3586 case 3: CHECK(ARR2[i].u8 [off] == 0); break;
3592 barriers[it]->Block();
3598 void Run() {
3599 printf("test73:\n");
3601 std::vector<MyThread*> vec(Nlog);
3603 for (int i = 0; i < N_iter; i++)
3604 barriers[i] = new Barrier(Nlog);
3606 // Create and start Nlog threads
3607 for (int i = 0; i < Nlog; i++) {
3608 vec[i] = new MyThread(Worker);
3609 vec[i]->Start();
3612 // Join all threads.
3613 for (int i = 0; i < Nlog; i++) {
3614 vec[i]->Join();
3615 delete vec[i];
3617 for (int i = 0; i < N_iter; i++)
3618 delete barriers[i];
3620 /*printf("\tGLOB=%d; ARR[1]=%d; ARR[7]=%d; ARR[N-1]=%d\n",
3621 GLOB, (int)ARR1[1], (int)ARR1[7], (int)ARR1[N-1]);*/
3623 REGISTER_TEST2(Run, 73, STABILITY|PERFORMANCE|EXCLUDE_FROM_ALL);
3624 #endif // NO_BARRIER
3625 } // namespace test73
3628 // test74: PERF. A lot of lock/unlock calls. {{{1
3629 namespace test74 {
3630 const int N = 100000;
3631 Mutex MU;
3632 void Run() {
3633 printf("test74: perf\n");
3634 for (int i = 0; i < N; i++ ) {
3635 MU.Lock();
3636 MU.Unlock();
3639 REGISTER_TEST(Run, 74)
3640 } // namespace test74
3643 // test75: TN. Test for sem_post, sem_wait, sem_trywait. {{{1
3644 namespace test75 {
3645 int GLOB = 0;
3646 sem_t sem[2];
3648 void Poster() {
3649 GLOB = 1;
3650 sem_post(&sem[0]);
3651 sem_post(&sem[1]);
3654 void Waiter() {
3655 sem_wait(&sem[0]);
3656 CHECK(GLOB==1);
3658 void TryWaiter() {
3659 usleep(500000);
3660 sem_trywait(&sem[1]);
3661 CHECK(GLOB==1);
3664 void Run() {
3665 #ifndef DRT_NO_SEM
3666 sem_init(&sem[0], 0, 0);
3667 sem_init(&sem[1], 0, 0);
3669 printf("test75: negative\n");
3671 MyThreadArray t(Poster, Waiter);
3672 t.Start();
3673 t.Join();
3675 GLOB = 2;
3677 MyThreadArray t(Poster, TryWaiter);
3678 t.Start();
3679 t.Join();
3681 printf("\tGLOB=%d\n", GLOB);
3683 sem_destroy(&sem[0]);
3684 sem_destroy(&sem[1]);
3685 #endif
3687 REGISTER_TEST(Run, 75)
3688 } // namespace test75
3690 // RefCountedClass {{{1
3691 struct RefCountedClass {
3692 public:
3693 RefCountedClass() {
3694 annotate_unref_ = false;
3695 ref_ = 0;
3696 data_ = 0;
3699 ~RefCountedClass() {
3700 CHECK(ref_ == 0); // race may be reported here
3701 int data_val = data_; // and here
3702 // if MU is not annotated
3703 data_ = 0;
3704 ref_ = -1;
3705 printf("\tRefCountedClass::data_ = %d\n", data_val);
3708 void AccessData() {
3709 this->mu_.Lock();
3710 this->data_++;
3711 this->mu_.Unlock();
3714 void Ref() {
3715 MU.Lock();
3716 CHECK(ref_ >= 0);
3717 ref_++;
3718 MU.Unlock();
3721 void Unref() {
3722 MU.Lock();
3723 CHECK(ref_ > 0);
3724 ref_--;
3725 bool do_delete = ref_ == 0;
3726 if (annotate_unref_) {
3727 ANNOTATE_CONDVAR_SIGNAL(this);
3729 MU.Unlock();
3730 if (do_delete) {
3731 if (annotate_unref_) {
3732 ANNOTATE_CONDVAR_WAIT(this);
3734 delete this;
3738 static void Annotate_MU() {
3739 ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&MU);
3741 void AnnotateUnref() {
3742 annotate_unref_ = true;
3744 void Annotate_Race() {
3745 ANNOTATE_BENIGN_RACE(&this->data_, "needs annotation");
3746 ANNOTATE_BENIGN_RACE(&this->ref_, "needs annotation");
3748 private:
3749 bool annotate_unref_;
3751 int data_;
3752 Mutex mu_; // protects data_
3754 int ref_;
3755 static Mutex MU; // protects ref_
3758 Mutex RefCountedClass::MU;
3760 // test76: FP. Ref counting, no annotations. {{{1
3761 namespace test76 {
3762 #ifndef NO_BARRIER
3763 int GLOB = 0;
3764 Barrier barrier(4);
3765 RefCountedClass *object = NULL;
3766 void Worker() {
3767 object->Ref();
3768 barrier.Block();
3769 object->AccessData();
3770 object->Unref();
3772 void Run() {
3773 printf("test76: false positive (ref counting)\n");
3774 object = new RefCountedClass;
3775 object->Annotate_Race();
3776 MyThreadArray t(Worker, Worker, Worker, Worker);
3777 t.Start();
3778 t.Join();
3780 REGISTER_TEST2(Run, 76, FEATURE)
3781 #endif // NO_BARRIER
3782 } // namespace test76
3786 // test77: TN. Ref counting, MU is annotated. {{{1
3787 namespace test77 {
3788 #ifndef NO_BARRIER
3789 // same as test76, but RefCountedClass::MU is annotated.
3790 int GLOB = 0;
3791 Barrier barrier(4);
3792 RefCountedClass *object = NULL;
3793 void Worker() {
3794 object->Ref();
3795 barrier.Block();
3796 object->AccessData();
3797 object->Unref();
3799 void Run() {
3800 printf("test77: true negative (ref counting), mutex is annotated\n");
3801 RefCountedClass::Annotate_MU();
3802 object = new RefCountedClass;
3803 MyThreadArray t(Worker, Worker, Worker, Worker);
3804 t.Start();
3805 t.Join();
3807 REGISTER_TEST(Run, 77)
3808 #endif // NO_BARRIER
3809 } // namespace test77
3813 // test78: TN. Ref counting, Unref is annotated. {{{1
3814 namespace test78 {
3815 #ifndef NO_BARRIER
3816 // same as test76, but RefCountedClass::Unref is annotated.
3817 int GLOB = 0;
3818 Barrier barrier(4);
3819 RefCountedClass *object = NULL;
3820 void Worker() {
3821 object->Ref();
3822 barrier.Block();
3823 object->AccessData();
3824 object->Unref();
3826 void Run() {
3827 printf("test78: true negative (ref counting), Unref is annotated\n");
3828 RefCountedClass::Annotate_MU();
3829 object = new RefCountedClass;
3830 MyThreadArray t(Worker, Worker, Worker, Worker);
3831 t.Start();
3832 t.Join();
3834 REGISTER_TEST(Run, 78)
3835 #endif // NO_BARRIER
3836 } // namespace test78
3840 // test79 TN. Swap. {{{1
3841 namespace test79 {
3842 #if 0
3843 typedef __gnu_cxx::hash_map<int, int> map_t;
3844 #else
3845 typedef std::map<int, int> map_t;
3846 #endif
3847 map_t MAP;
3848 Mutex MU;
3850 // Here we use swap to pass MAP between threads.
3851 // The synchronization is correct, but w/o ANNOTATE_MUTEX_IS_USED_AS_CONDVAR
3852 // Helgrind will complain.
3854 void Worker1() {
3855 map_t tmp;
3856 MU.Lock();
3857 // We swap the new empty map 'tmp' with 'MAP'.
3858 MAP.swap(tmp);
3859 MU.Unlock();
3860 // tmp (which is the old version of MAP) is destroyed here.
3863 void Worker2() {
3864 MU.Lock();
3865 MAP[1]++; // Just update MAP under MU.
3866 MU.Unlock();
3869 void Worker3() { Worker1(); }
3870 void Worker4() { Worker2(); }
3872 void Run() {
3873 ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&MU);
3874 printf("test79: negative\n");
3875 MyThreadArray t(Worker1, Worker2, Worker3, Worker4);
3876 t.Start();
3877 t.Join();
3879 REGISTER_TEST(Run, 79)
3880 } // namespace test79
3883 // AtomicRefCountedClass. {{{1
3884 // Same as RefCountedClass, but using atomic ops instead of mutex.
3885 struct AtomicRefCountedClass {
3886 public:
3887 AtomicRefCountedClass() {
3888 annotate_unref_ = false;
3889 ref_ = 0;
3890 data_ = 0;
3893 ~AtomicRefCountedClass() {
3894 CHECK(ref_ == 0); // race may be reported here
3895 int data_val = data_; // and here
3896 data_ = 0;
3897 ref_ = -1;
3898 printf("\tRefCountedClass::data_ = %d\n", data_val);
3901 void AccessData() {
3902 this->mu_.Lock();
3903 this->data_++;
3904 this->mu_.Unlock();
3907 void Ref() {
3908 AtomicIncrement(&ref_, 1);
3911 void Unref() {
3912 // DISCLAIMER: I am not sure I've implemented this correctly
3913 // (might require some memory barrier, etc).
3914 // But this implementation of reference counting is enough for
3915 // the purpose of Helgrind demonstration.
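// (A note on a more robust pattern: the separate "ref_ == 0" test below can
// itself race with another thread's decrement; testing the value returned by
// the atomic operation, e.g. "if (AtomicIncrement(&ref_, -1) == 0) delete this;",
// avoids that extra read (assuming AtomicIncrement returns the updated value,
// as its use in test84 suggests).)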
3916 AtomicIncrement(&ref_, -1);
3917 if (annotate_unref_) { ANNOTATE_CONDVAR_SIGNAL(this); }
3918 if (ref_ == 0) {
3919 if (annotate_unref_) { ANNOTATE_CONDVAR_WAIT(this); }
3920 delete this;
3924 void AnnotateUnref() {
3925 annotate_unref_ = true;
3927 void Annotate_Race() {
3928 ANNOTATE_BENIGN_RACE(&this->data_, "needs annotation");
3930 private:
3931 bool annotate_unref_;
3933 Mutex mu_;
3934 int data_; // under mu_
3936 int ref_; // used in atomic ops.
3939 // test80: FP. Ref counting with atomics, no annotations. {{{1
3940 namespace test80 {
3941 #ifndef NO_BARRIER
3942 int GLOB = 0;
3943 Barrier barrier(4);
3944 AtomicRefCountedClass *object = NULL;
3945 void Worker() {
3946 object->Ref();
3947 barrier.Block();
3948 object->AccessData();
3949 object->Unref(); // All the tricky stuff is here.
3951 void Run() {
3952 printf("test80: false positive (ref counting)\n");
3953 object = new AtomicRefCountedClass;
3954 object->Annotate_Race();
3955 MyThreadArray t(Worker, Worker, Worker, Worker);
3956 t.Start();
3957 t.Join();
3959 REGISTER_TEST2(Run, 80, FEATURE|EXCLUDE_FROM_ALL)
3960 #endif // NO_BARRIER
3961 } // namespace test80
3964 // test81: TN. Ref counting with atomics, Unref is annotated. {{{1
3965 namespace test81 {
3966 #ifndef NO_BARRIER
3967 // same as test80, but Unref is annotated.
3968 int GLOB = 0;
3969 Barrier barrier(4);
3970 AtomicRefCountedClass *object = NULL;
3971 void Worker() {
3972 object->Ref();
3973 barrier.Block();
3974 object->AccessData();
3975 object->Unref(); // All the tricky stuff is here.
3977 void Run() {
3978 printf("test81: negative (annotated ref counting)\n");
3979 object = new AtomicRefCountedClass;
3980 object->AnnotateUnref();
3981 MyThreadArray t(Worker, Worker, Worker, Worker);
3982 t.Start();
3983 t.Join();
3985 REGISTER_TEST2(Run, 81, FEATURE|EXCLUDE_FROM_ALL)
3986 #endif // NO_BARRIER
3987 } // namespace test81
3990 // test82: Object published w/o synchronization. {{{1
3991 namespace test82 {
3993 // Writer creates a new object and makes the pointer visible to the Reader.
3994 // Reader waits until the object pointer is non-null and reads the object.
3996 // On Core 2 Duo this test will sometimes (quite rarely) fail in
3997 // the CHECK below, at least if compiled with -O2.
3999 // The sequence of events:
4000 // Thread1: Thread2:
4001 // a. arr_[...] = ...
4002 // b. foo[i] = ...
4003 // A. ... = foo[i]; // non NULL
4004 // B. ... = arr_[...];
4006 // Since there is no proper synchronization, during the event (B)
4007 // Thread2 may not see the result of the event (a).
4008 // On x86 and x86_64 this happens due to the compiler reordering instructions.
4009 // On other architectures it may also happen due to cache inconsistency.
4011 class FOO {
4012 public:
4013 FOO() {
4014 idx_ = rand() % 1024;
4015 arr_[idx_] = 77777;
4016 // __asm__ __volatile__("" : : : "memory"); // this fixes!
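// (The commented-out statement above is only a compiler barrier: it prevents
// the compiler from reordering the arr_[idx_] store with the later publication
// of the pointer, which is enough on x86/x86_64 where stores are not reordered
// with other stores; a weakly-ordered CPU would also need a hardware release
// barrier.)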
4018 static void check(volatile FOO *foo) {
4019 CHECK(foo->arr_[foo->idx_] == 77777);
4021 private:
4022 int idx_;
4023 int arr_[1024];
4026 const int N = 100000;
4027 static volatile FOO *foo[N];
4028 Mutex MU;
4030 void Writer() {
4031 for (int i = 0; i < N; i++) {
4032 foo[i] = new FOO;
4033 usleep(100);
4037 void Reader() {
4038 for (int i = 0; i < N; i++) {
4039 while (!foo[i]) {
4040 MU.Lock(); // this is NOT a synchronization,
4041 MU.Unlock(); // it just helps foo[i] to become visible in Reader.
4043 if ((i % 100) == 0) {
4044 printf("rd %d\n", i);
4046 // At this point Reader() sees the new value of foo[i]
4047 // but in very rare cases will not see the new value of foo[i]->arr_.
4048 // Thus this CHECK will sometimes fail.
4049 FOO::check(foo[i]);
4053 void Run() {
4054 printf("test82: positive\n");
4055 MyThreadArray t(Writer, Reader);
4056 t.Start();
4057 t.Join();
4059 REGISTER_TEST2(Run, 82, FEATURE|EXCLUDE_FROM_ALL)
4060 } // namespace test82
4063 // test83: Object published w/o synchronization (simple version){{{1
4064 namespace test83 {
4065 // A simplified version of test82 (an example of incorrect code).
4066 // This test, though incorrect, will almost never fail.
4067 volatile static int *ptr = NULL;
4068 Mutex MU;
4070 void Writer() {
4071 usleep(100);
4072 ptr = new int(777);
4075 void Reader() {
4076 while(!ptr) {
4077 MU.Lock(); // Not a synchronization!
4078 MU.Unlock();
4080 CHECK(*ptr == 777);
4083 void Run() {
4084 // printf("test83: positive\n");
4085 MyThreadArray t(Writer, Reader);
4086 t.Start();
4087 t.Join();
4089 REGISTER_TEST2(Run, 83, FEATURE|EXCLUDE_FROM_ALL)
4090 } // namespace test83
4093 // test84: TP. True race (regression test for a bug related to atomics){{{1
4094 namespace test84 {
4095 // Helgrind should not create HB arcs for the bus lock even when
4096 // --pure-happens-before=yes is used.
4097 // Bug found by Bart Van Assche; the test is taken from the
4098 // Valgrind file drd/tests/atomic_var.c.
4099 static int s_x = 0;
4100 /* s_dummy[] ensures that s_x and s_y are not in the same cache line. */
4101 static char s_dummy[512] = {0};
4102 static int s_y;
4104 void thread_func_1()
4106 s_y = 1;
4107 AtomicIncrement(&s_x, 1);
4110 void thread_func_2()
4112 while (AtomicIncrement(&s_x, 0) == 0)
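// (AtomicIncrement(&s_x, 0) adds zero, i.e. the loop above uses it as an
// atomic read of s_x.)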
4114 printf("y = %d\n", s_y);
4118 void Run() {
4119 CHECK(s_dummy[0] == 0); // Avoid compiler warning about 's_dummy unused'.
4120 printf("test84: positive\n");
4121 FAST_MODE_INIT(&s_y);
4122 ANNOTATE_EXPECT_RACE_FOR_TSAN(&s_y, "test84: TP. true race.");
4123 MyThreadArray t(thread_func_1, thread_func_2);
4124 t.Start();
4125 t.Join();
4127 REGISTER_TEST(Run, 84)
4128 } // namespace test84
4131 // test85: Test for RunningOnValgrind(). {{{1
4132 namespace test85 {
4133 int GLOB = 0;
4134 void Run() {
4135 printf("test85: RunningOnValgrind() = %d\n", RunningOnValgrind());
4137 REGISTER_TEST(Run, 85)
4138 } // namespace test85
4141 // test86: Test for race inside DTOR: racey write to vptr. Benign. {{{1
4142 namespace test86 {
4143 // This test shows a racey access to vptr (the pointer to vtbl).
4144 // We have class A and class B derived from A.
4145 // Both classes have a virtual function f() and a virtual DTOR.
4146 // We create an object 'A *a = new B'
4147 // and pass this object from Thread1 to Thread2.
4148 // Thread2 calls a->f(). This call reads a->vptr.
4149 // Thread1 deletes the object. B::~B waits until the object can be destroyed
4150 // (flag_stopped == true) but at the very beginning of B::~B
4151 // a->vptr is written to.
4152 // So, we have a race on a->vptr.
4153 // On this particular test this race is benign, but test87 shows
4154 // how such race could harm.
4158 // Thread1:                                            Thread2:
4159 // 1. A *a = new B;
4160 // 2. Q.Put(a); ------------\ .
4161 // \--------------------> a. a = Q.Get();
4162 // b. a->f();
4163 // /--------- c. flag_stopped = true;
4164 // 3. delete a; /
4165 // waits until flag_stopped <------/
4166 // inside the dtor
4169 bool flag_stopped = false;
4170 Mutex mu;
4172 ProducerConsumerQueue Q(INT_MAX); // Used to pass A* between threads.
4174 struct A {
4175 A() { printf("A::A()\n"); }
4176 virtual ~A() { printf("A::~A()\n"); }
4177 virtual void f() { }
4179 uintptr_t padding[15];
4180 } __attribute__ ((aligned (64)));
4182 struct B: A {
4183 B() { printf("B::B()\n"); }
4184 virtual ~B() {
4185 // The race is here. <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
4186 printf("B::~B()\n");
4187 // wait until flag_stopped is true.
4188 mu.LockWhen(Condition(&ArgIsTrue, &flag_stopped));
4189 mu.Unlock();
4190 printf("B::~B() done\n");
4192 virtual void f() { }
4195 void Waiter() {
4196 A *a = new B;
4197 if (!Tsan_FastMode())
4198 ANNOTATE_EXPECT_RACE(a, "test86: expected race on a->vptr");
4199 printf("Waiter: B created\n");
4200 Q.Put(a);
4201 usleep(100000); // so that Worker calls a->f() first.
4202 printf("Waiter: deleting B\n");
4203 delete a;
4204 printf("Waiter: B deleted\n");
4205 usleep(100000);
4206 printf("Waiter: done\n");
4209 void Worker() {
4210 A *a = reinterpret_cast<A*>(Q.Get());
4211 printf("Worker: got A\n");
4212 a->f();
4214 mu.Lock();
4215 flag_stopped = true;
4216 mu.Unlock();
4217 usleep(200000);
4218 printf("Worker: done\n");
4221 void Run() {
4222 printf("test86: positive, race inside DTOR\n");
4223 MyThreadArray t(Waiter, Worker);
4224 t.Start();
4225 t.Join();
4227 REGISTER_TEST(Run, 86)
4228 } // namespace test86
4231 // test87: Test for race inside DTOR: racey write to vptr. Harmful.{{{1
4232 namespace test87 {
4233 // A variation of test86 where the race is harmful.
4234 // Here we have class C derived from B.
4235 // We create an object 'A *a = new C' in Thread1 and pass it to Thread2.
4236 // Thread2 calls a->f().
4237 // Thread1 calls 'delete a'.
4238 // It first calls C::~C, then B::~B where it rewrites the vptr to point
4239 // to B::vtbl. This is a problem because Thread2 might not have called a->f()
4240 // and now it will call B::f instead of C::f.
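// (Since B::f is pure virtual, such a call through the rewritten vptr would
// typically terminate with a "pure virtual method called" diagnostic rather
// than silently calling the wrong function.)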
4242 bool flag_stopped = false;
4243 Mutex mu;
4245 ProducerConsumerQueue Q(INT_MAX); // Used to pass A* between threads.
4247 struct A {
4248 A() { printf("A::A()\n"); }
4249 virtual ~A() { printf("A::~A()\n"); }
4250 virtual void f() = 0; // pure virtual.
4253 struct B: A {
4254 B() { printf("B::B()\n"); }
4255 virtual ~B() {
4256 // The race is here. <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
4257 printf("B::~B()\n");
4258 // wait until flag_stopped is true.
4259 mu.LockWhen(Condition(&ArgIsTrue, &flag_stopped));
4260 mu.Unlock();
4261 printf("B::~B() done\n");
4263 virtual void f() = 0; // pure virtual.
4266 struct C: B {
4267 C() { printf("C::C()\n"); }
4268 virtual ~C() { printf("C::~C()\n"); }
4269 virtual void f() { }
4272 void Waiter() {
4273 A *a = new C;
4274 Q.Put(a);
4275 delete a;
4278 void Worker() {
4279 A *a = reinterpret_cast<A*>(Q.Get());
4280 a->f();
4282 mu.Lock();
4283 flag_stopped = true;
4284 ANNOTATE_CONDVAR_SIGNAL(&mu);
4285 mu.Unlock();
4288 void Run() {
4289 printf("test87: positive, race inside DTOR\n");
4290 MyThreadArray t(Waiter, Worker);
4291 t.Start();
4292 t.Join();
4294 REGISTER_TEST2(Run, 87, FEATURE|EXCLUDE_FROM_ALL)
4295 } // namespace test87
4298 // test88: Test for ANNOTATE_IGNORE_WRITES_*{{{1
4299 namespace test88 {
4300 // a racey write annotated with ANNOTATE_IGNORE_WRITES_BEGIN/END.
4301 int GLOB = 0;
4302 void Worker() {
4303 ANNOTATE_IGNORE_WRITES_BEGIN();
4304 GLOB = 1;
4305 ANNOTATE_IGNORE_WRITES_END();
4307 void Run() {
4308 printf("test88: negative, test for ANNOTATE_IGNORE_WRITES_*\n");
4309 MyThread t(Worker);
4310 t.Start();
4311 GLOB = 1;
4312 t.Join();
4313 printf("\tGLOB=%d\n", GLOB);
4315 REGISTER_TEST(Run, 88)
4316 } // namespace test88
4319 // test89: Test for debug info. {{{1
4320 namespace test89 {
4321 // Simple races with different objects (stack, heap, globals; scalars, structs).
4322 // Also, if run with --trace-level=2 this test will show a sequence of
4323 // CTOR and DTOR calls.
4324 struct STRUCT {
4325 int a, b, c;
4328 struct A {
4329 int a;
4330 A() {
4331 ANNOTATE_TRACE_MEMORY(&a);
4332 a = 1;
4334 virtual ~A() {
4335 a = 4;
4339 struct B : A {
4340 B() { CHECK(a == 1); }
4341 virtual ~B() { CHECK(a == 3); }
4343 struct C : B {
4344 C() { a = 2; }
4345 virtual ~C() { a = 3; }
4348 int GLOBAL = 0;
4349 int *STACK = 0;
4350 STRUCT GLOB_STRUCT;
4351 STRUCT *STACK_STRUCT;
4352 STRUCT *HEAP_STRUCT;
4354 void Worker() {
4355 GLOBAL = 1;
4356 *STACK = 1;
4357 GLOB_STRUCT.b = 1;
4358 STACK_STRUCT->b = 1;
4359 HEAP_STRUCT->b = 1;
4362 void Run() {
4363 int stack_var = 0;
4364 STACK = &stack_var;
4366 STRUCT stack_struct;
4367 STACK_STRUCT = &stack_struct;
4369 HEAP_STRUCT = new STRUCT;
4371 printf("test89: negative\n");
4372 MyThreadArray t(Worker, Worker);
4373 t.Start();
4374 t.Join();
4376 delete HEAP_STRUCT;
4378 A *a = new C;
4379 printf("Using 'a->a': %d\n", a->a);
4380 delete a;
4382 REGISTER_TEST2(Run, 89, FEATURE|EXCLUDE_FROM_ALL)
4383 } // namespace test89
4386 // test90: FP. Test for a safely-published pointer (read-only). {{{1
4387 namespace test90 {
4388 // The Publisher creates an object and safely publishes it under a mutex.
4389 // Readers access the object read-only.
4390 // See also test91.
4392 // Without annotations Helgrind will issue a false positive in Reader().
4394 // Choices for annotations:
4395 // -- ANNOTATE_CONDVAR_SIGNAL/ANNOTATE_CONDVAR_WAIT
4396 // -- ANNOTATE_MUTEX_IS_USED_AS_CONDVAR
4397 // -- ANNOTATE_PUBLISH_MEMORY_RANGE.
4399 int *GLOB = 0;
4400 Mutex MU;
4402 void Publisher() {
4403 MU.Lock();
4404 GLOB = (int*)memalign(64, sizeof(int));
4405 *GLOB = 777;
4406 if (!Tsan_PureHappensBefore() && !Tsan_FastMode())
4407 ANNOTATE_EXPECT_RACE_FOR_TSAN(GLOB, "test90. FP. This is a false positive");
4408 MU.Unlock();
4409 usleep(200000);
4412 void Reader() {
4413 usleep(10000);
4414 while (true) {
4415 MU.Lock();
4416 int *p = GLOB;
4417 MU.Unlock();
4418 if (p) {
4419 CHECK(*p == 777); // Race is reported here.
4420 break;
4425 void Run() {
4426 printf("test90: false positive (safely published pointer).\n");
4427 MyThreadArray t(Publisher, Reader, Reader, Reader);
4428 t.Start();
4429 t.Join();
4430 printf("\t*GLOB=%d\n", *GLOB);
4431 free(GLOB);
4433 REGISTER_TEST(Run, 90)
4434 } // namespace test90
4437 // test91: FP. Test for a safely-published pointer (read-write). {{{1
4438 namespace test91 {
4439 // Similar to test90.
4440 // The Publisher creates an object and safely publishes it under a mutex MU1.
4441 // Accessors get the object under MU1 and access it (read/write) under MU2.
4443 // Without annotations Helgrind will issue a false positive in Accessor().
4446 int *GLOB = 0;
4447 Mutex MU, MU1, MU2;
4449 void Publisher() {
4450 MU1.Lock();
4451 GLOB = (int*)memalign(64, sizeof(int));
4452 *GLOB = 777;
4453 if (!Tsan_PureHappensBefore() && !Tsan_FastMode())
4454 ANNOTATE_EXPECT_RACE_FOR_TSAN(GLOB, "test91. FP. This is a false positive");
4455 MU1.Unlock();
4458 void Accessor() {
4459 usleep(10000);
4460 while (true) {
4461 MU1.Lock();
4462 int *p = GLOB;
4463 MU1.Unlock();
4464 if (p) {
4465 MU2.Lock();
4466 (*p)++; // Race is reported here.
4467 CHECK(*p > 777);
4468 MU2.Unlock();
4469 break;
4474 void Run() {
4475 printf("test91: false positive (safely published pointer, read/write).\n");
4476 MyThreadArray t(Publisher, Accessor, Accessor, Accessor);
4477 t.Start();
4478 t.Join();
4479 printf("\t*GLOB=%d\n", *GLOB);
4480 free(GLOB);
4482 REGISTER_TEST(Run, 91)
4483 } // namespace test91
4486 // test92: TN. Test for a safely-published pointer (read-write), annotated. {{{1
4487 namespace test92 {
4488 // Similar to test91, but annotated with ANNOTATE_PUBLISH_MEMORY_RANGE.
4491 // Publisher: Accessors:
4493 // 1. MU1.Lock()
4494 // 2. Create GLOB.
4495 // 3. ANNOTATE_PUBLISH_...(GLOB) -------\ .
4496 // 4. MU1.Unlock() \ .
4497 // \ a. MU1.Lock()
4498 // \ b. Get GLOB
4499 // \ c. MU1.Unlock()
4500 // \--> d. Access GLOB
4502 // A happens-before arc is created between ANNOTATE_PUBLISH_MEMORY_RANGE and
4503 // accesses to GLOB.
4505 struct ObjType {
4506 int arr[10];
4509 ObjType *GLOB = 0;
4510 Mutex MU, MU1, MU2;
4512 void Publisher() {
4513 MU1.Lock();
4514 GLOB = new ObjType;
4515 for (int i = 0; i < 10; i++) {
4516 GLOB->arr[i] = 777;
4518 // This annotation should go right before the object is published.
4519 ANNOTATE_PUBLISH_MEMORY_RANGE(GLOB, sizeof(*GLOB));
4520 MU1.Unlock();
4523 void Accessor(int index) {
4524 while (true) {
4525 MU1.Lock();
4526 ObjType *p = GLOB;
4527 MU1.Unlock();
4528 if (p) {
4529 MU2.Lock();
4530 p->arr[index]++; // W/o the annotations the race will be reported here.
4531 CHECK(p->arr[index] == 778);
4532 MU2.Unlock();
4533 break;
4538 void Accessor0() { Accessor(0); }
4539 void Accessor5() { Accessor(5); }
4540 void Accessor9() { Accessor(9); }
4542 void Run() {
4543 printf("test92: safely published pointer, read/write, annotated.\n");
4544 MyThreadArray t(Publisher, Accessor0, Accessor5, Accessor9);
4545 t.Start();
4546 t.Join();
4547 printf("\t*GLOB=%d\n", GLOB->arr[0]);
4549 REGISTER_TEST(Run, 92)
4550 } // namespace test92
4553 // test93: TP. Test for incorrect usage of ANNOTATE_PUBLISH_MEMORY_RANGE. {{{1
4554 namespace test93 {
4555 int GLOB = 0;
4557 void Reader() {
4558 CHECK(GLOB == 0);
4561 void Publisher() {
4562 usleep(10000);
4563 // Incorrect, used after the memory has been accessed in another thread.
4564 ANNOTATE_PUBLISH_MEMORY_RANGE(&GLOB, sizeof(GLOB));
4567 void Run() {
4568 printf("test93: positive, misuse of ANNOTATE_PUBLISH_MEMORY_RANGE\n");
4569 MyThreadArray t(Reader, Publisher);
4570 t.Start();
4571 t.Join();
4572 printf("\tGLOB=%d\n", GLOB);
4574 REGISTER_TEST2(Run, 93, FEATURE|EXCLUDE_FROM_ALL)
4575 } // namespace test93
4578 // test94: TP. Check do_cv_signal/fake segment logic {{{1
4579 namespace test94 {
4580 int GLOB;
4582 int COND = 0;
4583 int COND2 = 0;
4584 Mutex MU, MU2;
4585 CondVar CV, CV2;
4587 void Thr1() {
4588 usleep(10000); // Make sure the waiter blocks.
4590 GLOB = 1; // WRITE
4592 MU.Lock();
4593 COND = 1;
4594 CV.Signal();
4595 MU.Unlock();
4597 void Thr2() {
4598 usleep(1000*1000); // Make sure CV2.Signal() "happens after" CV.Signal()
4599 usleep(10000); // Make sure the waiter blocks.
4601 MU2.Lock();
4602 COND2 = 1;
4603 CV2.Signal();
4604 MU2.Unlock();
4606 void Thr3() {
4607 MU.Lock();
4608 while(COND != 1)
4609 CV.Wait(&MU);
4610 MU.Unlock();
4612 void Thr4() {
4613 MU2.Lock();
4614 while(COND2 != 1)
4615 CV2.Wait(&MU2);
4616 MU2.Unlock();
4617 GLOB = 2; // READ: no HB-relation between CV.Signal and CV2.Wait !
4619 void Run() {
4620 FAST_MODE_INIT(&GLOB);
4621 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test94: TP.");
4622 printf("test94: TP. Check do_cv_signal/fake segment logic\n");
4623 MyThreadArray mta(Thr1, Thr2, Thr3, Thr4);
4624 mta.Start();
4625 mta.Join();
4626 printf("\tGLOB=%d\n", GLOB);
4628 REGISTER_TEST(Run, 94);
4629 } // namespace test94
4631 // test95: TP. Check do_cv_signal/fake segment logic {{{1
4632 namespace test95 {
4633 int GLOB = 0;
4635 int COND = 0;
4636 int COND2 = 0;
4637 Mutex MU, MU2;
4638 CondVar CV, CV2;
4640 void Thr1() {
4641 usleep(1000*1000); // Make sure CV2.Signal() "happens before" CV.Signal()
4642 usleep(10000); // Make sure the waiter blocks.
4644 GLOB = 1; // WRITE
4646 MU.Lock();
4647 COND = 1;
4648 CV.Signal();
4649 MU.Unlock();
4651 void Thr2() {
4652 usleep(10000); // Make sure the waiter blocks.
4654 MU2.Lock();
4655 COND2 = 1;
4656 CV2.Signal();
4657 MU2.Unlock();
4659 void Thr3() {
4660 MU.Lock();
4661 while(COND != 1)
4662 CV.Wait(&MU);
4663 MU.Unlock();
4665 void Thr4() {
4666 MU2.Lock();
4667 while(COND2 != 1)
4668 CV2.Wait(&MU2);
4669 MU2.Unlock();
4670 GLOB = 2; // READ: no HB-relation between CV.Signal and CV2.Wait !
4672 void Run() {
4673 FAST_MODE_INIT(&GLOB);
4674 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test95: TP.");
4675 printf("test95: TP. Check do_cv_signal/fake segment logic\n");
4676 MyThreadArray mta(Thr1, Thr2, Thr3, Thr4);
4677 mta.Start();
4678 mta.Join();
4679 printf("\tGLOB=%d\n", GLOB);
4681 REGISTER_TEST(Run, 95);
4682 } // namespace test95
4684 // test96: TN. tricky LockSet behaviour {{{1
4685 // 3 threads access the same memory with three different
4686 // locksets: {A, B}, {B, C}, {C, A}.
4687 // These locksets have empty intersection
4688 namespace test96 {
4689 int GLOB = 0;
4691 Mutex A, B, C;
4693 void Thread1() {
4694 MutexLock a(&A);
4695 MutexLock b(&B);
4696 GLOB++;
4699 void Thread2() {
4700 MutexLock b(&B);
4701 MutexLock c(&C);
4702 GLOB++;
4705 void Thread3() {
4706 MutexLock a(&A);
4707 MutexLock c(&C);
4708 GLOB++;
4711 void Run() {
4712 printf("test96: FP. tricky LockSet behaviour\n");
4713 ANNOTATE_TRACE_MEMORY(&GLOB);
4714 MyThreadArray mta(Thread1, Thread2, Thread3);
4715 mta.Start();
4716 mta.Join();
4717 CHECK(GLOB == 3);
4718 printf("\tGLOB=%d\n", GLOB);
4720 REGISTER_TEST(Run, 96);
4721 } // namespace test96
4723 // test97: This test shows false negative with --fast-mode=yes {{{1
4724 namespace test97 {
4725 const int HG_CACHELINE_SIZE = 64;
4727 Mutex MU;
4729 const int ARRAY_SIZE = HG_CACHELINE_SIZE * 4 / sizeof(int);
4730 int array[ARRAY_SIZE];
4731 int * GLOB = &array[ARRAY_SIZE/2];
4733 // We use sizeof(array) == 4 * HG_CACHELINE_SIZE to be sure that GLOB points
4734 // to memory inside a CacheLineZ which is inside the array's memory range.
4737 void Reader() {
4738 usleep(500000);
4739 CHECK(777 == *GLOB);
4742 void Run() {
4743 MyThreadArray t(Reader);
4744 if (!Tsan_FastMode())
4745 ANNOTATE_EXPECT_RACE_FOR_TSAN(GLOB, "test97: TP. FN with --fast-mode=yes");
4746 printf("test97: This test shows false negative with --fast-mode=yes\n");
4748 t.Start();
4749 *GLOB = 777;
4750 t.Join();
4753 REGISTER_TEST2(Run, 97, FEATURE)
4754 } // namespace test97
4756 // test98: Synchronization via read/write (or send/recv). {{{1
4757 namespace test98 {
4758 // The synchronization here is done by a pair of read/write calls
4759 // that create a happens-before arc. Same may be done with send/recv.
4760 // Such synchronization is quite unusual in real programs
4761 // (why would one synchronize via a file or socket?), but
4762 // quite possible in unittests where one thread runs as the producer
4763 // and another as the consumer.
4765 // A race detector has to create happens-before arcs for
4766 // {read,send}->{write,recv} even if the file descriptors are different.
4768 int GLOB = 0;
4769 int fd_out = -1;
4770 int fd_in = -1;
4772 void Writer() {
4773 usleep(1000);
4774 GLOB = 1;
4775 const char *str = "Hey there!\n";
4776 IGNORE_RETURN_VALUE(write(fd_out, str, strlen(str) + 1));
4779 void Reader() {
4780 char buff[100];
4781 while (read(fd_in, buff, 100) == 0)
4782 sleep(1);
4783 printf("read: %s\n", buff);
4784 GLOB = 2;
4787 void Run() {
4788 printf("test98: negative, synchronization via I/O\n");
4789 char in_name[100];
4790 char out_name[100];
4791 // we open two files, one for reading and one for writing,
4792 // but the files are actually the same (symlinked).
4793 sprintf(out_name, "/tmp/racecheck_unittest_out.%ld", (long) getpid());
4794 fd_out = creat(out_name, O_WRONLY | S_IRWXU);
4795 #ifdef VGO_darwin
4796 // symlink() is not supported on Darwin. Copy the output file name.
4797 strcpy(in_name, out_name);
4798 #else
4799 sprintf(in_name, "/tmp/racecheck_unittest_in.%ld", (long) getpid());
4800 IGNORE_RETURN_VALUE(symlink(out_name, in_name));
4801 #endif
4802 fd_in = open(in_name, 0, O_RDONLY);
4803 CHECK(fd_out >= 0);
4804 CHECK(fd_in >= 0);
4805 MyThreadArray t(Writer, Reader);
4806 t.Start();
4807 t.Join();
4808 printf("\tGLOB=%d\n", GLOB);
4809 // cleanup
4810 close(fd_in);
4811 close(fd_out);
4812 unlink(in_name);
4813 unlink(out_name);
4815 REGISTER_TEST(Run, 98)
4816 } // namespace test98
4819 // test99: TP. Unit test for a bug in LockWhen*. {{{1
4820 namespace test99 {
4823 bool GLOB = false;
4824 Mutex mu;
4826 static void Thread1() {
4827 for (int i = 0; i < 100; i++) {
4828 mu.LockWhenWithTimeout(Condition(&ArgIsTrue, &GLOB), 5);
4829 GLOB = false;
4830 mu.Unlock();
4831 usleep(10000);
4835 static void Thread2() {
4836 for (int i = 0; i < 100; i++) {
4837 mu.Lock();
4838 mu.Unlock();
4839 usleep(10000);
4843 void Run() {
4844 printf("test99: regression test for LockWhen*\n");
4845 MyThreadArray t(Thread1, Thread2);
4846 t.Start();
4847 t.Join();
4849 REGISTER_TEST(Run, 99);
4850 } // namespace test99
4853 // test100: Test for initialization bit. {{{1
4854 namespace test100 {
4855 int G1 = 0;
4856 int G2 = 0;
4857 int G3 = 0;
4858 int G4 = 0;
4860 void Creator() {
4861 G1 = 1; CHECK(G1);
4862 G2 = 1;
4863 G3 = 1; CHECK(G3);
4864 G4 = 1;
4867 void Worker1() {
4868 usleep(100000);
4869 CHECK(G1);
4870 CHECK(G2);
4871 G3 = 3;
4872 G4 = 3;
4875 void Worker2() {
4880 void Run() {
4881 printf("test100: test for initialization bit. \n");
4882 MyThreadArray t(Creator, Worker1, Worker2);
4883 ANNOTATE_TRACE_MEMORY(&G1);
4884 ANNOTATE_TRACE_MEMORY(&G2);
4885 ANNOTATE_TRACE_MEMORY(&G3);
4886 ANNOTATE_TRACE_MEMORY(&G4);
4887 t.Start();
4888 t.Join();
4890 REGISTER_TEST2(Run, 100, FEATURE|EXCLUDE_FROM_ALL)
4891 } // namespace test100
4894 // test101: TN. Two signals and two waits. {{{1
4895 namespace test101 {
4896 Mutex MU;
4897 CondVar CV;
4898 int GLOB = 0;
4900 int C1 = 0, C2 = 0;
4902 void Signaller() {
4903 usleep(100000);
4904 MU.Lock();
4905 C1 = 1;
4906 CV.Signal();
4907 printf("signal\n");
4908 MU.Unlock();
4910 GLOB = 1;
4912 usleep(500000);
4913 MU.Lock();
4914 C2 = 1;
4915 CV.Signal();
4916 printf("signal\n");
4917 MU.Unlock();
4920 void Waiter() {
4921 MU.Lock();
4922 while(!C1)
4923 CV.Wait(&MU);
4924 printf("wait\n");
4925 MU.Unlock();
4927 MU.Lock();
4928 while(!C2)
4929 CV.Wait(&MU);
4930 printf("wait\n");
4931 MU.Unlock();
4933 GLOB = 2;
4937 void Run() {
4938 printf("test101: negative\n");
4939 MyThreadArray t(Waiter, Signaller);
4940 t.Start();
4941 t.Join();
4942 printf("\tGLOB=%d\n", GLOB);
4944 REGISTER_TEST(Run, 101)
4945 } // namespace test101
4947 // test102: --fast-mode=yes vs. --initialization-bit=yes {{{1
4948 namespace test102 {
4949 const int HG_CACHELINE_SIZE = 64;
4951 Mutex MU;
4953 const int ARRAY_SIZE = HG_CACHELINE_SIZE * 4 / sizeof(int);
4954 int array[ARRAY_SIZE + 1];
4955 int * GLOB = &array[ARRAY_SIZE/2];
4957 // We use sizeof(array) == 4 * HG_CACHELINE_SIZE to be sure that GLOB points
4958 // to memory inside a CacheLineZ which is inside array's memory range.
4961 void Reader() {
4962 usleep(200000);
4963 CHECK(777 == GLOB[0]);
4964 usleep(400000);
4965 CHECK(777 == GLOB[1]);
4968 void Run() {
4969 MyThreadArray t(Reader);
4970 if (!Tsan_FastMode())
4971 ANNOTATE_EXPECT_RACE_FOR_TSAN(GLOB+0, "test102: TP. FN with --fast-mode=yes");
4972 ANNOTATE_EXPECT_RACE_FOR_TSAN(GLOB+1, "test102: TP");
4973 printf("test102: --fast-mode=yes vs. --initialization-bit=yes\n");
4975 t.Start();
4976 GLOB[0] = 777;
4977 usleep(400000);
4978 GLOB[1] = 777;
4979 t.Join();
4982 REGISTER_TEST2(Run, 102, FEATURE)
4983 } // namespace test102
4985 // test103: Access different memory locations with different LockSets {{{1
4986 namespace test103 {
4987 const int N_MUTEXES = 6;
4988 const int LOCKSET_INTERSECTION_SIZE = 3;
4990 int data[1 << LOCKSET_INTERSECTION_SIZE] = {0};
4991 Mutex MU[N_MUTEXES];
4993 inline int LS_to_idx (int ls) {
4994 return (ls >> (N_MUTEXES - LOCKSET_INTERSECTION_SIZE))
4995 & ((1 << LOCKSET_INTERSECTION_SIZE) - 1);
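// (Editor's note: with N_MUTEXES == 6 and LOCKSET_INTERSECTION_SIZE == 3,
// LS_to_idx(ls) is just the top three bits of the 6-bit mutex mask 'ls'.
// Any access to data[idx] with idx != 0 is therefore done while holding the
// subset of {MU[3], MU[4], MU[5]} selected by idx's bits, so two accesses to
// the same element always share at least one mutex and no race is expected.)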
4998 void Worker() {
4999 for (int ls = 0; ls < (1 << N_MUTEXES); ls++) {
5000 if (LS_to_idx(ls) == 0)
5001 continue;
5002 for (int m = 0; m < N_MUTEXES; m++)
5003 if (ls & (1 << m))
5004 MU[m].Lock();
5006 data[LS_to_idx(ls)]++;
5008 for (int m = N_MUTEXES - 1; m >= 0; m--)
5009 if (ls & (1 << m))
5010 MU[m].Unlock();
5014 void Run() {
5015 printf("test103: Access different memory locations with different LockSets\n");
5016 MyThreadArray t(Worker, Worker, Worker, Worker);
5017 t.Start();
5018 t.Join();
5020 REGISTER_TEST2(Run, 103, FEATURE)
5021 } // namespace test103
5023 // test104: TP. Simple race (write vs write). Heap mem. {{{1
5024 namespace test104 {
5025 int *GLOB = NULL;
5026 void Worker() {
5027 *GLOB = 1;
5030 void Parent() {
5031 MyThread t(Worker);
5032 t.Start();
5033 usleep(100000);
5034 *GLOB = 2;
5035 t.Join();
5037 void Run() {
5038 GLOB = (int*)memalign(64, sizeof(int));
5039 *GLOB = 0;
5040 ANNOTATE_EXPECT_RACE(GLOB, "test104. TP.");
5041 ANNOTATE_TRACE_MEMORY(GLOB);
5042 printf("test104: positive\n");
5043 Parent();
5044 printf("\tGLOB=%d\n", *GLOB);
5045 free(GLOB);
5047 REGISTER_TEST(Run, 104);
5048 } // namespace test104
5051 // test105: Checks how stack grows. {{{1
5052 namespace test105 {
5053 int GLOB = 0;
5055 void F1() {
5056 int ar[32] __attribute__((unused));
5057 // ANNOTATE_TRACE_MEMORY(&ar[0]);
5058 // ANNOTATE_TRACE_MEMORY(&ar[31]);
5059 ar[0] = 1;
5060 ar[31] = 1;
5063 void Worker() {
5064 int ar[32] __attribute__((unused));
5065 // ANNOTATE_TRACE_MEMORY(&ar[0]);
5066 // ANNOTATE_TRACE_MEMORY(&ar[31]);
5067 ar[0] = 1;
5068 ar[31] = 1;
5069 F1();
5072 void Run() {
5073 printf("test105: negative\n");
5074 Worker();
5075 MyThread t(Worker);
5076 t.Start();
5077 t.Join();
5078 printf("\tGLOB=%d\n", GLOB);
5080 REGISTER_TEST(Run, 105)
5081 } // namespace test105
5084 // test106: TN. pthread_once. {{{1
5085 namespace test106 {
5086 int *GLOB = NULL;
5087 static pthread_once_t once = PTHREAD_ONCE_INIT;
5088 void Init() {
5089 GLOB = new int;
5090 ANNOTATE_TRACE_MEMORY(GLOB);
5091 *GLOB = 777;
5094 void Worker0() {
5095 pthread_once(&once, Init);
5097 void Worker1() {
5098 usleep(100000);
5099 pthread_once(&once, Init);
5100 CHECK(*GLOB == 777);
5104 void Run() {
5105 printf("test106: negative\n");
5106 MyThreadArray t(Worker0, Worker1, Worker1, Worker1);
5107 t.Start();
5108 t.Join();
5109 printf("\tGLOB=%d\n", *GLOB);
5111 REGISTER_TEST2(Run, 106, FEATURE)
5112 } // namespace test106
5115 // test107: Test for ANNOTATE_EXPECT_RACE {{{1
5116 namespace test107 {
5117 int GLOB = 0;
5118 void Run() {
5119 printf("test107: negative\n");
5120 ANNOTATE_EXPECT_RACE(&GLOB, "No race in fact. Just checking the tool.");
5121 printf("\tGLOB=%d\n", GLOB);
5123 REGISTER_TEST2(Run, 107, FEATURE|EXCLUDE_FROM_ALL)
5124 } // namespace test107
5127 // test108: TN. initialization of static object. {{{1
5128 namespace test108 {
5129 // Here we have a function-level static object.
5130 // Starting from gcc 4 this is thread safe,
5131 // but it is not thread safe with many other compilers.
5133 // Helgrind supports this kind of initialization by
5134 // intercepting __cxa_guard_acquire/__cxa_guard_release
5135 // and ignoring all accesses between them.
5136 // Helgrind also intercepts pthread_once in the same manner.
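// (Editor's sketch, not part of the original test: for a function-level
// static, gcc >= 4 emits guard code roughly equivalent to
//   static Foo *foo;
//   static guard_t guard;
//   if (__cxa_guard_acquire(&guard)) {
//     foo = new Foo();               // runs in exactly one thread
//     __cxa_guard_release(&guard);
//   }
// so intercepting the two guard calls lets a tool treat the whole
// initialization as properly synchronized.)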
5137 class Foo {
5138 public:
5139 Foo() {
5140 ANNOTATE_TRACE_MEMORY(&a_);
5141 a_ = 42;
5143 void Check() const { CHECK(a_ == 42); }
5144 private:
5145 int a_;
5148 const Foo *GetFoo() {
5149 static const Foo *foo = new Foo();
5150 return foo;
5152 void Worker0() {
5153 GetFoo();
5156 void Worker() {
5157 usleep(200000);
5158 const Foo *foo = GetFoo();
5159 foo->Check();
5163 void Run() {
5164 printf("test108: negative, initialization of static object\n");
5165 MyThreadArray t(Worker0, Worker, Worker);
5166 t.Start();
5167 t.Join();
5169 REGISTER_TEST2(Run, 108, FEATURE)
5170 } // namespace test108
5173 // test109: TN. Checking happens before between parent and child threads. {{{1
5174 namespace test109 {
5175 // Check that the detector correctly connects
5176 // pthread_create with the new thread
5177 // and
5178 // thread exit with pthread_join
5179 const int N = 32;
5180 static int GLOB[N];
5182 void Worker(void *a) {
5183 usleep(10000);
5184 // printf("--Worker : %ld %p\n", (int*)a - GLOB, (void*)pthread_self());
5185 int *arg = (int*)a;
5186 (*arg)++;
5189 void Run() {
5190 printf("test109: negative\n");
5191 MyThread *t[N];
5192 for (int i = 0; i < N; i++) {
5193 t[i] = new MyThread(Worker, &GLOB[i]);
5195 for (int i = 0; i < N; i++) {
5196 ANNOTATE_TRACE_MEMORY(&GLOB[i]);
5197 GLOB[i] = 1;
5198 t[i]->Start();
5199 // printf("--Started: %p\n", (void*)t[i]->tid());
5201 for (int i = 0; i < N; i++) {
5202 // printf("--Joining: %p\n", (void*)t[i]->tid());
5203 t[i]->Join();
5204 // printf("--Joined : %p\n", (void*)t[i]->tid());
5205 GLOB[i]++;
5207 for (int i = 0; i < N; i++) delete t[i];
5209 printf("\tGLOB=%d\n", GLOB[13]);
5211 REGISTER_TEST(Run, 109)
5212 } // namespace test109
5215 // test110: TP. Simple races with stack, global and heap objects. {{{1
5216 namespace test110 {
5217 int GLOB = 0;
5218 static int STATIC;
5220 int *STACK = 0;
5222 int *MALLOC;
5223 int *CALLOC;
5224 int *REALLOC;
5225 int *VALLOC;
5226 int *PVALLOC;
5227 int *MEMALIGN;
5228 union pi_pv_union { int* pi; void* pv; } POSIX_MEMALIGN;
5229 int *MMAP;
5231 int *NEW;
5232 int *NEW_ARR;
5234 void Worker() {
5235 GLOB++;
5236 STATIC++;
5238 (*STACK)++;
5240 (*MALLOC)++;
5241 (*CALLOC)++;
5242 (*REALLOC)++;
5243 (*VALLOC)++;
5244 (*PVALLOC)++;
5245 (*MEMALIGN)++;
5246 (*(POSIX_MEMALIGN.pi))++;
5247 (*MMAP)++;
5249 (*NEW)++;
5250 (*NEW_ARR)++;
5252 void Run() {
5253 int x = 0;
5254 STACK = &x;
5256 MALLOC = (int*)malloc(sizeof(int));
5257 CALLOC = (int*)calloc(1, sizeof(int));
5258 REALLOC = (int*)realloc(NULL, sizeof(int));
5259 VALLOC = (int*)valloc(sizeof(int));
5260 PVALLOC = (int*)valloc(sizeof(int)); // TODO: pvalloc breaks helgrind.
5261 MEMALIGN = (int*)memalign(64, sizeof(int));
5262 CHECK(0 == posix_memalign(&POSIX_MEMALIGN.pv, 64, sizeof(int)));
5263 MMAP = (int*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE,
5264 MAP_PRIVATE | MAP_ANON, -1, 0);
5266 NEW = new int;
5267 NEW_ARR = new int[10];
5270 FAST_MODE_INIT(STACK);
5271 ANNOTATE_EXPECT_RACE(STACK, "real race on stack object");
5272 FAST_MODE_INIT(&GLOB);
5273 ANNOTATE_EXPECT_RACE(&GLOB, "real race on global object");
5274 FAST_MODE_INIT(&STATIC);
5275 ANNOTATE_EXPECT_RACE(&STATIC, "real race on a static global object");
5276 FAST_MODE_INIT(MALLOC);
5277 ANNOTATE_EXPECT_RACE(MALLOC, "real race on a malloc-ed object");
5278 FAST_MODE_INIT(CALLOC);
5279 ANNOTATE_EXPECT_RACE(CALLOC, "real race on a calloc-ed object");
5280 FAST_MODE_INIT(REALLOC);
5281 ANNOTATE_EXPECT_RACE(REALLOC, "real race on a realloc-ed object");
5282 FAST_MODE_INIT(VALLOC);
5283 ANNOTATE_EXPECT_RACE(VALLOC, "real race on a valloc-ed object");
5284 FAST_MODE_INIT(PVALLOC);
5285 ANNOTATE_EXPECT_RACE(PVALLOC, "real race on a pvalloc-ed object");
5286 FAST_MODE_INIT(MEMALIGN);
5287 ANNOTATE_EXPECT_RACE(MEMALIGN, "real race on a memalign-ed object");
5288 FAST_MODE_INIT(POSIX_MEMALIGN.pi);
5289 ANNOTATE_EXPECT_RACE(POSIX_MEMALIGN.pi, "real race on a posix_memalign-ed object");
5290 FAST_MODE_INIT(MMAP);
5291 ANNOTATE_EXPECT_RACE(MMAP, "real race on a mmap-ed object");
5293 FAST_MODE_INIT(NEW);
5294 ANNOTATE_EXPECT_RACE(NEW, "real race on a new-ed object");
5295 FAST_MODE_INIT(NEW_ARR);
5296 ANNOTATE_EXPECT_RACE(NEW_ARR, "real race on a new[]-ed object");
5298 MyThreadArray t(Worker, Worker, Worker);
5299 t.Start();
5300 t.Join();
5301 printf("test110: positive (race on a stack object)\n");
5302 printf("\tSTACK=%d\n", *STACK);
5303 CHECK(GLOB <= 3);
5304 CHECK(STATIC <= 3);
5306 free(MALLOC);
5307 free(CALLOC);
5308 free(REALLOC);
5309 free(VALLOC);
5310 free(PVALLOC);
5311 free(MEMALIGN);
5312 free(POSIX_MEMALIGN.pv);
5313 munmap(MMAP, sizeof(int));
5314 delete NEW;
5315 delete [] NEW_ARR;
5317 REGISTER_TEST(Run, 110)
5318 } // namespace test110
5321 // test111: TN. Unit test for a bug related to stack handling. {{{1
5322 namespace test111 {
5323 char *GLOB = 0;
5324 bool COND = false;
5325 Mutex mu;
5326 const int N = 3000;
5328 void write_to_p(char *p, int val) {
5329 for (int i = 0; i < N; i++)
5330 p[i] = val;
5333 static bool ArgIsTrue(bool *arg) {
5334 // printf("ArgIsTrue: %d tid=%d\n", *arg, (int)pthread_self());
5335 return *arg == true;
5338 void f1() {
5339 char some_stack[N];
5340 write_to_p(some_stack, 1);
5341 mu.LockWhen(Condition(&ArgIsTrue, &COND));
5342 mu.Unlock();
5345 void f2() {
5346 char some_stack[N];
5347 char some_more_stack[N];
5348 write_to_p(some_stack, 2);
5349 write_to_p(some_more_stack, 2);
5352 void f0() { f2(); }
5354 void Worker1() {
5355 f0();
5356 f1();
5357 f2();
5360 void Worker2() {
5361 usleep(100000);
5362 mu.Lock();
5363 COND = true;
5364 mu.Unlock();
5367 void Run() {
5368 printf("test111: regression test\n");
5369 MyThreadArray t(Worker1, Worker1, Worker2);
5370 // AnnotateSetVerbosity(__FILE__, __LINE__, 3);
5371 t.Start();
5372 t.Join();
5373 // AnnotateSetVerbosity(__FILE__, __LINE__, 1);
5375 REGISTER_TEST2(Run, 111, FEATURE)
5376 } // namespace test111
5378 // test112: STAB. Test for ANNOTATE_PUBLISH_MEMORY_RANGE{{{1
5379 namespace test112 {
5380 char *GLOB = 0;
5381 const int N = 64 * 5;
5382 Mutex mu;
5383 bool ready = false; // under mu
5384 int beg, end; // under mu
5386 Mutex mu1;
5388 void Worker() {
5390 bool is_ready = false;
5391 int b, e;
5392 while (!is_ready) {
5393 mu.Lock();
5394 is_ready = ready;
5395 b = beg;
5396 e = end;
5397 mu.Unlock();
5398 usleep(1000);
5401 mu1.Lock();
5402 for (int i = b; i < e; i++) {
5403 GLOB[i]++;
5405 mu1.Unlock();
5408 void PublishRange(int b, int e) {
5409 MyThreadArray t(Worker, Worker);
5410 ready = false; // runs before other threads
5411 t.Start();
5413 ANNOTATE_NEW_MEMORY(GLOB + b, e - b);
5414 ANNOTATE_TRACE_MEMORY(GLOB + b);
5415 for (int j = b; j < e; j++) {
5416 GLOB[j] = 0;
5418 ANNOTATE_PUBLISH_MEMORY_RANGE(GLOB + b, e - b);
5420 // hand off
5421 mu.Lock();
5422 ready = true;
5423 beg = b;
5424 end = e;
5425 mu.Unlock();
5427 t.Join();
5430 void Run() {
5431 printf("test112: stability (ANNOTATE_PUBLISH_MEMORY_RANGE)\n");
5432 GLOB = new char [N];
5434 PublishRange(0, 10);
5435 PublishRange(3, 5);
5437 PublishRange(12, 13);
5438 PublishRange(10, 14);
5440 PublishRange(15, 17);
5441 PublishRange(16, 18);
5443 // do a few more random publishes.
5444 for (int i = 0; i < 20; i++) {
5445 const int begin = rand() % N;
5446 const int size = (rand() % (N - begin)) + 1;
5447 CHECK(size > 0);
5448 CHECK(begin + size <= N);
5449 PublishRange(begin, begin + size);
5452 printf("GLOB = %d\n", (int)GLOB[0]);
5454 REGISTER_TEST2(Run, 112, STABILITY)
5455 } // namespace test112
5458 // test113: PERF. A lot of lock/unlock calls. Many locks {{{1
5459 namespace test113 {
5460 const int kNumIter = 100000;
5461 const int kNumLocks = 7;
5462 Mutex MU[kNumLocks];
5463 void Run() {
5464 printf("test113: perf\n");
5465 for (int i = 0; i < kNumIter; i++ ) {
5466 for (int j = 0; j < kNumLocks; j++) {
5467 if (i & (1 << j)) MU[j].Lock();
5469 for (int j = kNumLocks - 1; j >= 0; j--) {
5470 if (i & (1 << j)) MU[j].Unlock();
5474 REGISTER_TEST(Run, 113)
5475 } // namespace test113
5478 // test114: STAB. Recursive lock. {{{1
5479 namespace test114 {
5480 int Bar() {
5481 static int bar = 1;
5482 return bar;
5484 int Foo() {
5485 static int foo = Bar();
5486 return foo;
5488 void Worker() {
5489 static int x = Foo();
5490 CHECK(x == 1);
5492 void Run() {
5493 printf("test114: stab\n");
5494 MyThreadArray t(Worker, Worker);
5495 t.Start();
5496 t.Join();
5498 REGISTER_TEST(Run, 114)
5499 } // namespace test114
5502 // test115: TN. sem_open. {{{1
5503 namespace test115 {
5504 int tid = 0;
5505 Mutex mu;
5506 const char *kSemName = "drt-test-sem";
5508 int GLOB = 0;
5510 sem_t *DoSemOpen() {
5511 // TODO: there is some race report inside sem_open
5512 // for which suppressions do not work... (???)
5513 ANNOTATE_IGNORE_WRITES_BEGIN();
5514 sem_t *sem = sem_open(kSemName, O_CREAT, 0600, 3);
5515 ANNOTATE_IGNORE_WRITES_END();
5516 return sem;
5519 void Worker() {
5520 mu.Lock();
5521 int my_tid = tid++;
5522 mu.Unlock();
5524 if (my_tid == 0) {
5525 GLOB = 1;
5528 // if the detector observes a happens-before arc between
5529 // sem_open and sem_wait, it will be silent.
5530 sem_t *sem = DoSemOpen();
5531 usleep(100000);
5532 CHECK(sem != SEM_FAILED);
5533 CHECK(sem_wait(sem) == 0);
5535 if (my_tid > 0) {
5536 CHECK(GLOB == 1);
5540 void Run() {
5541 printf("test115: stab (sem_open())\n");
5543 // just check that sem_open is not completely broken
5544 sem_unlink(kSemName);
5545 sem_t* sem = DoSemOpen();
5546 CHECK(sem != SEM_FAILED);
5547 CHECK(sem_wait(sem) == 0);
5548 sem_unlink(kSemName);
5550 // check that sem_open and sem_wait create a happens-before arc.
5551 MyThreadArray t(Worker, Worker, Worker);
5552 t.Start();
5553 t.Join();
5554 // clean up
5555 sem_unlink(kSemName);
5557 REGISTER_TEST(Run, 115)
5558 } // namespace test115
5561 // test116: TN. some operations with string<> objects. {{{1
5562 namespace test116 {
5564 void Worker() {
5565 string A[10], B[10], C[10];
5566 for (int i = 0; i < 1000; i++) {
5567 for (int j = 0; j < 10; j++) {
5568 string &a = A[j];
5569 string &b = B[j];
5570 string &c = C[j];
5571 a = "sdl;fkjhasdflksj df";
5572 b = "sdf sdf;ljsd ";
5573 c = "'sfdf df";
5574 c = b;
5575 a = c;
5576 b = a;
5577 swap(a,b);
5578 swap(b,c);
5580 for (int j = 0; j < 10; j++) {
5581 string &a = A[j];
5582 string &b = B[j];
5583 string &c = C[j];
5584 a.clear();
5585 b.clear();
5586 c.clear();
5591 void Run() {
5592 printf("test116: negative (strings)\n");
5593 MyThreadArray t(Worker, Worker, Worker);
5594 t.Start();
5595 t.Join();
5597 REGISTER_TEST2(Run, 116, FEATURE|EXCLUDE_FROM_ALL)
5598 } // namespace test116
5600 // test117: TN. Many calls to function-scope static init. {{{1
5601 namespace test117 {
5602 const int N = 50;
5604 int Foo() {
5605 usleep(20000);
5606 return 1;
5609 void Worker(void *a) {
5610 static int foo = Foo();
5611 CHECK(foo == 1);
5614 void Run() {
5615 printf("test117: negative\n");
5616 MyThread *t[N];
5617 for (int i = 0; i < N; i++) {
5618 t[i] = new MyThread(Worker);
5620 for (int i = 0; i < N; i++) {
5621 t[i]->Start();
5623 for (int i = 0; i < N; i++) {
5624 t[i]->Join();
5626 for (int i = 0; i < N; i++) delete t[i];
5628 REGISTER_TEST(Run, 117)
5629 } // namespace test117
5633 // test118 PERF: One signal, multiple waits. {{{1
5634 namespace test118 {
5635 int GLOB = 0;
5636 const int kNumIter = 2000000;
5637 void Signaller() {
5638 usleep(50000);
5639 ANNOTATE_CONDVAR_SIGNAL(&GLOB);
5641 void Waiter() {
5642 for (int i = 0; i < kNumIter; i++) {
5643 ANNOTATE_CONDVAR_WAIT(&GLOB);
5644 if (i == kNumIter / 2)
5645 usleep(100000);
5648 void Run() {
5649 printf("test118: perf\n");
5650 MyThreadArray t(Signaller, Waiter, Signaller, Waiter);
5651 t.Start();
5652 t.Join();
5653 printf("\tGLOB=%d\n", GLOB);
5655 REGISTER_TEST(Run, 118)
5656 } // namespace test118
5659 // test119: TP. Testing that malloc does not introduce any HB arc. {{{1
5660 namespace test119 {
5661 int GLOB = 0;
5662 void Worker1() {
5663 GLOB = 1;
5664 free(malloc(123));
5666 void Worker2() {
5667 usleep(100000);
5668 free(malloc(345));
5669 GLOB = 2;
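// (Editor's note, explaining the condition used in Run() below: some malloc
// implementations take an internal mutex, and a pure happens-before detector
// turns every unlock -> lock pair on that mutex into a happens-before arc.
// Worker1's free() and Worker2's malloc() may then appear ordered, hiding
// the race on GLOB, so the race is only expected when that cannot happen,
// i.e. unless both Tsan_PureHappensBefore() and kMallocUsesMutex hold.)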
5671 void Run() {
5672 printf("test119: positive (checking if malloc creates HB arcs)\n");
5673 FAST_MODE_INIT(&GLOB);
5674 if (!(Tsan_PureHappensBefore() && kMallocUsesMutex))
5675 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "true race");
5676 MyThreadArray t(Worker1, Worker2);
5677 t.Start();
5678 t.Join();
5679 printf("\tGLOB=%d\n", GLOB);
5681 REGISTER_TEST(Run, 119)
5682 } // namespace test119
5685 // test120: TP. Thread1: write then read. Thread2: read. {{{1
5686 namespace test120 {
5687 int GLOB = 0;
5689 void Thread1() {
5690 GLOB = 1; // write
5691 CHECK(GLOB); // read
5694 void Thread2() {
5695 usleep(100000);
5696 CHECK(GLOB >= 0); // read
5699 void Run() {
5700 FAST_MODE_INIT(&GLOB);
5701 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "TP (T1: write then read, T2: read)");
5702 printf("test120: positive\n");
5703 MyThreadArray t(Thread1, Thread2);
5704 GLOB = 1;
5705 t.Start();
5706 t.Join();
5707 printf("\tGLOB=%d\n", GLOB);
5709 REGISTER_TEST(Run, 120)
5710 } // namespace test120
5713 // test121: TP. Example of double-checked-locking {{{1
5714 namespace test121 {
5715 struct Foo {
5716 uintptr_t a, b[15];
5717 } __attribute__ ((aligned (64)));
5719 static Mutex mu;
5720 static Foo *foo;
5722 void InitMe() {
5723 if (!foo) {
5724 MutexLock lock(&mu);
5725 if (!foo) {
5726 ANNOTATE_EXPECT_RACE_FOR_TSAN(&foo, "test121. Double-checked locking (ptr)");
5727 foo = new Foo;
5728 if (!Tsan_FastMode())
5729 ANNOTATE_EXPECT_RACE_FOR_TSAN(&foo->a, "test121. Double-checked locking (obj)");
5730 foo->a = 42;
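// (Editor's note, a hedged aside rather than part of this test: a portable,
// race-free double-checked locking would publish the pointer through an
// atomic with release/acquire ordering, e.g. roughly
//   std::atomic<Foo*> foo;
//   Foo *p = foo.load(std::memory_order_acquire);       // fast path
//   foo.store(new_foo, std::memory_order_release);       // inside the mutex
// Here the plain 'foo' pointer is deliberately read outside the mutex,
// which is exactly the race the EXPECT_RACE annotations above anticipate.)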
5735 void UseMe() {
5736 InitMe();
5737 CHECK(foo && foo->a == 42);
5740 void Worker1() { UseMe(); }
5741 void Worker2() { UseMe(); }
5742 void Worker3() { UseMe(); }
5745 void Run() {
5746 FAST_MODE_INIT(&foo);
5747 printf("test121: TP. Example of double-checked-locking\n");
5748 MyThreadArray t1(Worker1, Worker2, Worker3);
5749 t1.Start();
5750 t1.Join();
5751 delete foo;
5753 REGISTER_TEST(Run, 121)
5754 } // namespace test121
5756 // test122 TP: Simple test with RWLock {{{1
5757 namespace test122 {
5758 int VAR1 = 0;
5759 int VAR2 = 0;
5760 RWLock mu;
5762 void WriteWhileHoldingReaderLock(int *p) {
5763 usleep(100000);
5764 ReaderLockScoped lock(&mu); // Reader lock for writing. -- bug.
5765 (*p)++;
5768 void CorrectWrite(int *p) {
5769 WriterLockScoped lock(&mu);
5770 (*p)++;
5773 void Thread1() { WriteWhileHoldingReaderLock(&VAR1); }
5774 void Thread2() { CorrectWrite(&VAR1); }
5775 void Thread3() { CorrectWrite(&VAR2); }
5776 void Thread4() { WriteWhileHoldingReaderLock(&VAR2); }
5779 void Run() {
5780 printf("test122: positive (rw-lock)\n");
5781 VAR1 = 0;
5782 VAR2 = 0;
5783 ANNOTATE_TRACE_MEMORY(&VAR1);
5784 ANNOTATE_TRACE_MEMORY(&VAR2);
5785 if (!Tsan_PureHappensBefore()) {
5786 ANNOTATE_EXPECT_RACE_FOR_TSAN(&VAR1, "test122. TP. ReaderLock-ed while writing");
5787 ANNOTATE_EXPECT_RACE_FOR_TSAN(&VAR2, "test122. TP. ReaderLock-ed while writing");
5789 MyThreadArray t(Thread1, Thread2, Thread3, Thread4);
5790 t.Start();
5791 t.Join();
5793 REGISTER_TEST(Run, 122)
5794 } // namespace test122
5797 // test123 TP: accesses of different sizes. {{{1
5798 namespace test123 {
5800 union uint_union {
5801 uint64_t u64[1];
5802 uint32_t u32[2];
5803 uint16_t u16[4];
5804 uint8_t u8[8];
5807 uint_union MEM[8];
5809 // Q. Hey dude, why so many functions?
5810 // A. I need different stack traces for different accesses.
5812 void Wr64_0() { MEM[0].u64[0] = 1; }
5813 void Wr64_1() { MEM[1].u64[0] = 1; }
5814 void Wr64_2() { MEM[2].u64[0] = 1; }
5815 void Wr64_3() { MEM[3].u64[0] = 1; }
5816 void Wr64_4() { MEM[4].u64[0] = 1; }
5817 void Wr64_5() { MEM[5].u64[0] = 1; }
5818 void Wr64_6() { MEM[6].u64[0] = 1; }
5819 void Wr64_7() { MEM[7].u64[0] = 1; }
5821 void Wr32_0() { MEM[0].u32[0] = 1; }
5822 void Wr32_1() { MEM[1].u32[1] = 1; }
5823 void Wr32_2() { MEM[2].u32[0] = 1; }
5824 void Wr32_3() { MEM[3].u32[1] = 1; }
5825 void Wr32_4() { MEM[4].u32[0] = 1; }
5826 void Wr32_5() { MEM[5].u32[1] = 1; }
5827 void Wr32_6() { MEM[6].u32[0] = 1; }
5828 void Wr32_7() { MEM[7].u32[1] = 1; }
5830 void Wr16_0() { MEM[0].u16[0] = 1; }
5831 void Wr16_1() { MEM[1].u16[1] = 1; }
5832 void Wr16_2() { MEM[2].u16[2] = 1; }
5833 void Wr16_3() { MEM[3].u16[3] = 1; }
5834 void Wr16_4() { MEM[4].u16[0] = 1; }
5835 void Wr16_5() { MEM[5].u16[1] = 1; }
5836 void Wr16_6() { MEM[6].u16[2] = 1; }
5837 void Wr16_7() { MEM[7].u16[3] = 1; }
5839 void Wr8_0() { MEM[0].u8[0] = 1; }
5840 void Wr8_1() { MEM[1].u8[1] = 1; }
5841 void Wr8_2() { MEM[2].u8[2] = 1; }
5842 void Wr8_3() { MEM[3].u8[3] = 1; }
5843 void Wr8_4() { MEM[4].u8[4] = 1; }
5844 void Wr8_5() { MEM[5].u8[5] = 1; }
5845 void Wr8_6() { MEM[6].u8[6] = 1; }
5846 void Wr8_7() { MEM[7].u8[7] = 1; }
5848 void WriteAll64() {
5849 Wr64_0();
5850 Wr64_1();
5851 Wr64_2();
5852 Wr64_3();
5853 Wr64_4();
5854 Wr64_5();
5855 Wr64_6();
5856 Wr64_7();
5859 void WriteAll32() {
5860 Wr32_0();
5861 Wr32_1();
5862 Wr32_2();
5863 Wr32_3();
5864 Wr32_4();
5865 Wr32_5();
5866 Wr32_6();
5867 Wr32_7();
5870 void WriteAll16() {
5871 Wr16_0();
5872 Wr16_1();
5873 Wr16_2();
5874 Wr16_3();
5875 Wr16_4();
5876 Wr16_5();
5877 Wr16_6();
5878 Wr16_7();
5881 void WriteAll8() {
5882 Wr8_0();
5883 Wr8_1();
5884 Wr8_2();
5885 Wr8_3();
5886 Wr8_4();
5887 Wr8_5();
5888 Wr8_6();
5889 Wr8_7();
5892 void W00() { WriteAll64(); }
5893 void W01() { WriteAll64(); }
5894 void W02() { WriteAll64(); }
5896 void W10() { WriteAll32(); }
5897 void W11() { WriteAll32(); }
5898 void W12() { WriteAll32(); }
5900 void W20() { WriteAll16(); }
5901 void W21() { WriteAll16(); }
5902 void W22() { WriteAll16(); }
5904 void W30() { WriteAll8(); }
5905 void W31() { WriteAll8(); }
5906 void W32() { WriteAll8(); }
5908 typedef void (*F)(void);
5910 void TestTwoSizes(F f1, F f2) {
5911 // first f1, then f2
5912 ANNOTATE_NEW_MEMORY(&MEM, sizeof(MEM));
5913 memset(&MEM, 0, sizeof(MEM));
5914 MyThreadArray t1(f1, f2);
5915 t1.Start();
5916 t1.Join();
5917 // reverse order
5918 ANNOTATE_NEW_MEMORY(&MEM, sizeof(MEM));
5919 memset(&MEM, 0, sizeof(MEM));
5920 MyThreadArray t2(f2, f1);
5921 t2.Start();
5922 t2.Join();
5925 void Run() {
5926 printf("test123: positive (different sizes)\n");
5927 TestTwoSizes(W00, W10);
5928 // TestTwoSizes(W01, W20);
5929 // TestTwoSizes(W02, W30);
5930 // TestTwoSizes(W11, W21);
5931 // TestTwoSizes(W12, W31);
5932 // TestTwoSizes(W22, W32);
5935 REGISTER_TEST2(Run, 123, FEATURE|EXCLUDE_FROM_ALL)
5936 } // namespace test123
5939 // test124: What happens if we delete an unlocked lock? {{{1
5940 namespace test124 {
5941 // This test does not work with pthreads (you can't call
5942 // pthread_mutex_destroy on a locked lock).
5943 int GLOB = 0;
5944 const int N = 1000;
5945 void Worker() {
5946 Mutex *a_large_local_array_of_mutexes;
5947 a_large_local_array_of_mutexes = new Mutex[N];
5948 for (int i = 0; i < N; i++) {
5949 a_large_local_array_of_mutexes[i].Lock();
5951 delete []a_large_local_array_of_mutexes;
5952 GLOB = 1;
5955 void Run() {
5956 printf("test124: negative\n");
5957 MyThreadArray t(Worker, Worker, Worker);
5958 t.Start();
5959 t.Join();
5960 printf("\tGLOB=%d\n", GLOB);
5962 REGISTER_TEST2(Run, 124, FEATURE|EXCLUDE_FROM_ALL)
5963 } // namespace test124
5966 // test125 TN: Backwards lock (annotated). {{{1
5967 namespace test125 {
5968 // This test uses "Backwards mutex" locking protocol.
5969 // We take a *reader* lock when writing to per-thread data
5970 // (GLOB[thread_num]) and we take a *writer* lock when we
5971 // are reading from the entire array at once.
5973 // Such locking protocol is not understood by ThreadSanitizer's
5974 // hybrid state machine. So, you either have to use a pure-happens-before
5975 // detector ("tsan --pure-happens-before") or apply pure happens-before mode
5976 // to this particular lock by using ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&mu).
5978 const int n_threads = 3;
5979 RWLock mu;
5980 int GLOB[n_threads];
5982 int adder_num; // updated atomically.
5984 void Adder() {
5985 int my_num = AtomicIncrement(&adder_num, 1);
5987 ReaderLockScoped lock(&mu);
5988 GLOB[my_num]++;
5991 void Aggregator() {
5992 int sum = 0;
5994 WriterLockScoped lock(&mu);
5995 for (int i = 0; i < n_threads; i++) {
5996 sum += GLOB[i];
5999 printf("sum=%d\n", sum);
6002 void Run() {
6003 printf("test125: negative\n");
6005 ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&mu);
6007 // run Adders, then Aggregator
6009 MyThreadArray t(Adder, Adder, Adder, Aggregator);
6010 t.Start();
6011 t.Join();
6014 // Run Aggregator first.
6015 adder_num = 0;
6017 MyThreadArray t(Aggregator, Adder, Adder, Adder);
6018 t.Start();
6019 t.Join();
6023 REGISTER_TEST(Run, 125)
6024 } // namespace test125
6026 // test126 TN: test for BlockingCounter {{{1
6027 namespace test126 {
6028 BlockingCounter *blocking_counter;
6029 int GLOB = 0;
6030 void Worker() {
6031 CHECK(blocking_counter);
6032 CHECK(GLOB == 0);
6033 blocking_counter->DecrementCount();
6035 void Run() {
6036 printf("test126: negative\n");
6037 MyThreadArray t(Worker, Worker, Worker);
6038 blocking_counter = new BlockingCounter(3);
6039 t.Start();
6040 blocking_counter->Wait();
6041 GLOB = 1;
6042 t.Join();
6043 printf("\tGLOB=%d\n", GLOB);
6045 REGISTER_TEST(Run, 126)
6046 } // namespace test126
6049 // test127. Bad code: unlocking a mutex locked by another thread. {{{1
6050 namespace test127 {
6051 Mutex mu;
6052 void Thread1() {
6053 mu.Lock();
6055 void Thread2() {
6056 usleep(100000);
6057 mu.Unlock();
6059 void Run() {
6060 printf("test127: unlocking a mutex locked by another thread.\n");
6061 MyThreadArray t(Thread1, Thread2);
6062 t.Start();
6063 t.Join();
6065 REGISTER_TEST(Run, 127)
6066 } // namespace test127
6068 // test128. Suppressed code in concurrent accesses {{{1
6069 // Please use --suppressions=unittest.supp flag when running this test.
6070 namespace test128 {
6071 Mutex mu;
6072 int GLOB = 0;
6073 void Worker() {
6074 usleep(100000);
6075 mu.Lock();
6076 GLOB++;
6077 mu.Unlock();
6079 void ThisFunctionShouldBeSuppressed() {
6080 GLOB++;
6082 void Run() {
6083 printf("test128: Suppressed code in concurrent accesses.\n");
6084 MyThreadArray t(Worker, ThisFunctionShouldBeSuppressed);
6085 t.Start();
6086 t.Join();
6088 REGISTER_TEST2(Run, 128, FEATURE | EXCLUDE_FROM_ALL)
6089 } // namespace test128
6091 // test129: TN. Synchronization via ReaderLockWhen(). {{{1
6092 namespace test129 {
6093 int GLOB = 0;
6094 Mutex MU;
6095 bool WeirdCondition(int* param) {
6096 *param = GLOB; // a write into Waiter's memory
6097 return GLOB > 0;
6099 void Waiter() {
6100 int param = 0;
6101 MU.ReaderLockWhen(Condition(WeirdCondition, &param));
6102 MU.ReaderUnlock();
6103 CHECK(GLOB > 0);
6104 CHECK(param > 0);
6106 void Waker() {
6107 usleep(100000); // Make sure the waiter blocks.
6108 MU.Lock();
6109 GLOB++;
6110 MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
6112 void Run() {
6113 printf("test129: Synchronization via ReaderLockWhen()\n");
6114 MyThread mt(Waiter, NULL, "Waiter Thread");
6115 mt.Start();
6116 Waker();
6117 mt.Join();
6118 printf("\tGLOB=%d\n", GLOB);
6120 REGISTER_TEST2(Run, 129, FEATURE);
6121 } // namespace test129
6123 // test130: TN. Per-thread. {{{1
6124 namespace test130 {
6125 #ifndef NO_TLS
6126 // This test verifies that the race detector handles
6127 // thread-local storage (TLS) correctly.
6128 // As of 09-03-30 ThreadSanitizer has a bug:
6129 // - Thread1 starts
6130 // - Thread1 touches per_thread_global
6131 // - Thread1 ends
6132 // - Thread2 starts (and there is no happens-before relation between it and
6133 // Thread1)
6134 // - Thread2 touches per_thread_global
6135 // It may happen that Thread2's per_thread_global ends up at the same address
6136 // as Thread1's. Since there is no happens-before relation between the threads,
6137 // ThreadSanitizer reports a race.
6139 // test131 does the same for stack.
6141 static __thread int per_thread_global[10] = {0};
6143 void RealWorker() { // Touch per_thread_global.
6144 per_thread_global[1]++;
6145 errno++;
6148 void Worker() { // Spawn few threads that touch per_thread_global.
6149 MyThreadArray t(RealWorker, RealWorker);
6150 t.Start();
6151 t.Join();
6153 void Worker0() { sleep(0); Worker(); }
6154 void Worker1() { sleep(1); Worker(); }
6155 void Worker2() { sleep(2); Worker(); }
6156 void Worker3() { sleep(3); Worker(); }
6158 void Run() {
6159 printf("test130: Per-thread\n");
6160 MyThreadArray t1(Worker0, Worker1, Worker2, Worker3);
6161 t1.Start();
6162 t1.Join();
6163 printf("\tper_thread_global=%d\n", per_thread_global[1]);
6165 REGISTER_TEST(Run, 130)
6166 #endif // NO_TLS
6167 } // namespace test130
6170 // test131: TN. Stack. {{{1
6171 namespace test131 {
6172 // Same as test130, but for stack.
6174 void RealWorker() { // Touch stack.
6175 int stack_var = 0;
6176 stack_var++;
6179 void Worker() { // Spawn few threads that touch stack.
6180 MyThreadArray t(RealWorker, RealWorker);
6181 t.Start();
6182 t.Join();
6184 void Worker0() { sleep(0); Worker(); }
6185 void Worker1() { sleep(1); Worker(); }
6186 void Worker2() { sleep(2); Worker(); }
6187 void Worker3() { sleep(3); Worker(); }
6189 void Run() {
6190 printf("test131: stack\n");
6191 MyThreadArray t(Worker0, Worker1, Worker2, Worker3);
6192 t.Start();
6193 t.Join();
6195 REGISTER_TEST(Run, 131)
6196 } // namespace test131
6199 // test132: TP. Simple race (write vs write). Works in fast-mode. {{{1
6200 namespace test132 {
6201 int GLOB = 0;
6202 void Worker() { GLOB = 1; }
6204 void Run1() {
6205 FAST_MODE_INIT(&GLOB);
6206 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test132");
6207 printf("test132: positive; &GLOB=%p\n", &GLOB);
6208 ANNOTATE_TRACE_MEMORY(&GLOB);
6209 GLOB = 7;
6210 MyThreadArray t(Worker, Worker);
6211 t.Start();
6212 t.Join();
6215 void Run() {
6216 Run1();
6218 REGISTER_TEST(Run, 132);
6219 } // namespace test132
6222 // test133: TP. Simple race (write vs write). Works in fast mode. {{{1
6223 namespace test133 {
6224 // Same as test132, but everything is run from a separate thread spawned from
6225 // the main thread.
6226 int GLOB = 0;
6227 void Worker() { GLOB = 1; }
6229 void Run1() {
6230 FAST_MODE_INIT(&GLOB);
6231 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test133");
6232 printf("test133: positive; &GLOB=%p\n", &GLOB);
6233 ANNOTATE_TRACE_MEMORY(&GLOB);
6234 GLOB = 7;
6235 MyThreadArray t(Worker, Worker);
6236 t.Start();
6237 t.Join();
6239 void Run() {
6240 MyThread t(Run1);
6241 t.Start();
6242 t.Join();
6244 REGISTER_TEST(Run, 133);
6245 } // namespace test133
6248 // test134 TN. Swap. Variant of test79. {{{1
6249 namespace test134 {
6250 #if 0
6251 typedef __gnu_cxx::hash_map<int, int> map_t;
6252 #else
6253 typedef std::map<int, int> map_t;
6254 #endif
6255 map_t map;
6256 Mutex mu;
6257 // Here we use swap to pass map between threads.
6258 // The synchronization is correct, but w/o the annotation
6259 // any hybrid detector will complain.
6261 // Swap is very unfriendly to the lock-set (and hybrid) race detectors.
6262 // Since tmp is destructed outside the mutex, we need to have a happens-before
6263 // arc between any prior access to map and here.
6264 // Since the internals of tmp are created outside the mutex and are passed to
6265 // another thread, we need to have an h-b arc between here and any future access.
6266 // These arcs can be created by HAPPENS_{BEFORE,AFTER} annotations, but it is
6267 // much simpler to apply pure-happens-before mode to the mutex mu.
6268 void Swapper() {
6269 map_t tmp;
6270 MutexLock lock(&mu);
6271 ANNOTATE_HAPPENS_AFTER(&map);
6272 // We swap the new empty map 'tmp' with 'map'.
6273 map.swap(tmp);
6274 ANNOTATE_HAPPENS_BEFORE(&map);
6275 // tmp (which is the old version of map) is destroyed here.
6278 void Worker() {
6279 MutexLock lock(&mu);
6280 ANNOTATE_HAPPENS_AFTER(&map);
6281 map[1]++;
6282 ANNOTATE_HAPPENS_BEFORE(&map);
6285 void Run() {
6286 printf("test134: negative (swap)\n");
6287 // ********************** Shorter way: ***********************
6288 // ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&mu);
6289 MyThreadArray t(Worker, Worker, Swapper, Worker, Worker);
6290 t.Start();
6291 t.Join();
6293 REGISTER_TEST(Run, 134)
6294 } // namespace test134
6296 // test135 TN. Swap. Variant of test79. {{{1
6297 namespace test135 {
6299 void SubWorker() {
6300 const long SIZE = 65536;
6301 for (int i = 0; i < 32; i++) {
6302 int *ptr = (int*)mmap(NULL, SIZE, PROT_READ | PROT_WRITE,
6303 MAP_PRIVATE | MAP_ANON, -1, 0);
6304 *ptr = 42;
6305 munmap(ptr, SIZE);
6309 void Worker() {
6310 MyThreadArray t(SubWorker, SubWorker, SubWorker, SubWorker);
6311 t.Start();
6312 t.Join();
6315 void Run() {
6316 printf("test135: negative (mmap)\n");
6317 MyThreadArray t(Worker, Worker, Worker, Worker);
6318 t.Start();
6319 t.Join();
6321 REGISTER_TEST(Run, 135)
6322 } // namespace test135
6324 // test136. Unlock twice. {{{1
6325 namespace test136 {
6326 void Run() {
6327 printf("test136: unlock twice\n");
6328 pthread_mutexattr_t attr;
6329 CHECK(0 == pthread_mutexattr_init(&attr));
6330 CHECK(0 == pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK));
6332 pthread_mutex_t mu;
6333 CHECK(0 == pthread_mutex_init(&mu, &attr));
6334 CHECK(0 == pthread_mutex_lock(&mu));
6335 CHECK(0 == pthread_mutex_unlock(&mu));
6336 int ret_unlock = pthread_mutex_unlock(&mu); // unlocking twice.
6337 int ret_destroy = pthread_mutex_destroy(&mu);
6338 printf(" pthread_mutex_unlock returned %d\n", ret_unlock);
6339 printf(" pthread_mutex_destroy returned %d\n", ret_destroy);
6343 REGISTER_TEST(Run, 136)
6344 } // namespace test136
6346 // test137 TP. Races on stack variables. {{{1
6347 namespace test137 {
6348 int GLOB = 0;
6349 ProducerConsumerQueue q(10);
6351 void Worker() {
6352 int stack;
6353 int *tmp = (int*)q.Get();
6354 (*tmp)++;
6355 int *racey = &stack;
6356 q.Put(racey);
6357 (*racey)++;
6358 usleep(150000);
6359 // We may miss the races if we sleep less due to die_memory events...
6362 void Run() {
6363 int tmp = 0;
6364 printf("test137: TP. Races on stack variables.\n");
6365 q.Put(&tmp);
6366 MyThreadArray t(Worker, Worker, Worker, Worker);
6367 t.Start();
6368 t.Join();
6369 q.Get();
6372 REGISTER_TEST2(Run, 137, FEATURE | EXCLUDE_FROM_ALL)
6373 } // namespace test137
6375 // test138 FN. Two closures hit the same thread in ThreadPool. {{{1
6376 namespace test138 {
6377 int GLOB = 0;
6379 void Worker() {
6380 usleep(100000);
6381 GLOB++;
6384 void Run() {
6385 FAST_MODE_INIT(&GLOB);
6386 printf("test138: FN. Two closures hit the same thread in ThreadPool.\n");
6388 // When using thread pools, two concurrent callbacks might be scheduled
6389 // onto the same executor thread. As a result, an unnecessary happens-before
6390 // relation may be introduced between the callbacks.
6391 // If we set the number of executor threads to 1, any known data
6392 // race detector will be silent. However, the same situation may happen
6393 // with any number of executor threads (with some probability).
6394 ThreadPool tp(1);
6395 tp.StartWorkers();
6396 tp.Add(NewCallback(Worker));
6397 tp.Add(NewCallback(Worker));
6400 REGISTER_TEST2(Run, 138, FEATURE)
6401 } // namespace test138
6403 // test139: FN. A true race hidden by reference counting annotation. {{{1
6404 namespace test139 {
6405 int GLOB = 0;
6406 RefCountedClass *obj;
6408 void Worker1() {
6409 GLOB++; // First access.
6410 obj->Unref();
6413 void Worker2() {
6414 usleep(100000);
6415 obj->Unref();
6416 GLOB++; // Second access.
6419 void Run() {
6420 FAST_MODE_INIT(&GLOB);
6421 printf("test139: FN. A true race hidden by reference counting annotation.\n");
6423 obj = new RefCountedClass;
6424 obj->AnnotateUnref();
6425 obj->Ref();
6426 obj->Ref();
6427 MyThreadArray mt(Worker1, Worker2);
6428 mt.Start();
6429 mt.Join();
6432 REGISTER_TEST2(Run, 139, FEATURE)
6433 } // namespace test139
6435 // test140 TN. Swap. Variant of test79 and test134. {{{1
6436 namespace test140 {
6437 #if 0
6438 typedef __gnu_cxx::hash_map<int, int> Container;
6439 #else
6440 typedef std::map<int,int> Container;
6441 #endif
6442 Mutex mu;
6443 static Container container;
6445 // Here we use swap to pass a Container between threads.
6446 // The synchronization is correct, but w/o the annotation
6447 // any hybrid detector will complain.
6449 // Unlike test134, we try to have a minimal set of annotations
6450 // so that extra h-b arcs do not hide other races.
6452 // Swap is very unfriendly to the lock-set (and hybrid) race detectors.
6453 // Since tmp is destructed outside the mutex, we need to have a happens-before
6454 // arc between any prior access to container and here.
6455 // Since the internals of tmp are created outside the mutex and are passed to
6456 // another thread, we need to have an h-b arc between here and any future access.
6458 // We want to be able to annotate swapper so that we don't need to annotate
6459 // anything else.
6460 void Swapper() {
6461 Container tmp;
6462 tmp[1] = tmp[2] = tmp[3] = 0;
6464 MutexLock lock(&mu);
6465 container.swap(tmp);
6466 // we are unpublishing the old container.
6467 ANNOTATE_UNPUBLISH_MEMORY_RANGE(&container, sizeof(container));
6468 // we are publishing the new container.
6469 ANNOTATE_PUBLISH_MEMORY_RANGE(&container, sizeof(container));
6471 tmp[1]++;
6472 tmp[2]++;
6473 // tmp (which is the old version of container) is destroyed here.
6476 void Worker() {
6477 MutexLock lock(&mu);
6478 container[1]++;
6479 int *v = &container[2];
6480 for (int i = 0; i < 10; i++) {
6481 // if uncommented, this will break ANNOTATE_UNPUBLISH_MEMORY_RANGE():
6482 // ANNOTATE_HAPPENS_BEFORE(v);
6483 if (i % 3) {
6484 (*v)++;
6489 void Run() {
6490 printf("test140: negative (swap) %p\n", &container);
6491 MyThreadArray t(Worker, Worker, Swapper, Worker, Worker);
6492 t.Start();
6493 t.Join();
6495 REGISTER_TEST(Run, 140)
6496 } // namespace test140
6498 // test141 FP. unlink/fopen, rmdir/opendir. {{{1
6499 namespace test141 {
6500 int GLOB1 = 0,
6501 GLOB2 = 0;
6502 char *dir_name = NULL,
6503 *filename = NULL;
6505 void Waker1() {
6506 usleep(100000);
6507 GLOB1 = 1; // Write
6508 // unlink() deletes the file 'filename',
6509 // which exits the spin-loop in Waiter1().
6510 printf(" Deleting file...\n");
6511 CHECK(unlink(filename) == 0);
6514 void Waiter1() {
6515 FILE *tmp;
6516 while ((tmp = fopen(filename, "r")) != NULL) {
6517 fclose(tmp);
6518 usleep(10000);
6520 printf(" ...file has been deleted\n");
6521 GLOB1 = 2; // Write
6524 void Waker2() {
6525 usleep(100000);
6526 GLOB2 = 1; // Write
6527 // rmdir() deletes the directory 'dir_name',
6528 // which exits the spin-loop in Waiter2().
6529 printf(" Deleting directory...\n");
6530 CHECK(rmdir(dir_name) == 0);
6533 void Waiter2() {
6534 DIR *tmp;
6535 while ((tmp = opendir(dir_name)) != NULL) {
6536 closedir(tmp);
6537 usleep(10000);
6539 printf(" ...directory has been deleted\n");
6540 GLOB2 = 2;
6543 void Run() {
6544 FAST_MODE_INIT(&GLOB1);
6545 FAST_MODE_INIT(&GLOB2);
6546 printf("test141: FP. unlink/fopen, rmdir/opendir.\n");
6548 dir_name = strdup("/tmp/tsan-XXXXXX");
6549 IGNORE_RETURN_VALUE(mkdtemp(dir_name));
6551 filename = strdup((std::string() + dir_name + "/XXXXXX").c_str());
6552 const int fd = mkstemp(filename);
6553 CHECK(fd >= 0);
6554 close(fd);
6556 MyThreadArray mta1(Waker1, Waiter1);
6557 mta1.Start();
6558 mta1.Join();
6560 MyThreadArray mta2(Waker2, Waiter2);
6561 mta2.Start();
6562 mta2.Join();
6563 free(filename);
6564 filename = 0;
6565 free(dir_name);
6566 dir_name = 0;
6568 REGISTER_TEST(Run, 141)
6569 } // namespace test141
6572 // Simple FIFO queue annotated with PCQ annotations. {{{1
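// (Editor's note on PCQ semantics, as commonly implemented by hybrid
// detectors: ANNOTATE_PCQ_PUT(q) in Put() and ANNOTATE_PCQ_GET(q) in Get()
// make the i-th Get happen after the i-th Put on the same queue, i.e. what
// a producer did before putting message i is visible to the consumer that
// receives message i, without ordering unrelated Put/Get pairs the way a
// single HAPPENS_BEFORE/AFTER address would.)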
6573 class FifoMessageQueue {
6574 public:
6575 FifoMessageQueue() { ANNOTATE_PCQ_CREATE(this); }
6576 ~FifoMessageQueue() { ANNOTATE_PCQ_DESTROY(this); }
6577 // Send a message. 'message' should be positive.
6578 void Put(int message) {
6579 CHECK(message);
6580 MutexLock lock(&mu_);
6581 ANNOTATE_PCQ_PUT(this);
6582 q_.push(message);
6584 // Return the message from the queue and pop it
6585 // or return 0 if there are no messages.
6586 int Get() {
6587 MutexLock lock(&mu_);
6588 if (q_.empty()) return 0;
6589 int res = q_.front();
6590 q_.pop();
6591 ANNOTATE_PCQ_GET(this);
6592 return res;
6594 private:
6595 Mutex mu_;
6596 queue<int> q_;
6600 // test142: TN. Check PCQ_* annotations. {{{1
6601 namespace test142 {
6602 // Putter writes to array[i] and sends a message 'i'.
6603 // Getters receive messages and read array[message].
6604 // PCQ_* annotations calm down the hybrid detectors.
6606 const int N = 1000;
6607 int array[N+1];
6609 FifoMessageQueue q;
6611 void Putter() {
6612 for (int i = 1; i <= N; i++) {
6613 array[i] = i*i;
6614 q.Put(i);
6615 usleep(1000);
6619 void Getter() {
6620 int non_zero_received = 0;
6621 for (int i = 1; i <= N; i++) {
6622 int res = q.Get();
6623 if (res > 0) {
6624 CHECK(array[res] == res * res);
6625 non_zero_received++;
6627 usleep(1000);
6629 printf("T=%zd: non_zero_received=%d\n",
6630 (size_t)pthread_self(), non_zero_received);
6633 void Run() {
6634 printf("test142: tests PCQ annotations\n");
6635 MyThreadArray t(Putter, Getter, Getter);
6636 t.Start();
6637 t.Join();
6639 REGISTER_TEST(Run, 142)
6640 } // namespace test142
6643 // test143: TP. Check PCQ_* annotations. {{{1
6644 namespace test143 {
6645 // True positive.
6646 // We have a race on GLOB between Putter and one of the Getters.
6647 // Pure h-b will not see it.
6648 // If FifoMessageQueue was annotated using HAPPENS_BEFORE/AFTER, the race would
6649 // be missed too.
6650 // PCQ_* annotations do not hide this race.
6651 int GLOB = 0;
6653 FifoMessageQueue q;
6655 void Putter() {
6656 GLOB = 1;
6657 q.Put(1);
6660 void Getter() {
6661 usleep(10000);
6662 q.Get();
6663 CHECK(GLOB == 1); // Race here
6666 void Run() {
6667 q.Put(1);
6668 if (!Tsan_PureHappensBefore()) {
6669 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "true races");
6671 printf("test143: tests PCQ annotations (true positive)\n");
6672 MyThreadArray t(Putter, Getter, Getter);
6673 t.Start();
6674 t.Join();
6676 REGISTER_TEST(Run, 143);
6677 } // namespace test143
6682 // test300: {{{1
6683 namespace test300 {
6684 int GLOB = 0;
6685 void Run() {
6687 REGISTER_TEST2(Run, 300, RACE_DEMO)
6688 } // namespace test300
6690 // test301: Simple race. {{{1
6691 namespace test301 {
6692 Mutex mu1; // This Mutex guards var.
6693 Mutex mu2; // This Mutex is not related to var.
6694 int var; // GUARDED_BY(mu1)
6696 void Thread1() { // Runs in thread named 'test-thread-1'.
6697 MutexLock lock(&mu1); // Correct Mutex.
6698 var = 1;
6701 void Thread2() { // Runs in thread named 'test-thread-2'.
6702 MutexLock lock(&mu2); // Wrong Mutex.
6703 var = 2;
6706 void Run() {
6707 var = 0;
6708 printf("test301: simple race.\n");
6709 MyThread t1(Thread1, NULL, "test-thread-1");
6710 MyThread t2(Thread2, NULL, "test-thread-2");
6711 t1.Start();
6712 t2.Start();
6713 t1.Join();
6714 t2.Join();
6716 REGISTER_TEST2(Run, 301, RACE_DEMO)
6717 } // namespace test301
6719 // test302: Complex race which happens at least twice. {{{1
6720 namespace test302 {
6721 // In this test we have many different accesses to GLOB and only one access
6722 // is not synchronized properly.
6723 int GLOB = 0;
6725 Mutex MU1;
6726 Mutex MU2;
6727 void Worker() {
6728 for(int i = 0; i < 100; i++) {
6729 switch(i % 4) {
6730 case 0:
6731 // This read is protected correctly.
6732 MU1.Lock(); CHECK(GLOB >= 0); MU1.Unlock();
6733 break;
6734 case 1:
6735 // Here we use the wrong lock! This is the cause of the race.
6736 MU2.Lock(); CHECK(GLOB >= 0); MU2.Unlock();
6737 break;
6738 case 2:
6739 // This read is protected correctly.
6740 MU1.Lock(); CHECK(GLOB >= 0); MU1.Unlock();
6741 break;
6742 case 3:
6743 // This write is protected correctly.
6744 MU1.Lock(); GLOB++; MU1.Unlock();
6745 break;
6747 // sleep a bit so that the threads interleave
6748 // and the race happens at least twice.
6749 usleep(100);
6753 void Run() {
6754 printf("test302: Complex race that happens twice.\n");
6755 MyThread t1(Worker), t2(Worker);
6756 t1.Start();
6757 t2.Start();
6758 t1.Join(); t2.Join();
6760 REGISTER_TEST2(Run, 302, RACE_DEMO)
6761 } // namespace test302
6764 // test303: Need to trace the memory to understand the report. {{{1
6765 namespace test303 {
6766 int GLOB = 0;
6768 Mutex MU;
6769 void Worker1() { CHECK(GLOB >= 0); }
6770 void Worker2() { MU.Lock(); GLOB=1; MU.Unlock();}
6772 void Run() {
6773 printf("test303: a race that needs annotations.\n");
6774 ANNOTATE_TRACE_MEMORY(&GLOB);
6775 MyThreadArray t(Worker1, Worker2);
6776 t.Start();
6777 t.Join();
6779 REGISTER_TEST2(Run, 303, RACE_DEMO)
6780 } // namespace test303
6784 // test304: Can not trace the memory, since it is a library object. {{{1
6785 namespace test304 {
6786 string *STR;
6787 Mutex MU;
6789 void Worker1() {
6790 sleep(0);
6791 ANNOTATE_CONDVAR_SIGNAL((void*)0xDEADBEAF);
6792 MU.Lock(); CHECK(STR->length() >= 4); MU.Unlock();
6794 void Worker2() {
6795 sleep(1);
6796 ANNOTATE_CONDVAR_SIGNAL((void*)0xDEADBEAF);
6797 CHECK(STR->length() >= 4); // Unprotected!
6799 void Worker3() {
6800 sleep(2);
6801 ANNOTATE_CONDVAR_SIGNAL((void*)0xDEADBEAF);
6802 MU.Lock(); CHECK(STR->length() >= 4); MU.Unlock();
6804 void Worker4() {
6805 sleep(3);
6806 ANNOTATE_CONDVAR_SIGNAL((void*)0xDEADBEAF);
6807 MU.Lock(); *STR += " + a very very long string"; MU.Unlock();
6810 void Run() {
6811 STR = new string ("The String");
6812 printf("test304: a race where memory tracing does not work.\n");
6813 MyThreadArray t(Worker1, Worker2, Worker3, Worker4);
6814 t.Start();
6815 t.Join();
6817 printf("%s\n", STR->c_str());
6818 delete STR;
6820 REGISTER_TEST2(Run, 304, RACE_DEMO)
6821 } // namespace test304
6825 // test305: A bit more tricky: two locks used inconsistently. {{{1
6826 namespace test305 {
6827 int GLOB = 0;
6829 // In this test GLOB is protected by MU1 and MU2, but inconsistently.
6830 // The TRACES observed by helgrind are:
6831 // TRACE[1]: Access{T2/S2 wr} -> new State{Mod; #LS=2; #SS=1; T2/S2}
6832 // TRACE[2]: Access{T4/S9 wr} -> new State{Mod; #LS=1; #SS=2; T2/S2, T4/S9}
6833 // TRACE[3]: Access{T5/S13 wr} -> new State{Mod; #LS=1; #SS=3; T2/S2, T4/S9, T5/S13}
6834 // TRACE[4]: Access{T6/S19 wr} -> new State{Mod; #LS=0; #SS=4; T2/S2, T4/S9, T5/S13, T6/S19}
6836 // The guilty access is either Worker2() or Worker4(), depending on
6837 // which mutex is supposed to protect GLOB.
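// (Editor's note: #LS above is the size of the candidate lockset for GLOB.
// It shrinks 2 -> 1 -> 1 -> 0 as the accesses arrive: Worker1 holds
// {MU1, MU2}, Worker2 only MU1 (intersection {MU1}), Worker3 both again
// (intersection unchanged), Worker4 only MU2 (intersection empty).
// When the lockset becomes empty the detector reports the race.)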
6838 Mutex MU1;
6839 Mutex MU2;
6840 void Worker1() { MU1.Lock(); MU2.Lock(); GLOB=1; MU2.Unlock(); MU1.Unlock(); }
6841 void Worker2() { MU1.Lock(); GLOB=2; MU1.Unlock(); }
6842 void Worker3() { MU1.Lock(); MU2.Lock(); GLOB=3; MU2.Unlock(); MU1.Unlock(); }
6843 void Worker4() { MU2.Lock(); GLOB=4; MU2.Unlock(); }
6845 void Run() {
6846 ANNOTATE_TRACE_MEMORY(&GLOB);
6847 printf("test305: simple race.\n");
6848 MyThread t1(Worker1), t2(Worker2), t3(Worker3), t4(Worker4);
6849 t1.Start(); usleep(100);
6850 t2.Start(); usleep(100);
6851 t3.Start(); usleep(100);
6852 t4.Start(); usleep(100);
6853 t1.Join(); t2.Join(); t3.Join(); t4.Join();
6855 REGISTER_TEST2(Run, 305, RACE_DEMO)
6856 } // namespace test305
6858 // test306: Two locks are used to protect a var. {{{1
6859 namespace test306 {
6860 int GLOB = 0;
6861 // Thread1 and Thread2 access the var under two locks.
6862 // Thread3 uses no locks.
6864 Mutex MU1;
6865 Mutex MU2;
6866 void Worker1() { MU1.Lock(); MU2.Lock(); GLOB=1; MU2.Unlock(); MU1.Unlock(); }
6867 void Worker2() { MU1.Lock(); MU2.Lock(); GLOB=3; MU2.Unlock(); MU1.Unlock(); }
6868 void Worker3() { GLOB=4; }
6870 void Run() {
6871 ANNOTATE_TRACE_MEMORY(&GLOB);
6872 printf("test306: simple race.\n");
6873 MyThread t1(Worker1), t2(Worker2), t3(Worker3);
6874 t1.Start(); usleep(100);
6875 t2.Start(); usleep(100);
6876 t3.Start(); usleep(100);
6877 t1.Join(); t2.Join(); t3.Join();
6879 REGISTER_TEST2(Run, 306, RACE_DEMO)
6880 } // namespace test306
6882 // test307: Simple race, code with control flow {{{1
6883 namespace test307 {
6884 int *GLOB = 0;
6885 volatile /*to fool the compiler*/ bool some_condition = true;
6888 void SomeFunc() { }
6890 int FunctionWithControlFlow() {
6891 int unrelated_stuff = 0;
6892 unrelated_stuff++;
6893 SomeFunc(); // "--keep-history=1" will point somewhere here.
6894 if (some_condition) { // Or here
6895 if (some_condition) {
6896 unrelated_stuff++; // Or here.
6897 unrelated_stuff++;
6898 (*GLOB)++; // "--keep-history=2" will point here (experimental).
6901 usleep(100000);
6902 return unrelated_stuff;
6905 void Worker1() { FunctionWithControlFlow(); }
6906 void Worker2() { Worker1(); }
6907 void Worker3() { Worker2(); }
6908 void Worker4() { Worker3(); }
6910 void Run() {
6911 GLOB = new int;
6912 *GLOB = 1;
6913 printf("test307: simple race, code with control flow\n");
6914 MyThreadArray t1(Worker1, Worker2, Worker3, Worker4);
6915 t1.Start();
6916 t1.Join();
6918 REGISTER_TEST2(Run, 307, RACE_DEMO)
6919 } // namespace test307
6921 // test308: Example of double-checked-locking {{{1
6922 namespace test308 {
6923 struct Foo {
6924 int a;
6927 static int is_inited = 0;
6928 static Mutex lock;
6929 static Foo *foo;
6931 void InitMe() {
6932 if (!is_inited) {
6933 lock.Lock();
6934 if (!is_inited) {
6935 foo = new Foo;
6936 foo->a = 42;
6937 is_inited = 1;
6939 lock.Unlock();
6943 void UseMe() {
6944 InitMe();
6945 CHECK(foo && foo->a == 42);
6948 void Worker1() { UseMe(); }
6949 void Worker2() { UseMe(); }
6950 void Worker3() { UseMe(); }
6953 void Run() {
6954 ANNOTATE_TRACE_MEMORY(&is_inited);
6955 printf("test308: Example of double-checked-locking\n");
6956 MyThreadArray t1(Worker1, Worker2, Worker3);
6957 t1.Start();
6958 t1.Join();
6960 REGISTER_TEST2(Run, 308, RACE_DEMO)
6961 } // namespace test308
6963 // test309: Simple race on an STL object. {{{1
6964 namespace test309 {
6965 string GLOB;
6967 void Worker1() {
6968 GLOB="Thread1";
6970 void Worker2() {
6971 usleep(100000);
6972 GLOB="Booooooooooo";
6975 void Run() {
6976 printf("test309: simple race on an STL object.\n");
6977 MyThread t1(Worker1), t2(Worker2);
6978 t1.Start();
6979 t2.Start();
6980 t1.Join(); t2.Join();
6982 REGISTER_TEST2(Run, 309, RACE_DEMO)
6983 } // namespace test309
6985 // test310: One more simple race. {{{1
6986 namespace test310 {
6987 int *PTR = NULL; // GUARDED_BY(mu1)
6989 Mutex mu1; // Protects PTR.
6990 Mutex mu2; // Unrelated to PTR.
6991 Mutex mu3; // Unrelated to PTR.
6993 void Writer1() {
6994 MutexLock lock3(&mu3); // This lock is unrelated to PTR.
6995 MutexLock lock1(&mu1); // Protect PTR.
6996 *PTR = 1;
6999 void Writer2() {
7000 MutexLock lock2(&mu2); // This lock is unrelated to PTR.
7001 MutexLock lock1(&mu1); // Protect PTR.
7002 int some_unrelated_stuff = 0;
7003 if (some_unrelated_stuff == 0)
7004 some_unrelated_stuff++;
7005 *PTR = 2;
7009 void Reader() {
7010 MutexLock lock2(&mu2); // Oh, gosh, this is a wrong mutex!
7011 CHECK(*PTR <= 2);
7014 // Some functions to make the stack trace non-trivial.
7015 void DoWrite1() { Writer1(); }
7016 void Thread1() { DoWrite1(); }
7018 void DoWrite2() { Writer2(); }
7019 void Thread2() { DoWrite2(); }
7021 void DoRead() { Reader(); }
7022 void Thread3() { DoRead(); }
7024 void Run() {
7025 printf("test310: simple race.\n");
7026 PTR = new int;
7027 ANNOTATE_TRACE_MEMORY(PTR);
7028 *PTR = 0;
7029 MyThread t1(Thread1, NULL, "writer1"),
7030 t2(Thread2, NULL, "writer2"),
7031 t3(Thread3, NULL, "buggy reader");
7032 t1.Start();
7033 t2.Start();
7034 usleep(100000); // Let the writers go first.
7035 t3.Start();
7037 t1.Join();
7038 t2.Join();
7039 t3.Join();
7041 REGISTER_TEST2(Run, 310, RACE_DEMO)
7042 } // namespace test310
7044 // test311: Yet another simple race. {{{1
7045 namespace test311 {
7046 int *PTR = NULL; // GUARDED_BY(mu1)
7048 Mutex mu1; // Protects PTR.
7049 Mutex mu2; // Unrelated to PTR.
7050 Mutex mu3; // Unrelated to PTR.
7052 void GoodWriter1() {
7053 MutexLock lock3(&mu3); // This lock is unrelated to PTR.
7054 MutexLock lock1(&mu1); // Protect PTR.
7055 *PTR = 1;
7058 void GoodWriter2() {
7059 MutexLock lock2(&mu2); // This lock is unrelated to PTR.
7060 MutexLock lock1(&mu1); // Protect PTR.
7061 *PTR = 2;
7064 void GoodReader() {
7065 MutexLock lock1(&mu1); // Protect PTR.
7066 CHECK(*PTR >= 0);
7069 void BuggyWriter() {
7070 MutexLock lock2(&mu2); // Wrong mutex!
7071 *PTR = 3;
7074 // Some functions to make the stack trace non-trivial.
7075 void DoWrite1() { GoodWriter1(); }
7076 void Thread1() { DoWrite1(); }
7078 void DoWrite2() { GoodWriter2(); }
7079 void Thread2() { DoWrite2(); }
7081 void DoGoodRead() { GoodReader(); }
7082 void Thread3() { DoGoodRead(); }
7084 void DoBadWrite() { BuggyWriter(); }
7085 void Thread4() { DoBadWrite(); }
7087 void Run() {
7088 printf("test311: simple race.\n");
7089 PTR = new int;
7090 ANNOTATE_TRACE_MEMORY(PTR);
7091 *PTR = 0;
7092 MyThread t1(Thread1, NULL, "good writer1"),
7093 t2(Thread2, NULL, "good writer2"),
7094 t3(Thread3, NULL, "good reader"),
7095 t4(Thread4, NULL, "buggy writer");
7096 t1.Start();
7097 t3.Start();
7098 // t2 goes after t3. This way a pure happens-before detector has no chance.
7099 usleep(10000);
7100 t2.Start();
7101 usleep(100000); // Let the good folks go first.
7102 t4.Start();
7104 t1.Join();
7105 t2.Join();
7106 t3.Join();
7107 t4.Join();
7109 REGISTER_TEST2(Run, 311, RACE_DEMO)
7110 } // namespace test311
7112 // test312: A test with a very deep stack. {{{1
7113 namespace test312 {
7114 int GLOB = 0;
7115 void RaceyWrite() { GLOB++; }
7116 void Func1() { RaceyWrite(); }
7117 void Func2() { Func1(); }
7118 void Func3() { Func2(); }
7119 void Func4() { Func3(); }
7120 void Func5() { Func4(); }
7121 void Func6() { Func5(); }
7122 void Func7() { Func6(); }
7123 void Func8() { Func7(); }
7124 void Func9() { Func8(); }
7125 void Func10() { Func9(); }
7126 void Func11() { Func10(); }
7127 void Func12() { Func11(); }
7128 void Func13() { Func12(); }
7129 void Func14() { Func13(); }
7130 void Func15() { Func14(); }
7131 void Func16() { Func15(); }
7132 void Func17() { Func16(); }
7133 void Func18() { Func17(); }
7134 void Func19() { Func18(); }
7135 void Worker() { Func19(); }
7136 void Run() {
7137 printf("test312: simple race with deep stack.\n");
7138 MyThreadArray t(Worker, Worker, Worker);
7139 t.Start();
7140 t.Join();
7142 REGISTER_TEST2(Run, 312, RACE_DEMO)
7143 } // namespace test312
// test313 TP: test for thread graph output {{{1
namespace test313 {
BlockingCounter *blocking_counter;
int GLOB = 0;

// Worker(N) will do 2^N increments of GLOB, each increment in a separate thread
void Worker(long depth) {
  CHECK(depth >= 0);
  if (depth > 0) {
    ThreadPool pool(2);
    pool.StartWorkers();
    pool.Add(NewCallback(Worker, depth-1));
    pool.Add(NewCallback(Worker, depth-1));
  } else {
    GLOB++; // Race here
  }
}
void Run() {
  printf("test313: positive\n");
  Worker(4);
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 313, RACE_DEMO)
} // namespace test313
// test400: Demo of a simple false positive. {{{1
namespace test400 {
static Mutex mu;
static vector<int> *vec; // GUARDED_BY(mu);

void InitAllBeforeStartingThreads() {
  vec = new vector<int>;
  vec->push_back(1);
  vec->push_back(2);
}

void Thread1() {
  MutexLock lock(&mu);
  vec->pop_back();
}

void Thread2() {
  MutexLock lock(&mu);
  vec->pop_back();
}

//---- Sub-optimal code ---------
size_t NumberOfElementsLeft() {
  MutexLock lock(&mu);
  return vec->size();
}

void WaitForAllThreadsToFinish_InefficientAndTsanUnfriendly() {
  while (NumberOfElementsLeft()) {
    ; // sleep or print or do nothing.
  }
  // It is now safe to access vec w/o lock.
  // But a hybrid detector (like ThreadSanitizer) can't see it.
  // Solutions:
  //   1. Use a pure happens-before detector (e.g. "tsan --pure-happens-before").
  //   2. Call ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&mu)
  //      in InitAllBeforeStartingThreads() (see the sketch below).
  //   3. (preferred) Use WaitForAllThreadsToFinish_Good() (see below).
  CHECK(vec->empty());
  delete vec;
}
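
// A hedged sketch (not used by this test) of solution 2 from the comment
// above. The helper name InitAllBeforeStartingThreads_Annotated is made up
// for illustration; the only change compared to the plain initializer is the
// extra annotation, which tells a hybrid detector to treat the Lock/Unlock
// pairs on mu as happens-before arcs, so the unlocked accesses to vec after
// the busy-wait are not reported.
void InitAllBeforeStartingThreads_Annotated() {
  vec = new vector<int>;
  vec->push_back(1);
  vec->push_back(2);
  ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&mu);
}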

//----- Better code -----------

bool NoElementsLeft(vector<int> *v) {
  return v->empty();
}

void WaitForAllThreadsToFinish_Good() {
  mu.LockWhen(Condition(NoElementsLeft, vec));
  mu.Unlock();

  // It is now safe to access vec w/o lock.
  CHECK(vec->empty());
  delete vec;
}

void Run() {
  MyThreadArray t(Thread1, Thread2);
  InitAllBeforeStartingThreads();
  t.Start();
  WaitForAllThreadsToFinish_InefficientAndTsanUnfriendly();
  // WaitForAllThreadsToFinish_Good();
  t.Join();
}
REGISTER_TEST2(Run, 400, RACE_DEMO)
} // namespace test400
// test401: Demo of false positive caused by reference counting. {{{1
namespace test401 {
// A simplified example of reference counting.
// DecRef() does the ref count decrement (AtomicIncrement by -1) in a way
// unfriendly to race detectors.
// DecRefAnnotated() does the same in a friendly way.

static vector<int> *vec;
static int ref_count;

void InitAllBeforeStartingThreads(int number_of_threads) {
  vec = new vector<int>;
  vec->push_back(1);
  ref_count = number_of_threads;
}

// Correct, but unfriendly to race detectors.
int DecRef() {
  return AtomicIncrement(&ref_count, -1);
}

// Correct and friendly to race detectors.
int DecRefAnnotated() {
  ANNOTATE_CONDVAR_SIGNAL(&ref_count);
  int res = AtomicIncrement(&ref_count, -1);
  if (res == 0) {
    ANNOTATE_CONDVAR_WAIT(&ref_count);
  }
  return res;
}

void ThreadWorker() {
  CHECK(ref_count > 0);
  CHECK(vec->size() == 1);
  if (DecRef() == 0) { // Use DecRefAnnotated() instead! (see the sketch below)
    // No one uses vec now ==> delete it.
    delete vec; // A false race may be reported here.
    vec = NULL;
  }
}
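
// A hedged sketch (not exercised by Run() below) of the detector-friendly
// variant recommended in the comment above. The name ThreadWorkerAnnotated
// is made up for illustration; the only difference from ThreadWorker() is
// the call to DecRefAnnotated(), whose signal/wait annotations publish the
// other threads' accesses to vec before the last owner deletes it.
void ThreadWorkerAnnotated() {
  CHECK(ref_count > 0);
  CHECK(vec->size() == 1);
  if (DecRefAnnotated() == 0) {
    // No one uses vec now ==> delete it. No false race should be reported.
    delete vec;
    vec = NULL;
  }
}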

void Run() {
  MyThreadArray t(ThreadWorker, ThreadWorker, ThreadWorker);
  InitAllBeforeStartingThreads(3 /*number of threads*/);
  t.Start();
  t.Join();
  CHECK(vec == 0);
}
REGISTER_TEST2(Run, 401, RACE_DEMO)
} // namespace test401
// test501: Manually call PRINT_* annotations {{{1
namespace test501 {
int COUNTER = 0;
int GLOB = 0;
Mutex muCounter, muGlob[65];

void Worker() {
  muCounter.Lock();
  int myId = ++COUNTER;
  muCounter.Unlock();

  usleep(100);

  muGlob[myId].Lock();
  muGlob[0].Lock();
  GLOB++;
  muGlob[0].Unlock();
  muGlob[myId].Unlock();
}

void Worker_1() {
  MyThreadArray ta (Worker, Worker, Worker, Worker);
  ta.Start();
  usleep(500000);
  ta.Join ();
}

void Worker_2() {
  MyThreadArray ta (Worker_1, Worker_1, Worker_1, Worker_1);
  ta.Start();
  usleep(300000);
  ta.Join ();
}

void Run() {
  ANNOTATE_RESET_STATS();
  printf("test501: Manually call PRINT_* annotations.\n");
  MyThreadArray ta (Worker_2, Worker_2, Worker_2, Worker_2);
  ta.Start();
  usleep(100000);
  ta.Join ();
  ANNOTATE_PRINT_MEMORY_USAGE(0);
  ANNOTATE_PRINT_STATS();
}

REGISTER_TEST2(Run, 501, FEATURE | EXCLUDE_FROM_ALL)
} // namespace test501
// test502: produce lots of segments without cross-thread relations {{{1
namespace test502 {

/*
 * This test produces ~1Gb of memory usage when run with the following options:
 *
 * --tool=helgrind
 * --trace-after-race=0
 * --num-callers=2
 * --more-context=no
 */

Mutex MU;
int GLOB = 0;

void TP() {
  for (int i = 0; i < 750000; i++) {
    MU.Lock();
    GLOB++;
    MU.Unlock();
  }
}

void Run() {
  MyThreadArray t(TP, TP);
  printf("test502: produce lots of segments without cross-thread relations\n");

  t.Start();
  t.Join();
}

REGISTER_TEST2(Run, 502, MEMORY_USAGE | PRINT_STATS | EXCLUDE_FROM_ALL
               | PERFORMANCE)
} // namespace test502
// test503: produce lots of segments with simple HB-relations {{{1
// HB cache-miss rate is ~55%
namespace test503 {

// |- | | | | |
// | \| | | | |
// | |- | | | |
// | | \| | | |
// | | |- | | |
// | | | \| | |
// | | | |- | |
// | | | | \| |
// | | | | |- |
// | | | | | \|
// | | | | | |----
//->| | | | | |
// |- | | | | |
// | \| | | | |
// ...

const int N_threads = 32;
const int ARRAY_SIZE = 128;
int GLOB[ARRAY_SIZE];
ProducerConsumerQueue *Q[N_threads];
int GLOB_limit = 100000;
int count = -1;

void Worker(){
  int myId = AtomicIncrement(&count, 1);

  ProducerConsumerQueue &myQ = *Q[myId], &nextQ = *Q[(myId+1) % N_threads];

  // this code produces a new SS with each new segment
  while (myQ.Get() != NULL) {
    for (int i = 0; i < ARRAY_SIZE; i++)
      GLOB[i]++;

    if (myId == 0 && GLOB[0] > GLOB_limit) {
      // Stop all threads
      for (int i = 0; i < N_threads; i++)
        Q[i]->Put(NULL);
    } else
      nextQ.Put(GLOB);
  }
}

void Run() {
  printf("test503: produce lots of segments with simple HB-relations\n");
  for (int i = 0; i < N_threads; i++)
    Q[i] = new ProducerConsumerQueue(1);
  Q[0]->Put(GLOB);

  {
    ThreadPool pool(N_threads);
    pool.StartWorkers();
    for (int i = 0; i < N_threads; i++) {
      pool.Add(NewCallback(Worker));
    }
  } // all folks are joined here.

  for (int i = 0; i < N_threads; i++)
    delete Q[i];
}

REGISTER_TEST2(Run, 503, MEMORY_USAGE | PRINT_STATS
               | PERFORMANCE | EXCLUDE_FROM_ALL)
} // namespace test503
// test504: force massive cache fetch-wback (50% misses, mostly CacheLineZ) {{{1
namespace test504 {

const int N_THREADS          = 2,
          HG_CACHELINE_COUNT = 1 << 16,
          HG_CACHELINE_SIZE  = 1 << 6,
          HG_CACHE_SIZE = HG_CACHELINE_COUNT * HG_CACHELINE_SIZE;

// Using int (4 bytes) instead of byte elements gives ~4x the speed of the
// byte test; combined with the 4x array size this is a 16x multiplier over
// the cache size, so the memory still cached at the end can be neglected.
const int ARRAY_SIZE = 4 * HG_CACHE_SIZE,
          ITERATIONS = 30;
int array[ARRAY_SIZE];

int count = 0;
Mutex count_mu;

void Worker() {
  count_mu.Lock();
  int myId = ++count;
  count_mu.Unlock();

  // all threads write to different memory locations,
  // so no synchronization mechanisms are needed
  int lower_bound = ARRAY_SIZE * (myId-1) / N_THREADS,
      upper_bound = ARRAY_SIZE * ( myId ) / N_THREADS;
  for (int j = 0; j < ITERATIONS; j++)
    for (int i = lower_bound; i < upper_bound;
         i += HG_CACHELINE_SIZE / sizeof(array[0])) {
      array[i] = i; // each array-write generates a cache miss
    }
}

void Run() {
  printf("test504: force massive CacheLineZ fetch-wback\n");
  MyThreadArray t(Worker, Worker);
  t.Start();
  t.Join();
}

REGISTER_TEST2(Run, 504, PERFORMANCE | PRINT_STATS | EXCLUDE_FROM_ALL)
} // namespace test504
// test505: force massive cache fetch-wback (60% misses) {{{1
// modification of test504 - more threads, byte accesses and lots of mutexes
// so it produces lots of CacheLineF misses (30-50% of CacheLineZ misses)
namespace test505 {

const int N_THREADS          = 2,
          HG_CACHELINE_COUNT = 1 << 16,
          HG_CACHELINE_SIZE  = 1 << 6,
          HG_CACHE_SIZE = HG_CACHELINE_COUNT * HG_CACHELINE_SIZE;

const int ARRAY_SIZE = 4 * HG_CACHE_SIZE,
          ITERATIONS = 3;
int64_t array[ARRAY_SIZE];

int count = 0;
Mutex count_mu;

void Worker() {
  const int N_MUTEXES = 5;
  Mutex mu[N_MUTEXES];
  count_mu.Lock();
  int myId = ++count;
  count_mu.Unlock();

  // all threads write to different memory locations,
  // so no synchronization mechanisms are needed
  int lower_bound = ARRAY_SIZE * (myId-1) / N_THREADS,
      upper_bound = ARRAY_SIZE * ( myId ) / N_THREADS;
  for (int j = 0; j < ITERATIONS; j++)
    for (int mutex_id = 0; mutex_id < N_MUTEXES; mutex_id++) {
      Mutex *m = & mu[mutex_id];
      m->Lock();
      for (int i = lower_bound + mutex_id, cnt = 0;
           i < upper_bound;
           i += HG_CACHELINE_SIZE / sizeof(array[0]), cnt++) {
        array[i] = i; // each array-write generates a cache miss
      }
      m->Unlock();
    }
}

void Run() {
  printf("test505: force massive CacheLineF fetch-wback\n");
  MyThreadArray t(Worker, Worker);
  t.Start();
  t.Join();
}

REGISTER_TEST2(Run, 505, PERFORMANCE | PRINT_STATS | EXCLUDE_FROM_ALL)
} // namespace test505
// test506: massive HB's using Barriers {{{1
// HB cache miss is ~40%
// segments consume 10x more memory than SSs
// modification of test39
namespace test506 {
#ifndef NO_BARRIER
// Same as test17 but uses Barrier class (pthread_barrier_t).
int GLOB = 0;
const int N_threads = 64,
          ITERATIONS = 1000;
Barrier *barrier[ITERATIONS];
Mutex MU;

void Worker() {
  for (int i = 0; i < ITERATIONS; i++) {
    MU.Lock();
    GLOB++;
    MU.Unlock();
    barrier[i]->Block();
  }
}
void Run() {
  printf("test506: massive HB's using Barriers\n");
  for (int i = 0; i < ITERATIONS; i++) {
    barrier[i] = new Barrier(N_threads);
  }
  {
    ThreadPool pool(N_threads);
    pool.StartWorkers();
    for (int i = 0; i < N_threads; i++) {
      pool.Add(NewCallback(Worker));
    }
  } // all folks are joined here.
  CHECK(GLOB == N_threads * ITERATIONS);
  for (int i = 0; i < ITERATIONS; i++) {
    delete barrier[i];
  }
}
REGISTER_TEST2(Run, 506, PERFORMANCE | PRINT_STATS | EXCLUDE_FROM_ALL);
#endif // NO_BARRIER
} // namespace test506
// test507: vgHelgrind_initIterAtFM/stackClear benchmark {{{1
// vgHelgrind_initIterAtFM/stackClear consume ~8.5%/5.5% CPU
namespace test507 {
const int N_THREADS   = 1,
          BUFFER_SIZE = 1,
          ITERATIONS  = 1 << 20;

void Foo() {
  struct T {
    char temp;
    T() {
      ANNOTATE_RWLOCK_CREATE(&temp);
    }
    ~T() {
      ANNOTATE_RWLOCK_DESTROY(&temp);
    }
  } s[BUFFER_SIZE];
  s->temp = '\0';
}

void Worker() {
  for (int j = 0; j < ITERATIONS; j++) {
    Foo();
  }
}

void Run() {
  printf("test507: vgHelgrind_initIterAtFM/stackClear benchmark\n");
  {
    ThreadPool pool(N_THREADS);
    pool.StartWorkers();
    for (int i = 0; i < N_THREADS; i++) {
      pool.Add(NewCallback(Worker));
    }
  } // all folks are joined here.
}
REGISTER_TEST2(Run, 507, EXCLUDE_FROM_ALL);
} // namespace test507
// test508: cmp_WordVecs_for_FM benchmark {{{1
// 50+% of CPU consumption by cmp_WordVecs_for_FM
namespace test508 {
const int N_THREADS   = 1,
          BUFFER_SIZE = 1 << 10,
          ITERATIONS  = 1 << 9;

void Foo() {
  struct T {
    char temp;
    T() {
      ANNOTATE_RWLOCK_CREATE(&temp);
    }
    ~T() {
      ANNOTATE_RWLOCK_DESTROY(&temp);
    }
  } s[BUFFER_SIZE];
  s->temp = '\0';
}

void Worker() {
  for (int j = 0; j < ITERATIONS; j++) {
    Foo();
  }
}

void Run() {
  printf("test508: cmp_WordVecs_for_FM benchmark\n");
  {
    ThreadPool pool(N_THREADS);
    pool.StartWorkers();
    for (int i = 0; i < N_THREADS; i++) {
      pool.Add(NewCallback(Worker));
    }
  } // all folks are joined here.
}
REGISTER_TEST2(Run, 508, EXCLUDE_FROM_ALL);
} // namespace test508
// test509: avl_find_node benchmark {{{1
// 10+% of CPU consumption by avl_find_node
namespace test509 {
const int N_THREADS  = 16,
          ITERATIONS = 1 << 8;

void Worker() {
  std::vector<Mutex*> mu_list;
  for (int i = 0; i < ITERATIONS; i++) {
    Mutex * mu = new Mutex();
    mu_list.push_back(mu);
    mu->Lock();
  }
  for (int i = ITERATIONS - 1; i >= 0; i--) {
    Mutex * mu = mu_list[i];
    mu->Unlock();
    delete mu;
  }
}

void Run() {
  printf("test509: avl_find_node benchmark\n");
  {
    ThreadPool pool(N_THREADS);
    pool.StartWorkers();
    for (int i = 0; i < N_THREADS; i++) {
      pool.Add(NewCallback(Worker));
    }
  } // all folks are joined here.
}
REGISTER_TEST2(Run, 509, EXCLUDE_FROM_ALL);
} // namespace test509
// test510: SS-recycle test {{{1
// this test shows the case where only ~1% of SSs are recycled
namespace test510 {
const int N_THREADS  = 16,
          ITERATIONS = 1 << 10;
int GLOB = 0;

void Worker() {
  usleep(100000);
  for (int i = 0; i < ITERATIONS; i++) {
    ANNOTATE_CONDVAR_SIGNAL((void*)0xDeadBeef);
    GLOB++;
    usleep(10);
  }
}

void Run() {
  //ANNOTATE_BENIGN_RACE(&GLOB, "Test");
  printf("test510: SS-recycle test\n");
  {
    ThreadPool pool(N_THREADS);
    pool.StartWorkers();
    for (int i = 0; i < N_THREADS; i++) {
      pool.Add(NewCallback(Worker));
    }
  } // all folks are joined here.
}
REGISTER_TEST2(Run, 510, MEMORY_USAGE | PRINT_STATS | EXCLUDE_FROM_ALL);
} // namespace test510
// test511: Segment refcounting test ('1' refcounting) {{{1
namespace test511 {
int GLOB = 0;

void Run () {
  for (int i = 0; i < 300; i++) {
    ANNOTATE_CONDVAR_SIGNAL(&GLOB);
    usleep(1000);
    GLOB++;
    ANNOTATE_CONDVAR_WAIT(&GLOB);
    if (i % 100 == 0)
      ANNOTATE_PRINT_MEMORY_USAGE(0);
  }
}
REGISTER_TEST2(Run, 511, MEMORY_USAGE | PRINT_STATS | EXCLUDE_FROM_ALL);
} // namespace test511
// test512: Segment refcounting test ('S' refcounting) {{{1
namespace test512 {
int GLOB = 0;
sem_t SEM;

void Run () {
  sem_init(&SEM, 0, 0);
  for (int i = 0; i < 300; i++) {
    sem_post(&SEM);
    usleep(1000);
    GLOB++;
    sem_wait(&SEM);
    /*if (i % 100 == 0)
      ANNOTATE_PRINT_MEMORY_USAGE(0);*/
  }
  sem_destroy(&SEM);
}
REGISTER_TEST2(Run, 512, MEMORY_USAGE | PRINT_STATS | EXCLUDE_FROM_ALL);
} // namespace test512
// test513: --fast-mode benchmark {{{1
namespace test513 {

const int N_THREADS         = 2,
          HG_CACHELINE_SIZE = 1 << 6,
          ARRAY_SIZE        = HG_CACHELINE_SIZE * 512,
          MUTEX_ID_BITS     = 8;
//        MUTEX_ID_MASK = (1 << MUTEX_ID_BITS) - 1;

// Each thread has its own cache line and works on it intensively
const int ITERATIONS = 1024;
int array[N_THREADS][ARRAY_SIZE];

int count = 0;
Mutex count_mu;
Mutex mutex_arr[N_THREADS][MUTEX_ID_BITS];

void Worker() {
  count_mu.Lock();
  int myId = count++;
  count_mu.Unlock();

  // all threads write to different memory locations
  for (int j = 0; j < ITERATIONS; j++) {
    int mutex_mask = j & MUTEX_ID_BITS;
    for (int m = 0; m < MUTEX_ID_BITS; m++)
      if (mutex_mask & (1 << m))
        mutex_arr[myId][m].Lock();

    for (int i = 0; i < ARRAY_SIZE; i++) {
      array[myId][i] = i;
    }

    for (int m = 0; m < MUTEX_ID_BITS; m++)
      if (mutex_mask & (1 << m))
        mutex_arr[myId][m].Unlock();
  }
}

void Run() {
  printf("test513: --fast-mode benchmark\n");
  {
    ThreadPool pool(N_THREADS);
    pool.StartWorkers();
    for (int i = 0; i < N_THREADS; i++) {
      pool.Add(NewCallback(Worker));
    }
  } // all folks are joined here.
}

REGISTER_TEST2(Run, 513, PERFORMANCE | PRINT_STATS | EXCLUDE_FROM_ALL)
} // namespace test513
// End {{{1
// vim:shiftwidth=2:softtabstop=2:expandtab:foldmethod=marker