/*
  This file is part of Valgrind, a dynamic binary instrumentation
  framework.

  Copyright (C) 2008-2008 Google Inc
     opensource@google.com

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/
// Author: Konstantin Serebryany <opensource@google.com>
//
// Here we define a few simple classes that wrap pthread primitives.
//
// We need this to create unit tests for helgrind (or a similar tool)
// that will work with different threading frameworks.
//
// If one needs to test helgrind's support for another threading library,
// one can create a copy of this file and replace the pthread_ calls
// with the appropriate calls to that library.
//
// Note that some of the methods defined here are annotated with
// ANNOTATE_* macros defined in dynamic_annotations.h.
//
// DISCLAIMER: the classes defined in this header file
// are NOT intended for general use -- only for unit tests.
#ifndef THREAD_WRAPPERS_PTHREAD_H
#define THREAD_WRAPPERS_PTHREAD_H

#include <pthread.h>
#include <semaphore.h>
#include <unistd.h>
#include <queue>
#include <vector>    // std::vector is used by ThreadPool below.
#include <stdio.h>
#include <stdlib.h>  // malloc(), used by the Darwin memalign hack below.
#include <limits.h>  // INT_MAX

#ifdef VGO_darwin
#include <libkern/OSAtomic.h>
#define NO_BARRIER
#define NO_TLS
#endif
#include <string>
using namespace std;

#include <sys/time.h>
#include <time.h>

#include "../../drd/drd.h"
#define ANNOTATE_NO_OP(arg) do { } while(0)
#define ANNOTATE_EXPECT_RACE(addr, descr) \
    ANNOTATE_BENIGN_RACE_SIZED(addr, 4, "expected race")
static inline bool RunningOnValgrind() { return RUNNING_ON_VALGRIND; }

#include <assert.h>
#ifdef NDEBUG
# error "Pleeease, do not define NDEBUG"
#endif
#define CHECK assert
/// Set this to true if malloc() uses a mutex on your platform, as this may
/// introduce a happens-before arc for a pure happens-before race detector.
const bool kMallocUsesMutex = false;
/// Current time in milliseconds.
static inline int64_t GetCurrentTimeMillis() {
  struct timeval now;
  gettimeofday(&now, NULL);
  return now.tv_sec * 1000 + now.tv_usec / 1000;
}

/// Copy tv to ts adding offset in milliseconds.
static inline void timeval2timespec(timeval *const tv,
                                    timespec *ts,
                                    int64_t offset_milli) {
  const int64_t ten_9 = 1000000000LL;
  const int64_t ten_6 = 1000000LL;
  const int64_t ten_3 = 1000LL;
  int64_t now_nsec = (int64_t)tv->tv_sec * ten_9;
  now_nsec += (int64_t)tv->tv_usec * ten_3;
  int64_t then_nsec = now_nsec + offset_milli * ten_6;
  ts->tv_sec = then_nsec / ten_9;
  ts->tv_nsec = then_nsec % ten_9;
}
class CondVar;

#ifndef NO_SPINLOCK
/// helgrind does not (yet) support spin locks, so we annotate them.

#ifndef VGO_darwin
class SpinLock {
 public:
  SpinLock() {
    CHECK(0 == pthread_spin_init(&mu_, 0));
    ANNOTATE_RWLOCK_CREATE((void*)&mu_);
  }
  ~SpinLock() {
    ANNOTATE_RWLOCK_DESTROY((void*)&mu_);
    CHECK(0 == pthread_spin_destroy(&mu_));
  }
  void Lock() {
    CHECK(0 == pthread_spin_lock(&mu_));
    ANNOTATE_RWLOCK_ACQUIRED((void*)&mu_, 1);
  }
  void Unlock() {
    ANNOTATE_RWLOCK_RELEASED((void*)&mu_, 1);
    CHECK(0 == pthread_spin_unlock(&mu_));
  }
 private:
  pthread_spinlock_t mu_;
};

#else

class SpinLock {
 public:
  // Mac OS X version.
  SpinLock() : mu_(OS_SPINLOCK_INIT) {
    ANNOTATE_RWLOCK_CREATE((void*)&mu_);
  }
  ~SpinLock() {
    ANNOTATE_RWLOCK_DESTROY((void*)&mu_);
  }
  void Lock() {
    OSSpinLockLock(&mu_);
    ANNOTATE_RWLOCK_ACQUIRED((void*)&mu_, 1);
  }
  void Unlock() {
    ANNOTATE_RWLOCK_RELEASED((void*)&mu_, 1);
    OSSpinLockUnlock(&mu_);
  }
 private:
  OSSpinLock mu_;
};
#endif // VGO_darwin

#endif // NO_SPINLOCK
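
// Illustrative usage sketch (not part of the original header): SpinLock is a
// plain lock/unlock wrapper; `spin_counter` and SpinIncrementExample are
// invented names for this example only.
/*
  #ifndef NO_SPINLOCK
  static SpinLock example_spin;
  static int spin_counter = 0;       // protected by example_spin

  static void SpinIncrementExample() {
    example_spin.Lock();
    spin_counter++;
    example_spin.Unlock();
  }
  #endif
*/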
/// Just a boolean condition. Used by Mutex::LockWhen and similar.
template <typename T>
class Condition {
 public:
  typedef bool (*func_t)(void*);

  // Initialize both function pointers so that Eval() never reads an
  // uninitialized member.
  Condition(bool (*func)(T*), T* arg)
    : func0_(NULL), func1_(func), arg_(arg) {}

  Condition(bool (*func)())
    : func0_(func), func1_(NULL), arg_(NULL) {}

  bool Eval() const { return func1_ ? func1_(arg_) : func0_(); }

 private:
  bool (*func0_)();
  bool (*func1_)(T*);
  T *arg_;
};
/// Wrapper for pthread_mutex_t.
///
/// pthread_mutex_t is *not* a reader-writer lock,
/// so methods like ReaderLock() aren't really reader locks.
/// We cannot use pthread_rwlock_t because it
/// does not work with pthread_cond_t.
///
/// TODO: We still need to test reader locks with this class.
/// Implement a mode where pthread_rwlock_t will be used
/// instead of pthread_mutex_t (only when not used with CondVar or LockWhen).
class Mutex {
  friend class CondVar;
 public:
  Mutex() {
    CHECK(0 == pthread_mutex_init(&mu_, NULL));
    CHECK(0 == pthread_cond_init(&cv_, NULL));
    signal_at_unlock_ = true;  // Always signal at Unlock to make
                               // Mutex more friendly to hybrid detectors.
  }
  ~Mutex() {
    CHECK(0 == pthread_cond_destroy(&cv_));
    CHECK(0 == pthread_mutex_destroy(&mu_));
  }
  void Lock()          { CHECK(0 == pthread_mutex_lock(&mu_)); }
  bool TryLock()       { return (0 == pthread_mutex_trylock(&mu_)); }
  void Unlock() {
    if (signal_at_unlock_) {
      CHECK(0 == pthread_cond_signal(&cv_));
    }
    CHECK(0 == pthread_mutex_unlock(&mu_));
  }
  void ReaderLock()    { Lock(); }
  bool ReaderTryLock() { return TryLock(); }
  void ReaderUnlock()  { Unlock(); }

  template <typename T>
  void LockWhen(const Condition<T>& cond)       { Lock(); WaitLoop(cond); }
  template <typename T>
  void ReaderLockWhen(const Condition<T>& cond) { Lock(); WaitLoop(cond); }
  template <typename T>
  void Await(const Condition<T>& cond)          { WaitLoop(cond); }

  template <typename T>
  bool ReaderLockWhenWithTimeout(const Condition<T>& cond, int millis)
    { Lock(); return WaitLoopWithTimeout(cond, millis); }
  template <typename T>
  bool LockWhenWithTimeout(const Condition<T>& cond, int millis)
    { Lock(); return WaitLoopWithTimeout(cond, millis); }
  template <typename T>
  bool AwaitWithTimeout(const Condition<T>& cond, int millis)
    { return WaitLoopWithTimeout(cond, millis); }

 private:
  template <typename T>
  void WaitLoop(const Condition<T>& cond) {
    signal_at_unlock_ = true;
    while (cond.Eval() == false) {
      pthread_cond_wait(&cv_, &mu_);
    }
    ANNOTATE_CONDVAR_LOCK_WAIT(&cv_, &mu_);
  }

  template <typename T>
  bool WaitLoopWithTimeout(const Condition<T>& cond, int millis) {
    struct timeval now;
    struct timespec timeout;
    int retcode = 0;
    gettimeofday(&now, NULL);
    timeval2timespec(&now, &timeout, millis);

    signal_at_unlock_ = true;
    while (cond.Eval() == false && retcode == 0) {
      retcode = pthread_cond_timedwait(&cv_, &mu_, &timeout);
    }
    if (retcode == 0) {
      ANNOTATE_CONDVAR_LOCK_WAIT(&cv_, &mu_);
    }
    return cond.Eval();
  }

  // A hack. cv_ should be the first data member so that
  // ANNOTATE_CONDVAR_WAIT(&MU, &MU) and ANNOTATE_CONDVAR_SIGNAL(&MU) work.
  // (See also racecheck_unittest.cc)
  pthread_cond_t  cv_;
  pthread_mutex_t mu_;
  bool            signal_at_unlock_;  // Set to true if Wait was called.
};
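
// Illustrative usage sketch (not part of the original header): how a test
// might combine Condition<> with Mutex::LockWhen(). The names `flag`,
// IsFlagSet, WaiterExample and SetterExample are invented for this example.
/*
  static int flag = 0;
  static Mutex flag_mu;

  static bool IsFlagSet(int *f) { return *f != 0; }

  static void WaiterExample() {
    // Blocks until another thread sets `flag` while holding flag_mu.
    flag_mu.LockWhen(Condition<int>(IsFlagSet, &flag));
    flag_mu.Unlock();
  }

  static void SetterExample() {
    flag_mu.Lock();
    flag = 1;          // Unlock() signals cv_, waking up LockWhen().
    flag_mu.Unlock();
  }
*/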
class MutexLock {  // Scoped Mutex Locker/Unlocker
 public:
  MutexLock(Mutex *mu)
    : mu_(mu) {
    mu_->Lock();
  }
  ~MutexLock() {
    mu_->Unlock();
  }
 private:
  Mutex *mu_;
};
/// Wrapper for pthread_cond_t.
class CondVar {
 public:
  CondVar()  { CHECK(0 == pthread_cond_init(&cv_, NULL)); }
  ~CondVar() { CHECK(0 == pthread_cond_destroy(&cv_)); }
  void Wait(Mutex *mu) { CHECK(0 == pthread_cond_wait(&cv_, &mu->mu_)); }
  bool WaitWithTimeout(Mutex *mu, int millis) {
    struct timeval now;
    struct timespec timeout;
    gettimeofday(&now, NULL);
    timeval2timespec(&now, &timeout, millis);
    return 0 != pthread_cond_timedwait(&cv_, &mu->mu_, &timeout);
  }
  void Signal()    { CHECK(0 == pthread_cond_signal(&cv_)); }
  void SignalAll() { CHECK(0 == pthread_cond_broadcast(&cv_)); }
 private:
  pthread_cond_t cv_;
};
// pthreads do not allow using a condvar together with an rwlock, so we can't
// make the ReaderLock method of Mutex a real rw-lock.
// Hence we need a separate lock class to test reader locks.
#define NEEDS_SEPERATE_RW_LOCK
class RWLock {
 public:
  RWLock()  { CHECK(0 == pthread_rwlock_init(&mu_, NULL)); }
  ~RWLock() { CHECK(0 == pthread_rwlock_destroy(&mu_)); }
  void Lock()         { CHECK(0 == pthread_rwlock_wrlock(&mu_)); }
  void ReaderLock()   { CHECK(0 == pthread_rwlock_rdlock(&mu_)); }
  void Unlock()       { CHECK(0 == pthread_rwlock_unlock(&mu_)); }
  void ReaderUnlock() { CHECK(0 == pthread_rwlock_unlock(&mu_)); }
 private:
  pthread_cond_t dummy;  // Damn, this requires some redesign...
  pthread_rwlock_t mu_;
};

class ReaderLockScoped {  // Scoped RWLock Locker/Unlocker
 public:
  ReaderLockScoped(RWLock *mu)
    : mu_(mu) {
    mu_->ReaderLock();
  }
  ~ReaderLockScoped() {
    mu_->ReaderUnlock();
  }
 private:
  RWLock *mu_;
};

class WriterLockScoped {  // Scoped RWLock Locker/Unlocker
 public:
  WriterLockScoped(RWLock *mu)
    : mu_(mu) {
    mu_->Lock();
  }
  ~WriterLockScoped() {
    mu_->Unlock();
  }
 private:
  RWLock *mu_;
};
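
// Illustrative usage sketch (not part of the original header): the scoped
// helpers above are meant to be used like this; `counter` and the two
// function names are hypothetical.
/*
  static RWLock counter_lock;
  static int counter = 0;            // protected by counter_lock

  static int ReadCounterExample() {
    ReaderLockScoped lock(&counter_lock);   // rdlock in ctor, unlock in dtor
    return counter;
  }

  static void IncrementCounterExample() {
    WriterLockScoped lock(&counter_lock);   // wrlock in ctor, unlock in dtor
    counter++;
  }
*/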
/// Wrapper for pthread_create()/pthread_join().
class MyThread {
 public:
  MyThread(void* (*worker)(void *), void *arg = NULL, const char *name = NULL)
      :wpvpv_(worker), wvv_(), wvpv_(), arg_(arg), name_(name) {}
  MyThread(void (*worker)(void), void *arg = NULL, const char *name = NULL)
      :wpvpv_(), wvv_(worker), wvpv_(), arg_(arg), name_(name) {}
  MyThread(void (*worker)(void *), void *arg = NULL, const char *name = NULL)
      :wpvpv_(), wvv_(), wvpv_(worker), arg_(arg), name_(name) {}

  void Start() { CHECK(0 == pthread_create(&t_, NULL, ThreadBody, this)); }
  void Join()  { CHECK(0 == pthread_join(t_, NULL)); }
  pthread_t tid() const { return t_; }
 private:
  static void *ThreadBody(void *arg) {
    MyThread *my_thread = reinterpret_cast<MyThread*>(arg);
    if (my_thread->name_) {
      ANNOTATE_THREAD_NAME(my_thread->name_);
    }
    if (my_thread->wpvpv_)
      return my_thread->wpvpv_(my_thread->arg_);
    if (my_thread->wvpv_)
      my_thread->wvpv_(my_thread->arg_);
    if (my_thread->wvv_)
      my_thread->wvv_();
    return NULL;
  }
  pthread_t t_;
  void *(*wpvpv_)(void*);
  void (*wvv_)(void);
  void (*wvpv_)(void*);
  void *arg_;
  const char *name_;
};
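
// Illustrative usage sketch (not part of the original header): the three
// constructors accept the three worker signatures shown below; the worker
// names are invented for this example.
/*
  static void *WorkerReturningPtr(void *arg) { return arg; }
  static void  WorkerTakingPtr(void *arg)    { (void)arg; }
  static void  WorkerNoArgs()                { }

  static void RunThreadsExample() {
    MyThread t1(WorkerReturningPtr, NULL, "worker-1");
    MyThread t2(WorkerTakingPtr,    NULL, "worker-2");
    MyThread t3(WorkerNoArgs);
    t1.Start(); t2.Start(); t3.Start();
    t1.Join();  t2.Join();  t3.Join();
  }
*/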
/// Just a message queue.
class ProducerConsumerQueue {
 public:
  ProducerConsumerQueue(int unused) {
    //ANNOTATE_PCQ_CREATE(this);
  }
  ~ProducerConsumerQueue() {
    CHECK(q_.empty());
    //ANNOTATE_PCQ_DESTROY(this);
  }

  // Put.
  void Put(void *item) {
    mu_.Lock();
    q_.push(item);
    ANNOTATE_CONDVAR_SIGNAL(&mu_);  // LockWhen in Get()
    //ANNOTATE_PCQ_PUT(this);
    mu_.Unlock();
  }

  // Get.
  // Blocks if the queue is empty.
  void *Get() {
    mu_.LockWhen(Condition<typeof(q_)>(IsQueueNotEmpty, &q_));
    void * item = NULL;
    bool ok = TryGetInternal(&item);
    CHECK(ok);
    mu_.Unlock();
    return item;
  }

  // If the queue is not empty,
  // remove an element from the queue, put it into *res and return true.
  // Otherwise return false.
  bool TryGet(void **res) {
    mu_.Lock();
    bool ok = TryGetInternal(res);
    mu_.Unlock();
    return ok;
  }

 private:
  Mutex mu_;
  std::queue<void*> q_;  // protected by mu_

  // Requires mu_.
  bool TryGetInternal(void ** item_ptr) {
    if (q_.empty())
      return false;
    *item_ptr = q_.front();
    q_.pop();
    //ANNOTATE_PCQ_GET(this);
    return true;
  }

  static bool IsQueueNotEmpty(std::queue<void*> * queue) {
    return !queue->empty();
  }
};
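
// Illustrative usage sketch (not part of the original header): one producer
// Put()s items and one consumer Get()s them. Using a NULL item as a "stop"
// marker is this example's convention, not part of the class.
/*
  static ProducerConsumerQueue example_q(0);  // the int argument is unused

  static void ProducerExample() {
    example_q.Put(new int(42));
    example_q.Put(NULL);                      // hypothetical stop marker
  }

  static void ConsumerExample() {
    while (true) {
      int *item = static_cast<int*>(example_q.Get());  // blocks if empty
      if (item == NULL) break;
      delete item;
    }
  }
*/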
/// Function pointer with zero, one or two parameters.
struct Closure {
  typedef void (*F0)();
  typedef void (*F1)(void *arg1);
  typedef void (*F2)(void *arg1, void *arg2);
  int  n_params;
  void *f;
  void *param1;
  void *param2;

  void Execute() {
    if (n_params == 0) {
      (F0(f))();
    } else if (n_params == 1) {
      (F1(f))(param1);
    } else {
      CHECK(n_params == 2);
      (F2(f))(param1, param2);
    }
    delete this;
  }
};
Closure *NewCallback(void (*f)()) {
  Closure *res = new Closure;
  res->n_params = 0;
  res->f = (void*)(f);
  res->param1 = NULL;
  res->param2 = NULL;
  return res;
}

template <class P1>
Closure *NewCallback(void (*f)(P1), P1 p1) {
  CHECK(sizeof(P1) <= sizeof(void*));
  Closure *res = new Closure;
  res->n_params = 1;
  res->f = (void*)(f);
  res->param1 = (void*)p1;
  res->param2 = NULL;
  return res;
}

template <class P1, class P2>
Closure *NewCallback(void (*f)(P1, P2), P1 p1, P2 p2) {
  CHECK(sizeof(P1) <= sizeof(void*));
  Closure *res = new Closure;
  res->n_params = 2;
  res->f = (void*)(f);
  res->param1 = (void*)p1;
  res->param2 = (void*)p2;
  return res;
}
/*! A thread pool that uses ProducerConsumerQueue.
  Usage:

    ThreadPool pool(n_workers);
    pool.StartWorkers();
    pool.Add(NewCallback(func_with_no_args));
    pool.Add(NewCallback(func_with_one_arg, arg));
    pool.Add(NewCallback(func_with_two_args, arg1, arg2));
    ...  // more calls to pool.Add()

    // the ~ThreadPool() is called: we wait for the workers to finish
    // and then join all threads in the pool.
*/
class ThreadPool {
 public:
  //! Create n_threads threads, but do not start them.
  explicit ThreadPool(int n_threads)
    : queue_(INT_MAX) {
    for (int i = 0; i < n_threads; i++) {
      MyThread *thread = new MyThread(&ThreadPool::Worker, this);
      workers_.push_back(thread);
    }
  }

  //! Start all threads.
  void StartWorkers() {
    for (size_t i = 0; i < workers_.size(); i++) {
      workers_[i]->Start();
    }
  }

  //! Add a closure.
  void Add(Closure *closure) {
    queue_.Put(closure);
  }

  int num_threads() { return workers_.size(); }

  //! Wait for the workers to finish, then join all threads.
  ~ThreadPool() {
    for (size_t i = 0; i < workers_.size(); i++) {
      Add(NULL);
    }
    for (size_t i = 0; i < workers_.size(); i++) {
      workers_[i]->Join();
      delete workers_[i];
    }
  }
 private:
  std::vector<MyThread*> workers_;
  ProducerConsumerQueue queue_;

  static void *Worker(void *p) {
    ThreadPool *pool = reinterpret_cast<ThreadPool*>(p);
    while (true) {
      Closure *closure = reinterpret_cast<Closure*>(pool->queue_.Get());
      if (closure == NULL) {
        return NULL;
      }
      closure->Execute();
    }
  }
};
#ifndef NO_BARRIER
/// Wrapper for pthread_barrier_t.
class Barrier {
 public:
  explicit Barrier(int n_threads) {CHECK(0 == pthread_barrier_init(&b_, 0, n_threads));}
  ~Barrier() {CHECK(0 == pthread_barrier_destroy(&b_));}
  void Block() {
    // helgrind 3.3.0 does not have an interceptor for barrier,
    // but our current local version does.
    // ANNOTATE_CONDVAR_SIGNAL(this);
    pthread_barrier_wait(&b_);
    // ANNOTATE_CONDVAR_WAIT(this, this);
  }
 private:
  pthread_barrier_t b_;
};
#endif // NO_BARRIER
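
// Illustrative usage sketch (not part of the original header, and only
// meaningful where NO_BARRIER is not defined): two threads rendezvous at the
// same Barrier; BarrierWorkerExample is an invented name.
/*
  #ifndef NO_BARRIER
  static Barrier example_barrier(2);   // expects exactly two participants

  static void BarrierWorkerExample() {
    // ... phase 1 work ...
    example_barrier.Block();           // returns once both threads arrive
    // ... phase 2 work, ordered after every thread's phase 1 ...
  }
  #endif
*/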
class BlockingCounter {
 public:
  explicit BlockingCounter(int initial_count) :
    count_(initial_count) {}
  bool DecrementCount() {
    MutexLock lock(&mu_);
    count_--;
    return count_ == 0;
  }
  void Wait() {
    mu_.LockWhen(Condition<int>(&IsZero, &count_));
    mu_.Unlock();
  }
 private:
  static bool IsZero(int *arg) { return *arg == 0; }
  Mutex mu_;
  int count_;
};
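
// Illustrative usage sketch (not part of the original header): a main thread
// waits until `kNumWorkersExample` workers have each called DecrementCount();
// all names here are invented for the example.
/*
  static const int kNumWorkersExample = 4;
  static BlockingCounter example_counter(kNumWorkersExample);

  static void CountedWorkerExample() {
    // ... do some work ...
    example_counter.DecrementCount();   // returns true for the last worker
  }

  static void WaitForWorkersExample() {
    example_counter.Wait();             // blocks until the count reaches zero
  }
*/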
int AtomicIncrement(volatile int *value, int increment);

#ifndef VGO_darwin
inline int AtomicIncrement(volatile int *value, int increment) {
  return __sync_add_and_fetch(value, increment);
}

#else
// Mac OS X version.
inline int AtomicIncrement(volatile int *value, int increment) {
  return OSAtomicAdd32(increment, value);
}

// TODO(timurrrr) this is a hack
#define memalign(A,B) malloc(B)

// TODO(timurrrr) this is a hack
int posix_memalign(void **out, size_t al, size_t size) {
  *out = memalign(al, size);
  return (*out == 0);
}
#endif // VGO_darwin

#endif // THREAD_WRAPPERS_PTHREAD_H
// vim:shiftwidth=2:softtabstop=2:expandtab:foldmethod=marker