/*
  This file is part of Valgrind, a dynamic binary instrumentation
  framework.

  Copyright (C) 2008-2008 Google Inc

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/
// Author: Konstantin Serebryany <opensource@google.com>

// Here we define a few simple classes that wrap pthread primitives.
//
// We need this to create unit tests for helgrind (or a similar tool)
// that will work with different threading frameworks.
//
// If one needs to test helgrind's support for another threading library,
// one can create a copy of this file and replace the pthread_ calls
// with the appropriate calls to that library.
//
// Note that some of the methods defined here are annotated with
// ANNOTATE_* macros defined in dynamic_annotations.h.
//
// DISCLAIMER: the classes defined in this header file
// are NOT intended for general use -- only for unit tests.
#ifndef THREAD_WRAPPERS_PTHREAD_H
#define THREAD_WRAPPERS_PTHREAD_H

#include <pthread.h>
#include <semaphore.h>
#include <sys/time.h>  // gettimeofday
#include <limits.h>    // INT_MAX
#include <queue>
#include <vector>

#ifdef __APPLE__
#include <libkern/OSAtomic.h>  // OSSpinLock*, OSAtomicAdd32
#endif
#include "../../drd/drd.h"
#define ANNOTATE_NO_OP(arg) do { } while(0)
#define ANNOTATE_EXPECT_RACE(addr, descr) \
    ANNOTATE_BENIGN_RACE_SIZED(addr, 4, "expected race")
static inline bool RunningOnValgrind() { return RUNNING_ON_VALGRIND; }
#ifdef NDEBUG
# error "Pleeease, do not define NDEBUG"
#endif
/// Set this to true if malloc() uses a mutex on your platform, as this may
/// introduce a happens-before arc for a pure happens-before race detector.
const bool kMallocUsesMutex = false;
/// Current time in milliseconds.
static inline int64_t GetCurrentTimeMillis() {
  struct timeval now;
  gettimeofday(&now, NULL);
  return now.tv_sec * 1000 + now.tv_usec / 1000;
}
/// Copy tv to ts, adding offset in milliseconds.
static inline void timeval2timespec(timeval *const tv,
                                    timespec *ts,
                                    int64_t offset_milli) {
  const int64_t ten_9 = 1000000000LL;
  const int64_t ten_6 = 1000000LL;
  const int64_t ten_3 = 1000LL;
  int64_t now_nsec  = (int64_t)tv->tv_sec * ten_9;
  now_nsec         += (int64_t)tv->tv_usec * ten_3;
  int64_t then_nsec = now_nsec + offset_milli * ten_6;
  ts->tv_sec  = then_nsec / ten_9;
  ts->tv_nsec = then_nsec % ten_9;
}
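// Usage sketch (illustrative, not part of the original file): building an
// absolute deadline for pthread_cond_timedwait() from the current time.
// The names 'now', 'deadline', 'cv' and 'mu' below are hypothetical.
//
//   struct timeval  now;
//   struct timespec deadline;
//   gettimeofday(&now, NULL);
//   timeval2timespec(&now, &deadline, 500 /* ms */);
//   // 'deadline' now holds "now + 500ms" and can be passed to
//   // pthread_cond_timedwait(&cv, &mu, &deadline).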
/// helgrind does not (yet) support spin locks, so we annotate them.

#ifndef NO_SPINLOCK
class SpinLock {
 public:
  SpinLock() {
    CHECK(0 == pthread_spin_init(&mu_, 0));
    ANNOTATE_RWLOCK_CREATE((void*)&mu_);
  }
  ~SpinLock() {
    ANNOTATE_RWLOCK_DESTROY((void*)&mu_);
    CHECK(0 == pthread_spin_destroy(&mu_));
  }
  void Lock() {
    CHECK(0 == pthread_spin_lock(&mu_));
    ANNOTATE_RWLOCK_ACQUIRED((void*)&mu_, 1);
  }
  void Unlock() {
    ANNOTATE_RWLOCK_RELEASED((void*)&mu_, 1);
    CHECK(0 == pthread_spin_unlock(&mu_));
  }
 private:
  pthread_spinlock_t mu_;
};
#else  // NO_SPINLOCK: Mac OS X version based on OSSpinLock.
class SpinLock {
 public:
  SpinLock() : mu_(OS_SPINLOCK_INIT) {
    ANNOTATE_RWLOCK_CREATE((void*)&mu_);
  }
  ~SpinLock() {
    ANNOTATE_RWLOCK_DESTROY((void*)&mu_);
  }
  void Lock() {
    OSSpinLockLock(&mu_);
    ANNOTATE_RWLOCK_ACQUIRED((void*)&mu_, 1);
  }
  void Unlock() {
    ANNOTATE_RWLOCK_RELEASED((void*)&mu_, 1);
    OSSpinLockUnlock(&mu_);
  }
 private:
  OSSpinLock mu_;
};
#endif  // NO_SPINLOCK
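// Usage sketch (illustrative, not part of the original file): SpinLock has
// no scoped locker, so it is used directly. 'g_spin' is hypothetical.
//
//   static SpinLock g_spin;
//   g_spin.Lock();
//   // ... touch data protected by g_spin ...
//   g_spin.Unlock();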
/// Just a boolean condition. Used by Mutex::LockWhen and similar.
template <typename T>
class Condition {
 public:
  typedef bool (*func_t)(void*);

  Condition(bool (*func)(T*), T* arg)
    : func1_(func), func0_(NULL), arg_(arg) {}

  Condition(bool (*func)())
    : func1_(NULL), func0_(func), arg_(NULL) {}

  bool Eval() const { return func1_ ? func1_(arg_) : func0_(); }

 private:
  bool (*func1_)(T*);  // one-argument predicate (may be NULL)
  bool (*func0_)();    // zero-argument predicate (may be NULL)
  T   *arg_;
};
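// Usage sketch (illustrative, not part of the original file): a Condition
// simply packages a predicate with its argument so that Mutex::LockWhen and
// Await can evaluate it repeatedly. 'IsPositive' is a hypothetical predicate.
//
//   static bool IsPositive(int *v) { return *v > 0; }
//   int x = 0;
//   Condition<int> cond(IsPositive, &x);
//   // cond.Eval() returns false until x becomes positive.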
/// Wrapper for pthread_mutex_t.
///
/// pthread_mutex_t is *not* a reader-writer lock,
/// so methods like ReaderLock() aren't really reader locks.
/// We cannot use pthread_rwlock_t because it
/// does not work with pthread_cond_t.
///
/// TODO: We still need to test reader locks with this class.
/// Implement a mode where pthread_rwlock_t will be used
/// instead of pthread_mutex_t (only when not used with CondVar or LockWhen).
class Mutex {
  friend class CondVar;
 public:
  Mutex() {
    CHECK(0 == pthread_mutex_init(&mu_, NULL));
    CHECK(0 == pthread_cond_init(&cv_, NULL));
    signal_at_unlock_ = true;  // Always signal at Unlock to make
                               // Mutex more friendly to hybrid detectors.
  }
  ~Mutex() {
    CHECK(0 == pthread_cond_destroy(&cv_));
    CHECK(0 == pthread_mutex_destroy(&mu_));
  }
  void Lock()    { CHECK(0 == pthread_mutex_lock(&mu_)); }
  bool TryLock() { return (0 == pthread_mutex_trylock(&mu_)); }
  void Unlock() {
    if (signal_at_unlock_) {
      CHECK(0 == pthread_cond_signal(&cv_));
    }
    CHECK(0 == pthread_mutex_unlock(&mu_));
  }
  void ReaderLock()    { Lock(); }
  bool ReaderTryLock() { return TryLock(); }
  void ReaderUnlock()  { Unlock(); }

  template <typename T>
  void LockWhen(const Condition<T>& cond)       { Lock(); WaitLoop(cond); }
  template <typename T>
  void ReaderLockWhen(const Condition<T>& cond) { Lock(); WaitLoop(cond); }
  template <typename T>
  void Await(const Condition<T>& cond)          { WaitLoop(cond); }

  template <typename T>
  bool ReaderLockWhenWithTimeout(const Condition<T>& cond, int millis)
    { Lock(); return WaitLoopWithTimeout(cond, millis); }
  template <typename T>
  bool LockWhenWithTimeout(const Condition<T>& cond, int millis)
    { Lock(); return WaitLoopWithTimeout(cond, millis); }
  template <typename T>
  bool AwaitWithTimeout(const Condition<T>& cond, int millis)
    { return WaitLoopWithTimeout(cond, millis); }

 private:
  template <typename T>
  void WaitLoop(const Condition<T>& cond) {
    signal_at_unlock_ = true;
    while (cond.Eval() == false) {
      pthread_cond_wait(&cv_, &mu_);
    }
    ANNOTATE_CONDVAR_LOCK_WAIT(&cv_, &mu_);
  }

  template <typename T>
  bool WaitLoopWithTimeout(const Condition<T>& cond, int millis) {
    struct timeval  now;
    struct timespec timeout;
    int retcode = 0;
    gettimeofday(&now, NULL);
    timeval2timespec(&now, &timeout, millis);

    signal_at_unlock_ = true;
    while (cond.Eval() == false && retcode == 0) {
      retcode = pthread_cond_timedwait(&cv_, &mu_, &timeout);
    }
    if (retcode == 0) {
      ANNOTATE_CONDVAR_LOCK_WAIT(&cv_, &mu_);
    }
    return cond.Eval();
  }

  // A hack. cv_ should be the first data member so that
  // ANNOTATE_CONDVAR_WAIT(&MU, &MU) and ANNOTATE_CONDVAR_SIGNAL(&MU) work.
  // (See also racecheck_unittest.cc)
  pthread_cond_t  cv_;
  pthread_mutex_t mu_;
  bool signal_at_unlock_;  // Set to true if Wait was called.
};
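// Usage sketch (illustrative, not part of the original file): one thread sets
// a value under the Mutex, another blocks in LockWhen() until it is set.
// 'g_value', 'g_mu' and 'ValueIsSet' are hypothetical.
//
//   static int   g_value = 0;
//   static Mutex g_mu;
//   static bool  ValueIsSet(int *v) { return *v != 0; }
//
//   // Writer thread:
//   g_mu.Lock();  g_value = 42;  g_mu.Unlock();   // Unlock() signals cv_.
//   // Reader thread:
//   g_mu.LockWhen(Condition<int>(ValueIsSet, &g_value));
//   int copy = g_value;
//   g_mu.Unlock();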
class MutexLock {  // Scoped Mutex Locker/Unlocker
 public:
  MutexLock(Mutex *mu) : mu_(mu) { mu_->Lock(); }
  ~MutexLock()                   { mu_->Unlock(); }
 private:
  Mutex *mu_;
};
/// Wrapper for pthread_cond_t.
class CondVar {
 public:
  CondVar()  { CHECK(0 == pthread_cond_init(&cv_, NULL)); }
  ~CondVar() { CHECK(0 == pthread_cond_destroy(&cv_)); }
  void Wait(Mutex *mu) { CHECK(0 == pthread_cond_wait(&cv_, &mu->mu_)); }
  bool WaitWithTimeout(Mutex *mu, int millis) {
    struct timeval  now;
    struct timespec timeout;
    gettimeofday(&now, NULL);
    timeval2timespec(&now, &timeout, millis);
    return 0 != pthread_cond_timedwait(&cv_, &mu->mu_, &timeout);
  }
  void Signal()    { CHECK(0 == pthread_cond_signal(&cv_)); }
  void SignalAll() { CHECK(0 == pthread_cond_broadcast(&cv_)); }
 private:
  pthread_cond_t cv_;
};
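// Usage sketch (illustrative, not part of the original file): the classic
// condition-variable pattern written with the wrappers above.
// 'g_mu', 'g_cv' and 'g_ready' are hypothetical.
//
//   static Mutex   g_mu;
//   static CondVar g_cv;
//   static bool    g_ready = false;
//
//   // Waiter thread:
//   g_mu.Lock();
//   while (!g_ready) g_cv.Wait(&g_mu);
//   g_mu.Unlock();
//
//   // Signalling thread:
//   g_mu.Lock();  g_ready = true;  g_mu.Unlock();  g_cv.Signal();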
// pthreads do not allow using a condvar with an rwlock, so we can't make
// the ReaderLock method of Mutex a real rw-lock.
// So we need a special lock class to test reader locks.
#define NEEDS_SEPERATE_RW_LOCK
class RWLock {
 public:
  RWLock()  { CHECK(0 == pthread_rwlock_init(&mu_, NULL)); }
  ~RWLock() { CHECK(0 == pthread_rwlock_destroy(&mu_)); }
  void Lock()         { CHECK(0 == pthread_rwlock_wrlock(&mu_)); }
  void ReaderLock()   { CHECK(0 == pthread_rwlock_rdlock(&mu_)); }
  void Unlock()       { CHECK(0 == pthread_rwlock_unlock(&mu_)); }
  void ReaderUnlock() { CHECK(0 == pthread_rwlock_unlock(&mu_)); }
 private:
  pthread_cond_t dummy;  // Damn, this requires some redesign...
  pthread_rwlock_t mu_;
};
class ReaderLockScoped {  // Scoped RWLock Locker/Unlocker
 public:
  ReaderLockScoped(RWLock *mu) : mu_(mu) { mu_->ReaderLock(); }
  ~ReaderLockScoped()                    { mu_->ReaderUnlock(); }
 private:
  RWLock *mu_;
};

class WriterLockScoped {  // Scoped RWLock Locker/Unlocker
 public:
  WriterLockScoped(RWLock *mu) : mu_(mu) { mu_->Lock(); }
  ~WriterLockScoped()                    { mu_->Unlock(); }
 private:
  RWLock *mu_;
};
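// Usage sketch (illustrative, not part of the original file): scoped
// reader/writer locking. 'g_rw' is hypothetical.
//
//   static RWLock g_rw;
//   {
//     ReaderLockScoped reader(&g_rw);   // rdlock in ctor, unlock in dtor
//     // ... read shared data ...
//   }
//   {
//     WriterLockScoped writer(&g_rw);   // wrlock in ctor, unlock in dtor
//     // ... modify shared data ...
//   }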
/// Wrapper for pthread_create()/pthread_join().
class MyThread {
 public:
  MyThread(void* (*worker)(void *), void *arg = NULL, const char *name = NULL)
      : wpvpv_(worker), wvv_(), wvpv_(), arg_(arg), name_(name) {}
  MyThread(void (*worker)(void), void *arg = NULL, const char *name = NULL)
      : wpvpv_(), wvv_(worker), wvpv_(), arg_(arg), name_(name) {}
  MyThread(void (*worker)(void *), void *arg = NULL, const char *name = NULL)
      : wpvpv_(), wvv_(), wvpv_(worker), arg_(arg), name_(name) {}

  void Start() { CHECK(0 == pthread_create(&t_, NULL, ThreadBody, this)); }
  void Join()  { CHECK(0 == pthread_join(t_, NULL)); }
  pthread_t tid() const { return t_; }

 private:
  static void *ThreadBody(void *arg) {
    MyThread *my_thread = reinterpret_cast<MyThread*>(arg);
    if (my_thread->name_) {
      ANNOTATE_THREAD_NAME(my_thread->name_);
    }
    if (my_thread->wpvpv_)
      return my_thread->wpvpv_(my_thread->arg_);
    if (my_thread->wvpv_)
      my_thread->wvpv_(my_thread->arg_);
    if (my_thread->wvv_)
      my_thread->wvv_();
    return NULL;
  }

  pthread_t t_;
  void *(*wpvpv_)(void*);  // worker taking void*, returning void*
  void (*wvv_)(void);      // worker taking no arguments
  void (*wvpv_)(void*);    // worker taking void*, returning void
  void *arg_;
  const char *name_;
};
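// Usage sketch (illustrative, not part of the original file):
// 'Worker' is a hypothetical thread body.
//
//   static void *Worker(void *arg) { /* ... */ return NULL; }
//   MyThread t(Worker, NULL, "worker-1");
//   t.Start();
//   // ... do something concurrently ...
//   t.Join();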
/// Just a message queue.
class ProducerConsumerQueue {
 public:
  ProducerConsumerQueue(int unused) {
    //ANNOTATE_PCQ_CREATE(this);
  }
  ~ProducerConsumerQueue() {
    //ANNOTATE_PCQ_DESTROY(this);
  }

  // Put an item into the queue.
  void Put(void *item) {
    mu_.Lock();
    q_.push(item);
    ANNOTATE_CONDVAR_SIGNAL(&mu_);  // LockWhen in Get()
    //ANNOTATE_PCQ_PUT(this);
    mu_.Unlock();
  }

  // Get an item from the queue. Blocks if the queue is empty.
  void *Get() {
    mu_.LockWhen(Condition<typeof(q_)>(IsQueueNotEmpty, &q_));
    void *item = NULL;
    bool ok = TryGetInternal(&item);
    CHECK(ok);
    mu_.Unlock();
    return item;
  }

  // If the queue is not empty,
  // remove an element from the queue, put it into *res and return true.
  // Otherwise return false.
  bool TryGet(void **res) {
    mu_.Lock();
    bool ok = TryGetInternal(res);
    mu_.Unlock();
    return ok;
  }

 private:
  std::queue<void*> q_;  // protected by mu_
  Mutex mu_;

  bool TryGetInternal(void **item_ptr) {
    if (q_.empty())
      return false;
    *item_ptr = q_.front();
    q_.pop();
    //ANNOTATE_PCQ_GET(this);
    return true;
  }

  static bool IsQueueNotEmpty(std::queue<void*> *queue) {
    return !queue->empty();
  }
};
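// Usage sketch (illustrative, not part of the original file): passing owned
// pointers from a producer thread to a consumer thread. 'g_q' is hypothetical.
//
//   static ProducerConsumerQueue g_q(0);
//   // Producer:   g_q.Put(new int(42));
//   // Consumer:   int *p = static_cast<int*>(g_q.Get());  // blocks if empty
//   //             delete p;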
/// Function pointer with zero, one or two parameters.
struct Closure {
  typedef void (*F0)();
  typedef void (*F1)(void *arg1);
  typedef void (*F2)(void *arg1, void *arg2);
  int  n_params;
  void *f;
  void *param1;
  void *param2;

  void Execute() {
    if (n_params == 0) {
      (F0(f))();
    } else if (n_params == 1) {
      (F1(f))(param1);
    } else {
      CHECK(n_params == 2);
      (F2(f))(param1, param2);
    }
    delete this;
  }
};
Closure *NewCallback(void (*f)()) {
  Closure *res = new Closure;
  res->n_params = 0;
  res->f = (void*)(f);
  res->param1 = NULL;
  res->param2 = NULL;
  return res;
}

template <class P1>
Closure *NewCallback(void (*f)(P1), P1 p1) {
  CHECK(sizeof(P1) <= sizeof(void*));
  Closure *res = new Closure;
  res->n_params = 1;
  res->f = (void*)(f);
  res->param1 = (void*)p1;
  res->param2 = NULL;
  return res;
}

template <class P1, class P2>
Closure *NewCallback(void (*f)(P1, P2), P1 p1, P2 p2) {
  CHECK(sizeof(P1) <= sizeof(void*));
  Closure *res = new Closure;
  res->n_params = 2;
  res->f = (void*)(f);
  res->param1 = (void*)p1;
  res->param2 = (void*)p2;
  return res;
}
/*! A thread pool that uses ProducerConsumerQueue.
  Usage:
  {
    ThreadPool pool(n_workers);
    pool.StartWorkers();
    pool.Add(NewCallback(func_with_no_args));
    pool.Add(NewCallback(func_with_one_arg, arg));
    pool.Add(NewCallback(func_with_two_args, arg1, arg2));
    ...  // more calls to pool.Add()

    // the ~ThreadPool() is called: we wait for workers to finish
    // and then join all threads in the pool.
  }
*/
class ThreadPool {
 public:
  //! Create n_threads threads, but do not start them.
  explicit ThreadPool(int n_threads)
      : queue_(INT_MAX) {
    for (int i = 0; i < n_threads; i++) {
      MyThread *thread = new MyThread(&ThreadPool::Worker, this);
      workers_.push_back(thread);
    }
  }

  //! Start all threads.
  void StartWorkers() {
    for (size_t i = 0; i < workers_.size(); i++) {
      workers_[i]->Start();
    }
  }

  //! Add a closure to the queue.
  void Add(Closure *closure) {
    queue_.Put(closure);
  }

  int num_threads() { return workers_.size(); }

  //! Wait for workers to finish, then join all threads.
  ~ThreadPool() {
    for (size_t i = 0; i < workers_.size(); i++) {
      Add(NULL);  // NULL acts as a "stop" signal for one worker.
    }
    for (size_t i = 0; i < workers_.size(); i++) {
      workers_[i]->Join();
      delete workers_[i];
    }
  }

 private:
  std::vector<MyThread*> workers_;
  ProducerConsumerQueue queue_;

  static void *Worker(void *p) {
    ThreadPool *pool = reinterpret_cast<ThreadPool*>(p);
    while (true) {
      Closure *closure = reinterpret_cast<Closure*>(pool->queue_.Get());
      if (closure == NULL) {
        return NULL;
      }
      closure->Execute();
    }
  }
};
/// Wrapper for pthread_barrier_t.
class Barrier {
 public:
  explicit Barrier(int n_threads) {CHECK(0 == pthread_barrier_init(&b_, 0, n_threads));}
  ~Barrier()                      {CHECK(0 == pthread_barrier_destroy(&b_));}
  void Block() {
    // helgrind 3.3.0 does not have an interceptor for barrier,
    // but our current local version does.
    // ANNOTATE_CONDVAR_SIGNAL(this);
    pthread_barrier_wait(&b_);
    // ANNOTATE_CONDVAR_WAIT(this, this);
  }
 private:
  pthread_barrier_t b_;
};
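// Usage sketch (illustrative, not part of the original file): make N threads
// rendezvous before they touch shared data. 'g_barrier' is hypothetical.
//
//   static Barrier g_barrier(4);   // 4 participating threads
//   // In each of the 4 threads:
//   g_barrier.Block();             // returns only once all 4 have arrived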
class BlockingCounter {
 public:
  explicit BlockingCounter(int initial_count)
      : count_(initial_count) {}
  bool DecrementCount() {
    MutexLock lock(&mu_);
    count_--;
    return count_ == 0;
  }
  void Wait() {
    mu_.LockWhen(Condition<int>(&IsZero, &count_));
    mu_.Unlock();
  }
 private:
  static bool IsZero(int *arg) { return *arg == 0; }
  Mutex mu_;
  int count_;
};
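// Usage sketch (illustrative, not part of the original file): the main thread
// blocks until N worker threads have checked in. 'g_pending' is hypothetical.
//
//   static BlockingCounter g_pending(3);   // 3 workers
//   // Each worker, when done:   g_pending.DecrementCount();
//   // Main thread:              g_pending.Wait();   // returns when count == 0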
// Atomically adds "increment" to "*value" and returns the new value.
int AtomicIncrement(volatile int *value, int increment);

#ifndef __APPLE__
inline int AtomicIncrement(volatile int *value, int increment) {
  return __sync_add_and_fetch(value, increment);
}
#else
// Mac OS X version.
inline int AtomicIncrement(volatile int *value, int increment) {
  return OSAtomicAdd32(increment, value);
}

// TODO(timurrrr) this is a hack
#define memalign(A,B) malloc(B)

// TODO(timurrrr) this is a hack
int posix_memalign(void **out, size_t al, size_t size) {
  *out = memalign(al, size);
  return (*out == 0);
}
#endif
#endif  // THREAD_WRAPPERS_PTHREAD_H
// vim:shiftwidth=2:softtabstop=2:expandtab:foldmethod=marker