1 //===-- tsan_test_util_posix.cpp ------------------------------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file is a part of ThreadSanitizer (TSan), a race detector.
11 // Test utils, Linux, FreeBSD, NetBSD and Darwin implementation.
12 //===----------------------------------------------------------------------===//
14 #include "sanitizer_common/sanitizer_atomic.h"
15 #include "tsan_interface.h"
16 #include "tsan_posix_util.h"
18 #include "tsan_test_util.h"
19 #include "tsan_report.h"
29 #define CALLERPC (__builtin_return_address(0))
31 static __thread
bool expect_report
;
32 static __thread
bool expect_report_reported
;
33 static __thread
__tsan::ReportType expect_report_type
;
35 void ThreadSanitizer::TearDown() {
36 __tsan::ctx
->racy_stacks
.Reset();
// Trivial thread body used by TestMutexBeforeInit() to exercise
// pthread_create/join through the interceptors before __tsan_init().
// NOTE(review): body lines lost in extraction — restored as a no-op returning 0.
static void *BeforeInitThread(void *param) {
  (void)param;
  return 0;
}
// Empty atexit handler; registering it exercises the atexit machinery.
static void AtExit() {
}
47 void TestMutexBeforeInit() {
48 // Mutexes must be usable before __tsan_init();
49 pthread_mutex_t mtx
= PTHREAD_MUTEX_INITIALIZER
;
50 __interceptor_pthread_mutex_lock(&mtx
);
51 __interceptor_pthread_mutex_unlock(&mtx
);
52 __interceptor_pthread_mutex_destroy(&mtx
);
54 __interceptor_pthread_create(&thr
, 0, BeforeInitThread
, 0);
55 __interceptor_pthread_join(thr
, 0);
60 bool OnReport(const ReportDesc
*rep
, bool suppressed
) {
62 if (rep
->typ
!= expect_report_type
) {
63 printf("Expected report of type %d, got type %d\n",
64 (int)expect_report_type
, (int)rep
->typ
);
65 EXPECT_TRUE(false) << "Wrong report type";
69 EXPECT_TRUE(false) << "Unexpected report";
72 expect_report_reported
= true;
77 static void* allocate_addr(int size
, int offset_from_aligned
= 0) {
79 static __tsan::atomic_uintptr_t uniq
= {(uintptr_t)&foo
}; // Some real address.
80 const int kAlign
= 16;
81 CHECK(offset_from_aligned
< kAlign
);
82 size
= (size
+ 2 * kAlign
) & ~(kAlign
- 1);
83 uintptr_t addr
= atomic_fetch_add(&uniq
, size
, __tsan::memory_order_relaxed
);
84 return (void*)(addr
+ offset_from_aligned
);
87 MemLoc::MemLoc(int offset_from_aligned
)
88 : loc_(allocate_addr(16, offset_from_aligned
)) {
94 UserMutex::UserMutex(Type type
) : alive_(), type_(type
) {}
96 UserMutex::~UserMutex() { CHECK(!alive_
); }
98 void UserMutex::Init() {
102 CHECK_EQ(__interceptor_pthread_mutex_init((pthread_mutex_t
*)mtx_
, 0), 0);
104 else if (type_
== Spin
)
105 CHECK_EQ(pthread_spin_init((pthread_spinlock_t
*)mtx_
, 0), 0);
107 else if (type_
== RW
)
108 CHECK_EQ(__interceptor_pthread_rwlock_init((pthread_rwlock_t
*)mtx_
, 0), 0);
113 void UserMutex::StaticInit() {
115 CHECK(type_
== Normal
);
117 pthread_mutex_t tmp
= PTHREAD_MUTEX_INITIALIZER
;
118 memcpy(mtx_
, &tmp
, sizeof(tmp
));
121 void UserMutex::Destroy() {
125 CHECK_EQ(__interceptor_pthread_mutex_destroy((pthread_mutex_t
*)mtx_
), 0);
127 else if (type_
== Spin
)
128 CHECK_EQ(pthread_spin_destroy((pthread_spinlock_t
*)mtx_
), 0);
130 else if (type_
== RW
)
131 CHECK_EQ(__interceptor_pthread_rwlock_destroy((pthread_rwlock_t
*)mtx_
), 0);
134 void UserMutex::Lock() {
137 CHECK_EQ(__interceptor_pthread_mutex_lock((pthread_mutex_t
*)mtx_
), 0);
139 else if (type_
== Spin
)
140 CHECK_EQ(pthread_spin_lock((pthread_spinlock_t
*)mtx_
), 0);
142 else if (type_
== RW
)
143 CHECK_EQ(__interceptor_pthread_rwlock_wrlock((pthread_rwlock_t
*)mtx_
), 0);
146 bool UserMutex::TryLock() {
149 return __interceptor_pthread_mutex_trylock((pthread_mutex_t
*)mtx_
) == 0;
151 else if (type_
== Spin
)
152 return pthread_spin_trylock((pthread_spinlock_t
*)mtx_
) == 0;
154 else if (type_
== RW
)
155 return __interceptor_pthread_rwlock_trywrlock((pthread_rwlock_t
*)mtx_
) == 0;
159 void UserMutex::Unlock() {
162 CHECK_EQ(__interceptor_pthread_mutex_unlock((pthread_mutex_t
*)mtx_
), 0);
164 else if (type_
== Spin
)
165 CHECK_EQ(pthread_spin_unlock((pthread_spinlock_t
*)mtx_
), 0);
167 else if (type_
== RW
)
168 CHECK_EQ(__interceptor_pthread_rwlock_unlock((pthread_rwlock_t
*)mtx_
), 0);
171 void UserMutex::ReadLock() {
174 CHECK_EQ(__interceptor_pthread_rwlock_rdlock((pthread_rwlock_t
*)mtx_
), 0);
177 bool UserMutex::TryReadLock() {
180 return __interceptor_pthread_rwlock_tryrdlock((pthread_rwlock_t
*)mtx_
) == 0;
183 void UserMutex::ReadUnlock() {
186 CHECK_EQ(__interceptor_pthread_rwlock_unlock((pthread_rwlock_t
*)mtx_
), 0);
214 __tsan::ReportType report_type
;
216 explicit Event(Type type
, const void *ptr
= 0, uptr arg
= 0, uptr arg2
= 0)
218 ptr(const_cast<void *>(ptr
)),
225 void ExpectReport(__tsan::ReportType type
) {
226 expect_report
= true;
231 struct ScopedThread::Impl
{
235 __tsan::atomic_uintptr_t event
; // Event*
237 static void *ScopedThreadCallback(void *arg
);
238 void send(Event
*ev
);
239 void HandleEvent(Event
*ev
);
242 void ScopedThread::Impl::HandleEvent(Event
*ev
) {
243 CHECK_EQ(expect_report
, false);
244 expect_report
= ev
->expect_report
;
245 expect_report_reported
= false;
246 expect_report_type
= ev
->report_type
;
250 void (*tsan_mop
)(void *addr
, void *pc
) = 0;
251 if (ev
->type
== Event::READ
) {
252 switch (ev
->arg
/*size*/) {
254 tsan_mop
= __tsan_read1_pc
;
257 tsan_mop
= __tsan_read2_pc
;
260 tsan_mop
= __tsan_read4_pc
;
263 tsan_mop
= __tsan_read8_pc
;
266 tsan_mop
= __tsan_read16_pc
;
270 switch (ev
->arg
/*size*/) {
272 tsan_mop
= __tsan_write1_pc
;
275 tsan_mop
= __tsan_write2_pc
;
278 tsan_mop
= __tsan_write4_pc
;
281 tsan_mop
= __tsan_write8_pc
;
284 tsan_mop
= __tsan_write16_pc
;
288 CHECK_NE(tsan_mop
, 0);
289 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__NetBSD__)
290 const int ErrCode
= ESOCKTNOSUPPORT
;
292 const int ErrCode
= ECHRNG
;
295 tsan_mop(ev
->ptr
, (void *)ev
->arg2
);
296 CHECK_EQ(ErrCode
, errno
); // In no case must errno be changed.
299 case Event::VPTR_UPDATE
:
300 __tsan_vptr_update((void**)ev
->ptr
, (void*)ev
->arg
);
303 __tsan_func_entry((void*)((uptr
)ev
->ptr
));
308 case Event::MUTEX_CREATE
:
309 static_cast<UserMutex
*>(ev
->ptr
)->Init();
311 case Event::MUTEX_DESTROY
:
312 static_cast<UserMutex
*>(ev
->ptr
)->Destroy();
314 case Event::MUTEX_LOCK
:
315 static_cast<UserMutex
*>(ev
->ptr
)->Lock();
317 case Event::MUTEX_TRYLOCK
:
318 ev
->res
= static_cast<UserMutex
*>(ev
->ptr
)->TryLock();
320 case Event::MUTEX_UNLOCK
:
321 static_cast<UserMutex
*>(ev
->ptr
)->Unlock();
323 case Event::MUTEX_READLOCK
:
324 static_cast<UserMutex
*>(ev
->ptr
)->ReadLock();
326 case Event::MUTEX_TRYREADLOCK
:
327 ev
->res
= static_cast<UserMutex
*>(ev
->ptr
)->TryReadLock();
329 case Event::MUTEX_READUNLOCK
:
330 static_cast<UserMutex
*>(ev
->ptr
)->ReadUnlock();
333 __interceptor_memcpy(ev
->ptr
, (void*)ev
->arg
, ev
->arg2
);
336 __interceptor_memset(ev
->ptr
, ev
->arg
, ev
->arg2
);
340 if (expect_report
&& !expect_report_reported
) {
341 printf("Missed expected report of type %d\n", (int)ev
->report_type
);
342 EXPECT_TRUE(false) << "Missed expected race";
344 expect_report
= false;
347 void *ScopedThread::Impl::ScopedThreadCallback(void *arg
) {
348 __tsan_func_entry(CALLERPC
);
349 Impl
*impl
= (Impl
*)arg
;
352 (Event
*)atomic_load(&impl
->event
, __tsan::memory_order_acquire
);
357 if (ev
->type
== Event::SHUTDOWN
) {
358 atomic_store(&impl
->event
, 0, __tsan::memory_order_release
);
361 impl
->HandleEvent(ev
);
362 atomic_store(&impl
->event
, 0, __tsan::memory_order_release
);
368 void ScopedThread::Impl::send(Event
*e
) {
372 CHECK_EQ(atomic_load(&event
, __tsan::memory_order_relaxed
), 0);
373 atomic_store(&event
, (uintptr_t)e
, __tsan::memory_order_release
);
374 while (atomic_load(&event
, __tsan::memory_order_acquire
) != 0)
379 ScopedThread::ScopedThread(bool detached
, bool main
) {
382 impl_
->detached
= detached
;
383 atomic_store(&impl_
->event
, 0, __tsan::memory_order_relaxed
);
386 pthread_attr_init(&attr
);
387 pthread_attr_setdetachstate(
388 &attr
, detached
? PTHREAD_CREATE_DETACHED
: PTHREAD_CREATE_JOINABLE
);
389 pthread_attr_setstacksize(&attr
, 64*1024);
390 __interceptor_pthread_create(&impl_
->thread
, &attr
,
391 ScopedThread::Impl::ScopedThreadCallback
, impl_
);
395 ScopedThread::~ScopedThread() {
397 Event
event(Event::SHUTDOWN
);
399 if (!impl_
->detached
)
400 __interceptor_pthread_join(impl_
->thread
, 0);
405 void ScopedThread::Detach() {
407 CHECK(!impl_
->detached
);
408 impl_
->detached
= true;
409 __interceptor_pthread_detach(impl_
->thread
);
412 void ScopedThread::Access(void *addr
, bool is_write
,
413 int size
, bool expect_race
) {
414 Event
event(is_write
? Event::WRITE
: Event::READ
, addr
, size
,
417 event
.ExpectReport(__tsan::ReportTypeRace
);
421 void ScopedThread::VptrUpdate(const MemLoc
&vptr
,
422 const MemLoc
&new_val
,
424 Event
event(Event::VPTR_UPDATE
, vptr
.loc(), (uptr
)new_val
.loc());
426 event
.ExpectReport(__tsan::ReportTypeRace
);
430 void ScopedThread::Call(void(*pc
)()) {
431 Event
event(Event::CALL
, (void*)((uintptr_t)pc
));
435 void ScopedThread::Return() {
436 Event
event(Event::RETURN
);
440 void ScopedThread::Create(const UserMutex
&m
) {
441 Event
event(Event::MUTEX_CREATE
, &m
);
445 void ScopedThread::Destroy(const UserMutex
&m
) {
446 Event
event(Event::MUTEX_DESTROY
, &m
);
450 void ScopedThread::Lock(const UserMutex
&m
) {
451 Event
event(Event::MUTEX_LOCK
, &m
);
455 bool ScopedThread::TryLock(const UserMutex
&m
) {
456 Event
event(Event::MUTEX_TRYLOCK
, &m
);
461 void ScopedThread::Unlock(const UserMutex
&m
) {
462 Event
event(Event::MUTEX_UNLOCK
, &m
);
466 void ScopedThread::ReadLock(const UserMutex
&m
) {
467 Event
event(Event::MUTEX_READLOCK
, &m
);
471 bool ScopedThread::TryReadLock(const UserMutex
&m
) {
472 Event
event(Event::MUTEX_TRYREADLOCK
, &m
);
477 void ScopedThread::ReadUnlock(const UserMutex
&m
) {
478 Event
event(Event::MUTEX_READUNLOCK
, &m
);
482 void ScopedThread::Memcpy(void *dst
, const void *src
, int size
,
484 Event
event(Event::MEMCPY
, dst
, (uptr
)src
, size
);
486 event
.ExpectReport(__tsan::ReportTypeRace
);
490 void ScopedThread::Memset(void *dst
, int val
, int size
,
492 Event
event(Event::MEMSET
, dst
, val
, size
);
494 event
.ExpectReport(__tsan::ReportTypeRace
);