//===-- tsan_test_util_posix.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Test utils, Linux, FreeBSD, NetBSD and Darwin implementation.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "tsan_interface.h"
#include "tsan_posix_util.h"
#include "tsan_rtl.h"
#include "tsan_test_util.h"
#include "tsan_report.h"

#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
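
// PC of the call site: __builtin_return_address(0) yields the return address
// of the current function, which the tests pass to TSan entry points so that
// reports carry a realistic program counter.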
#define CALLERPC (__builtin_return_address(0))
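
// Per-thread expectation state: armed by HandleEvent() before executing an
// event and consumed by __tsan::OnReport() when a report fires.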
static __thread bool expect_report;
static __thread bool expect_report_reported;
static __thread __tsan::ReportType expect_report_type;

void ThreadSanitizer::TearDown() {
  __tsan::ctx->racy_stacks.Reset();
}

static void *BeforeInitThread(void *param) {
  (void)param;
  return 0;
}

static void AtExit() {
}

void TestMutexBeforeInit() {
  // Mutexes must be usable before __tsan_init().
  pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
  __interceptor_pthread_mutex_lock(&mtx);
  __interceptor_pthread_mutex_unlock(&mtx);
  __interceptor_pthread_mutex_destroy(&mtx);
  pthread_t thr;
  __interceptor_pthread_create(&thr, 0, BeforeInitThread, 0);
  __interceptor_pthread_join(thr, 0);
  atexit(AtExit);
}

namespace __tsan {
bool OnReport(const ReportDesc *rep, bool suppressed) {
  if (expect_report) {
    if (rep->typ != expect_report_type) {
      printf("Expected report of type %d, got type %d\n",
             (int)expect_report_type, (int)rep->typ);
      EXPECT_TRUE(false) << "Wrong report type";
      return false;
    }
  } else {
    EXPECT_TRUE(false) << "Unexpected report";
    return false;
  }
  expect_report_reported = true;
  return true;
}
}  // namespace __tsan
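
// Bump-allocates a fresh kAlign-aligned address (optionally offset within the
// alignment) from a counter seeded with the address of a real static, so each
// MemLoc gets a distinct location in the application's data segment.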
static void* allocate_addr(int size, int offset_from_aligned = 0) {
  static uintptr_t foo;
  static __tsan::atomic_uintptr_t uniq = {(uintptr_t)&foo};  // Some real address.
  const int kAlign = 16;
  CHECK(offset_from_aligned < kAlign);
  size = (size + 2 * kAlign) & ~(kAlign - 1);
  uintptr_t addr = atomic_fetch_add(&uniq, size, __tsan::memory_order_relaxed);
  return (void*)(addr + offset_from_aligned);
}

MemLoc::MemLoc(int offset_from_aligned)
    : loc_(allocate_addr(16, offset_from_aligned)) {
}

MemLoc::~MemLoc() {
}

UserMutex::UserMutex(Type type) : alive_(), type_(type) {}

UserMutex::~UserMutex() { CHECK(!alive_); }

void UserMutex::Init() {
  CHECK(!alive_);
  alive_ = true;
  if (type_ == Normal)
    CHECK_EQ(__interceptor_pthread_mutex_init((pthread_mutex_t*)mtx_, 0), 0);
#ifndef __APPLE__
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_init((pthread_spinlock_t*)mtx_, 0), 0);
#endif
  else if (type_ == RW)
    CHECK_EQ(__interceptor_pthread_rwlock_init((pthread_rwlock_t*)mtx_, 0), 0);
  else
    CHECK(0);
}

void UserMutex::StaticInit() {
  CHECK(!alive_);
  CHECK(type_ == Normal);
  alive_ = true;
  pthread_mutex_t tmp = PTHREAD_MUTEX_INITIALIZER;
  memcpy(mtx_, &tmp, sizeof(tmp));
}

void UserMutex::Destroy() {
  CHECK(alive_);
  alive_ = false;
  if (type_ == Normal)
    CHECK_EQ(__interceptor_pthread_mutex_destroy((pthread_mutex_t*)mtx_), 0);
#ifndef __APPLE__
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_destroy((pthread_spinlock_t*)mtx_), 0);
#endif
  else if (type_ == RW)
    CHECK_EQ(__interceptor_pthread_rwlock_destroy((pthread_rwlock_t*)mtx_), 0);
}

void UserMutex::Lock() {
  CHECK(alive_);
  if (type_ == Normal)
    CHECK_EQ(__interceptor_pthread_mutex_lock((pthread_mutex_t*)mtx_), 0);
#ifndef __APPLE__
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_lock((pthread_spinlock_t*)mtx_), 0);
#endif
  else if (type_ == RW)
    CHECK_EQ(__interceptor_pthread_rwlock_wrlock((pthread_rwlock_t*)mtx_), 0);
}

bool UserMutex::TryLock() {
  CHECK(alive_);
  if (type_ == Normal)
    return __interceptor_pthread_mutex_trylock((pthread_mutex_t*)mtx_) == 0;
#ifndef __APPLE__
  else if (type_ == Spin)
    return pthread_spin_trylock((pthread_spinlock_t*)mtx_) == 0;
#endif
  else if (type_ == RW)
    return __interceptor_pthread_rwlock_trywrlock((pthread_rwlock_t*)mtx_) == 0;
  return false;
}

void UserMutex::Unlock() {
  CHECK(alive_);
  if (type_ == Normal)
    CHECK_EQ(__interceptor_pthread_mutex_unlock((pthread_mutex_t*)mtx_), 0);
#ifndef __APPLE__
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_unlock((pthread_spinlock_t*)mtx_), 0);
#endif
  else if (type_ == RW)
    CHECK_EQ(__interceptor_pthread_rwlock_unlock((pthread_rwlock_t*)mtx_), 0);
}

void UserMutex::ReadLock() {
  CHECK(alive_);
  CHECK(type_ == RW);
  CHECK_EQ(__interceptor_pthread_rwlock_rdlock((pthread_rwlock_t*)mtx_), 0);
}

bool UserMutex::TryReadLock() {
  CHECK(alive_);
  CHECK(type_ == RW);
  return __interceptor_pthread_rwlock_tryrdlock((pthread_rwlock_t*)mtx_) == 0;
}

void UserMutex::ReadUnlock() {
  CHECK(alive_);
  CHECK(type_ == RW);
  CHECK_EQ(__interceptor_pthread_rwlock_unlock((pthread_rwlock_t*)mtx_), 0);
}
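
// An Event is one operation that the test body asks a ScopedThread to
// perform; for try-lock style events, res carries the outcome back to the
// sender.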
struct Event {
  enum Type {
    SHUTDOWN,
    READ,
    WRITE,
    VPTR_UPDATE,
    CALL,
    RETURN,
    MUTEX_CREATE,
    MUTEX_DESTROY,
    MUTEX_LOCK,
    MUTEX_TRYLOCK,
    MUTEX_UNLOCK,
    MUTEX_READLOCK,
    MUTEX_TRYREADLOCK,
    MUTEX_READUNLOCK,
    MEMCPY,
    MEMSET
  };
  Type type;
  void *ptr;
  uptr arg;
  uptr arg2;
  bool res;
  bool expect_report;
  __tsan::ReportType report_type;

  explicit Event(Type type, const void *ptr = 0, uptr arg = 0, uptr arg2 = 0)
      : type(type),
        ptr(const_cast<void *>(ptr)),
        arg(arg),
        arg2(arg2),
        res(),
        expect_report(),
        report_type() {}

  void ExpectReport(__tsan::ReportType type) {
    expect_report = true;
    report_type = type;
  }
};
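
// Per-thread implementation state: the worker pthread plus a one-element
// mailbox ('event') through which the main thread hands over one Event* at a
// time.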
struct ScopedThread::Impl {
  pthread_t thread;
  bool main;
  bool detached;
  __tsan::atomic_uintptr_t event;  // Event*

  static void *ScopedThreadCallback(void *arg);
  void send(Event *ev);
  void HandleEvent(Event *ev);
};
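
// Executes one event on the current thread, arming the expected-report state
// beforehand and verifying afterwards that an expected report actually fired.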
void ScopedThread::Impl::HandleEvent(Event *ev) {
  CHECK_EQ(expect_report, false);
  expect_report = ev->expect_report;
  expect_report_reported = false;
  expect_report_type = ev->report_type;
  switch (ev->type) {
    case Event::READ:
    case Event::WRITE: {
      void (*tsan_mop)(void *addr, void *pc) = 0;
      if (ev->type == Event::READ) {
        switch (ev->arg /*size*/) {
          case 1:
            tsan_mop = __tsan_read1_pc;
            break;
          case 2:
            tsan_mop = __tsan_read2_pc;
            break;
          case 4:
            tsan_mop = __tsan_read4_pc;
            break;
          case 8:
            tsan_mop = __tsan_read8_pc;
            break;
          case 16:
            tsan_mop = __tsan_read16_pc;
            break;
        }
      } else {
        switch (ev->arg /*size*/) {
          case 1:
            tsan_mop = __tsan_write1_pc;
            break;
          case 2:
            tsan_mop = __tsan_write2_pc;
            break;
          case 4:
            tsan_mop = __tsan_write4_pc;
            break;
          case 8:
            tsan_mop = __tsan_write8_pc;
            break;
          case 16:
            tsan_mop = __tsan_write16_pc;
            break;
        }
      }
      CHECK_NE(tsan_mop, 0);
#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__NetBSD__)
      const int ErrCode = ESOCKTNOSUPPORT;
#else
      const int ErrCode = ECHRNG;
#endif
      errno = ErrCode;
      tsan_mop(ev->ptr, (void *)ev->arg2);
      CHECK_EQ(ErrCode, errno);  // In no case must errno be changed.
      break;
    }
    case Event::VPTR_UPDATE:
      __tsan_vptr_update((void**)ev->ptr, (void*)ev->arg);
      break;
    case Event::CALL:
      __tsan_func_entry((void*)((uptr)ev->ptr));
      break;
    case Event::RETURN:
      __tsan_func_exit();
      break;
    case Event::MUTEX_CREATE:
      static_cast<UserMutex *>(ev->ptr)->Init();
      break;
    case Event::MUTEX_DESTROY:
      static_cast<UserMutex *>(ev->ptr)->Destroy();
      break;
    case Event::MUTEX_LOCK:
      static_cast<UserMutex *>(ev->ptr)->Lock();
      break;
    case Event::MUTEX_TRYLOCK:
      ev->res = static_cast<UserMutex *>(ev->ptr)->TryLock();
      break;
    case Event::MUTEX_UNLOCK:
      static_cast<UserMutex *>(ev->ptr)->Unlock();
      break;
    case Event::MUTEX_READLOCK:
      static_cast<UserMutex *>(ev->ptr)->ReadLock();
      break;
    case Event::MUTEX_TRYREADLOCK:
      ev->res = static_cast<UserMutex *>(ev->ptr)->TryReadLock();
      break;
    case Event::MUTEX_READUNLOCK:
      static_cast<UserMutex *>(ev->ptr)->ReadUnlock();
      break;
    case Event::MEMCPY:
      __interceptor_memcpy(ev->ptr, (void*)ev->arg, ev->arg2);
      break;
    case Event::MEMSET:
      __interceptor_memset(ev->ptr, ev->arg, ev->arg2);
      break;
    default:
      CHECK(0);
  }
  if (expect_report && !expect_report_reported) {
    printf("Missed expected report of type %d\n", (int)ev->report_type);
    EXPECT_TRUE(false) << "Missed expected race";
  }
  expect_report = false;
}
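
// Worker-thread body: spin on the mailbox, run each incoming event, and
// acknowledge completion by clearing the slot; a SHUTDOWN event exits the
// loop.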
void *ScopedThread::Impl::ScopedThreadCallback(void *arg) {
  __tsan_func_entry(CALLERPC);
  Impl *impl = (Impl*)arg;
  for (;;) {
    Event *ev =
        (Event *)atomic_load(&impl->event, __tsan::memory_order_acquire);
    if (ev == 0) {
      sched_yield();
      continue;
    }
    if (ev->type == Event::SHUTDOWN) {
      atomic_store(&impl->event, 0, __tsan::memory_order_release);
      break;
    }
    impl->HandleEvent(ev);
    atomic_store(&impl->event, 0, __tsan::memory_order_release);
  }
  __tsan_func_exit();
  return 0;
}
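
// Hands one event to the worker and spin-waits until the worker clears the
// mailbox, i.e. until the event has been fully processed. On the main
// pseudo-thread, events are executed inline instead.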
void ScopedThread::Impl::send(Event *e) {
  if (main) {
    HandleEvent(e);
  } else {
    CHECK_EQ(atomic_load(&event, __tsan::memory_order_relaxed), 0);
    atomic_store(&event, (uintptr_t)e, __tsan::memory_order_release);
    while (atomic_load(&event, __tsan::memory_order_acquire) != 0)
      sched_yield();
  }
}

ScopedThread::ScopedThread(bool detached, bool main) {
  impl_ = new Impl;
  impl_->main = main;
  impl_->detached = detached;
  atomic_store(&impl_->event, 0, __tsan::memory_order_relaxed);
  if (!main) {
    pthread_attr_t attr;
    pthread_attr_init(&attr);
    pthread_attr_setdetachstate(
        &attr, detached ? PTHREAD_CREATE_DETACHED : PTHREAD_CREATE_JOINABLE);
    pthread_attr_setstacksize(&attr, 64*1024);
    __interceptor_pthread_create(&impl_->thread, &attr,
        ScopedThread::Impl::ScopedThreadCallback, impl_);
  }
}

ScopedThread::~ScopedThread() {
  if (!impl_->main) {
    Event event(Event::SHUTDOWN);
    impl_->send(&event);
    if (!impl_->detached)
      __interceptor_pthread_join(impl_->thread, 0);
  }
  delete impl_;
}

void ScopedThread::Detach() {
  CHECK(!impl_->main);
  CHECK(!impl_->detached);
  impl_->detached = true;
  __interceptor_pthread_detach(impl_->thread);
}

void ScopedThread::Access(void *addr, bool is_write,
                          int size, bool expect_race) {
  Event event(is_write ? Event::WRITE : Event::READ, addr, size,
              (uptr)CALLERPC);
  if (expect_race)
    event.ExpectReport(__tsan::ReportTypeRace);
  impl_->send(&event);
}

void ScopedThread::VptrUpdate(const MemLoc &vptr,
                              const MemLoc &new_val,
                              bool expect_race) {
  Event event(Event::VPTR_UPDATE, vptr.loc(), (uptr)new_val.loc());
  if (expect_race)
    event.ExpectReport(__tsan::ReportTypeRace);
  impl_->send(&event);
}

void ScopedThread::Call(void(*pc)()) {
  Event event(Event::CALL, (void*)((uintptr_t)pc));
  impl_->send(&event);
}

void ScopedThread::Return() {
  Event event(Event::RETURN);
  impl_->send(&event);
}

void ScopedThread::Create(const UserMutex &m) {
  Event event(Event::MUTEX_CREATE, &m);
  impl_->send(&event);
}

void ScopedThread::Destroy(const UserMutex &m) {
  Event event(Event::MUTEX_DESTROY, &m);
  impl_->send(&event);
}

void ScopedThread::Lock(const UserMutex &m) {
  Event event(Event::MUTEX_LOCK, &m);
  impl_->send(&event);
}

bool ScopedThread::TryLock(const UserMutex &m) {
  Event event(Event::MUTEX_TRYLOCK, &m);
  impl_->send(&event);
  return event.res;
}

void ScopedThread::Unlock(const UserMutex &m) {
  Event event(Event::MUTEX_UNLOCK, &m);
  impl_->send(&event);
}

void ScopedThread::ReadLock(const UserMutex &m) {
  Event event(Event::MUTEX_READLOCK, &m);
  impl_->send(&event);
}

bool ScopedThread::TryReadLock(const UserMutex &m) {
  Event event(Event::MUTEX_TRYREADLOCK, &m);
  impl_->send(&event);
  return event.res;
}

void ScopedThread::ReadUnlock(const UserMutex &m) {
  Event event(Event::MUTEX_READUNLOCK, &m);
  impl_->send(&event);
}

void ScopedThread::Memcpy(void *dst, const void *src, int size,
                          bool expect_race) {
  Event event(Event::MEMCPY, dst, (uptr)src, size);
  if (expect_race)
    event.ExpectReport(__tsan::ReportTypeRace);
  impl_->send(&event);
}

void ScopedThread::Memset(void *dst, int val, int size,
                          bool expect_race) {
  Event event(Event::MEMSET, dst, val, size);
  if (expect_race)
    event.ExpectReport(__tsan::ReportTypeRace);
  impl_->send(&event);
}