//===-- tsan_trace_test.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_trace.h"

#include <pthread.h>

#include "gtest/gtest.h"
#include "tsan_rtl.h"

#if !defined(__x86_64__)
// These tests are currently crashing on ppc64:
// https://reviews.llvm.org/D110546#3025422
// due to the way we create thread contexts.
// There must be some difference in thread initialization
// between normal execution and unit tests.
# define TRACE_TEST(SUITE, NAME) TEST(SUITE, DISABLED_##NAME)
#else
# define TRACE_TEST(SUITE, NAME) TEST(SUITE, NAME)
#endif

namespace __tsan {

// We need to run all trace tests in a new thread,
// so that the thread trace is empty initially.
template <uptr N>
struct ThreadArray {
  ThreadArray() {
    for (auto *&thr : threads) {
      thr = static_cast<ThreadState *>(
          MmapOrDie(sizeof(ThreadState), "ThreadState"));
      Tid tid = ThreadCreate(cur_thread(), 0, 0, true);
      Processor *proc = ProcCreate();
      ProcWire(proc, thr);
      ThreadStart(thr, tid, 0, ThreadType::Fiber);
    }
  }

  ~ThreadArray() {
    for (uptr i = 0; i < N; i++) {
      if (threads[i])
        Finish(i);
    }
  }

  void Finish(uptr i) {
    auto *thr = threads[i];
    threads[i] = nullptr;
    Processor *proc = thr->proc();
    ThreadFinish(thr);
    ProcUnwire(proc, thr);
    ProcDestroy(proc);
    UnmapOrDie(thr, sizeof(ThreadState));
  }

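  // The accessor operators below let a ThreadArray<1> be passed directly to
  // functions that expect a ThreadState*.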
  ThreadState *threads[N];
  ThreadState *operator[](uptr i) { return threads[i]; }
  ThreadState *operator->() { return threads[0]; }
  operator ThreadState *() { return threads[0]; }
};

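// The Trace.* tests below write synthetic events into a fresh thread's trace
// and then check that RestoreStack() reconstructs the expected stack trace and
// mutex set from those events.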
TRACE_TEST(Trace, RestoreAccess) {
  // A basic test with some function entry/exit events,
  // some mutex lock/unlock events and some other distracting
  // events.
  ThreadArray<1> thr;
  TraceFunc(thr, 0x1000);
  TraceFunc(thr, 0x1001);
  TraceMutexLock(thr, EventType::kLock, 0x4000, 0x5000, 0x6000);
  TraceMutexLock(thr, EventType::kLock, 0x4001, 0x5001, 0x6001);
  TraceMutexUnlock(thr, 0x5000);
  TraceFunc(thr);
  CHECK(TryTraceMemoryAccess(thr, 0x2001, 0x3001, 8, kAccessRead));
  TraceMutexLock(thr, EventType::kRLock, 0x4002, 0x5002, 0x6002);
  TraceFunc(thr, 0x1002);
  CHECK(TryTraceMemoryAccess(thr, 0x2000, 0x3000, 8, kAccessRead));
  // This is the access we want to find.
  // The previous one is equivalent, but RestoreStack must prefer
  // the last of the matching accesses.
  CHECK(TryTraceMemoryAccess(thr, 0x2002, 0x3000, 8, kAccessRead));
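  // Acquire the locks that RestoreStack() expects the caller to hold.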
  Lock slot_lock(&ctx->slots[static_cast<uptr>(thr->fast_state.sid())].mtx);
  ThreadRegistryLock lock1(&ctx->thread_registry);
  Lock lock2(&ctx->slot_mtx);
  Tid tid = kInvalidTid;
  VarSizeStackTrace stk;
  MutexSet mset;
  uptr tag = kExternalTagNone;
  bool res = RestoreStack(EventType::kAccessExt, thr->fast_state.sid(),
                          thr->fast_state.epoch(), 0x3000, 8, kAccessRead, &tid,
                          &stk, &mset, &tag);
  CHECK(res);
  CHECK_EQ(tid, thr->tid);
  CHECK_EQ(stk.size, 3);
  CHECK_EQ(stk.trace[0], 0x1000);
  CHECK_EQ(stk.trace[1], 0x1002);
  CHECK_EQ(stk.trace[2], 0x2002);
  CHECK_EQ(mset.Size(), 2);
  CHECK_EQ(mset.Get(0).addr, 0x5001);
  CHECK_EQ(mset.Get(0).stack_id, 0x6001);
  CHECK_EQ(mset.Get(0).write, true);
  CHECK_EQ(mset.Get(1).addr, 0x5002);
  CHECK_EQ(mset.Get(1).stack_id, 0x6002);
  CHECK_EQ(mset.Get(1).write, false);
  CHECK_EQ(tag, kExternalTagNone);
}

TRACE_TEST(Trace, MemoryAccessSize) {
  // Test tracing and matching of accesses of different sizes.
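  // Each test case traces an access of access_size bytes at 0x3000 and then
  // queries RestoreStack() for an access at 0x3000 + offset of the given size;
  // res says whether the two accesses are expected to match.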
  struct Params {
    uptr access_size, offset, size;
    bool res;
  };
  Params tests[] = {
      {1, 0, 1, true},  {4, 0, 2, true},
      {4, 2, 2, true},  {8, 3, 1, true},
      {2, 1, 1, true},  {1, 1, 1, false},
      {8, 5, 4, false}, {4, static_cast<uptr>(-1l), 4, false},
  };
  for (auto params : tests) {
    for (int type = 0; type < 3; type++) {
      ThreadArray<1> thr;
      Printf("access_size=%zu, offset=%zu, size=%zu, res=%d, type=%d\n",
             params.access_size, params.offset, params.size, params.res, type);
      TraceFunc(thr, 0x1000);
      switch (type) {
        case 0:
          // This should emit a compressed event.
          CHECK(TryTraceMemoryAccess(thr, 0x2000, 0x3000, params.access_size,
                                     kAccessRead));
          break;
        case 1:
          // This should emit a full event.
          CHECK(TryTraceMemoryAccess(thr, 0x2000000, 0x3000, params.access_size,
                                     kAccessRead));
          break;
        case 2:
          TraceMemoryAccessRange(thr, 0x2000000, 0x3000, params.access_size,
                                 kAccessRead);
          break;
      }
      Lock slot_lock(&ctx->slots[static_cast<uptr>(thr->fast_state.sid())].mtx);
      ThreadRegistryLock lock1(&ctx->thread_registry);
      Lock lock2(&ctx->slot_mtx);
      Tid tid = kInvalidTid;
      VarSizeStackTrace stk;
      MutexSet mset;
      uptr tag = kExternalTagNone;
      bool res =
          RestoreStack(EventType::kAccessExt, thr->fast_state.sid(),
                       thr->fast_state.epoch(), 0x3000 + params.offset,
                       params.size, kAccessRead, &tid, &stk, &mset, &tag);
      CHECK_EQ(res, params.res);
      if (params.res) {
        CHECK_EQ(stk.size, 2);
        CHECK_EQ(stk.trace[0], 0x1000);
        CHECK_EQ(stk.trace[1], type ? 0x2000000 : 0x2000);
      }
    }
  }
}

TRACE_TEST(Trace, RestoreMutexLock) {
  // Check restoration of a mutex lock event.
  ThreadArray<1> thr;
  TraceFunc(thr, 0x1000);
  TraceMutexLock(thr, EventType::kLock, 0x4000, 0x5000, 0x6000);
  TraceMutexLock(thr, EventType::kRLock, 0x4001, 0x5001, 0x6001);
  TraceMutexLock(thr, EventType::kRLock, 0x4002, 0x5001, 0x6002);
  Lock slot_lock(&ctx->slots[static_cast<uptr>(thr->fast_state.sid())].mtx);
  ThreadRegistryLock lock1(&ctx->thread_registry);
  Lock lock2(&ctx->slot_mtx);
  Tid tid = kInvalidTid;
  VarSizeStackTrace stk;
  MutexSet mset;
  uptr tag = kExternalTagNone;
  bool res = RestoreStack(EventType::kLock, thr->fast_state.sid(),
                          thr->fast_state.epoch(), 0x5001, 0, 0, &tid, &stk,
                          &mset, &tag);
  CHECK(res);
  CHECK_EQ(stk.size, 2);
  CHECK_EQ(stk.trace[0], 0x1000);
  CHECK_EQ(stk.trace[1], 0x4002);
  CHECK_EQ(mset.Size(), 2);
  CHECK_EQ(mset.Get(0).addr, 0x5000);
  CHECK_EQ(mset.Get(0).stack_id, 0x6000);
  CHECK_EQ(mset.Get(0).write, true);
  CHECK_EQ(mset.Get(1).addr, 0x5001);
  CHECK_EQ(mset.Get(1).stack_id, 0x6001);
  CHECK_EQ(mset.Get(1).write, false);
}

TRACE_TEST(Trace, MultiPart) {
  // Check replay of a trace with multiple parts.
  ThreadArray<1> thr;
  FuncEntry(thr, 0x1000);
  FuncEntry(thr, 0x2000);
  MutexPreLock(thr, 0x4000, 0x5000, 0);
  MutexPostLock(thr, 0x4000, 0x5000, 0);
  MutexPreLock(thr, 0x4000, 0x5000, 0);
  MutexPostLock(thr, 0x4000, 0x5000, 0);
  const uptr kEvents = 3 * sizeof(TracePart) / sizeof(Event);
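  // Enough iterations to overflow a single trace part several times over.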
  for (uptr i = 0; i < kEvents; i++) {
    FuncEntry(thr, 0x3000);
    MutexPreLock(thr, 0x4002, 0x5002, 0);
    MutexPostLock(thr, 0x4002, 0x5002, 0);
    MutexUnlock(thr, 0x4003, 0x5002, 0);
    FuncExit(thr);
  }
  FuncEntry(thr, 0x4000);
  TraceMutexLock(thr, EventType::kRLock, 0x4001, 0x5001, 0x6001);
  CHECK(TryTraceMemoryAccess(thr, 0x2002, 0x3000, 8, kAccessRead));
  Lock slot_lock(&ctx->slots[static_cast<uptr>(thr->fast_state.sid())].mtx);
  ThreadRegistryLock lock1(&ctx->thread_registry);
  Lock lock2(&ctx->slot_mtx);
  Tid tid = kInvalidTid;
  VarSizeStackTrace stk;
  MutexSet mset;
  uptr tag = kExternalTagNone;
  bool res = RestoreStack(EventType::kAccessExt, thr->fast_state.sid(),
                          thr->fast_state.epoch(), 0x3000, 8, kAccessRead, &tid,
                          &stk, &mset, &tag);
  CHECK(res);
  CHECK_EQ(tid, thr->tid);
  CHECK_EQ(stk.size, 4);
  CHECK_EQ(stk.trace[0], 0x1000);
  CHECK_EQ(stk.trace[1], 0x2000);
  CHECK_EQ(stk.trace[2], 0x4000);
  CHECK_EQ(stk.trace[3], 0x2002);
  CHECK_EQ(mset.Size(), 2);
  CHECK_EQ(mset.Get(0).addr, 0x5000);
  CHECK_EQ(mset.Get(0).write, true);
  CHECK_EQ(mset.Get(0).count, 2);
  CHECK_EQ(mset.Get(1).addr, 0x5001);
  CHECK_EQ(mset.Get(1).write, false);
  CHECK_EQ(mset.Get(1).count, 1);
}

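// Stress trace part switching: each outer iteration fills an entire trace part
// with lock/unlock events inside a nested function frame.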
TRACE_TEST(Trace, DeepSwitch) {
  ThreadArray<1> thr;
  for (int i = 0; i < 2000; i++) {
    FuncEntry(thr, 0x1000);
    const uptr kEvents = sizeof(TracePart) / sizeof(Event);
    for (uptr i = 0; i < kEvents; i++) {
      TraceMutexLock(thr, EventType::kLock, 0x4000, 0x5000, 0x6000);
      TraceMutexUnlock(thr, 0x5000);
    }
    FuncExit(thr);
  }
}

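// Compare the global trace part accounting counters against expected values.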
void CheckTraceState(uptr count, uptr finished, uptr excess, uptr recycle) {
  Lock l(&ctx->slot_mtx);
  Printf("CheckTraceState(%zu/%zu, %zu/%zu, %zu/%zu, %zu/%zu)\n",
         ctx->trace_part_total_allocated, count,
         ctx->trace_part_recycle_finished, finished,
         ctx->trace_part_finished_excess, excess,
         ctx->trace_part_recycle.Size(), recycle);
  CHECK_EQ(ctx->trace_part_total_allocated, count);
  CHECK_EQ(ctx->trace_part_recycle_finished, finished);
  CHECK_EQ(ctx->trace_part_finished_excess, excess);
  CHECK_EQ(ctx->trace_part_recycle.Size(), recycle);
}

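// The TraceAlloc.* tests exercise allocation, recycling and reuse of trace
// parts via TraceSwitchPartImpl().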
TRACE_TEST(TraceAlloc, SingleThread) {
  TraceResetForTesting();
  auto check_thread = [&](ThreadState *thr, uptr size, uptr count,
                          uptr finished, uptr excess, uptr recycle) {
    CHECK_EQ(thr->tctx->trace.parts.Size(), size);
    CheckTraceState(count, finished, excess, recycle);
  };
  ThreadArray<2> threads;
  check_thread(threads[0], 0, 0, 0, 0, 0);
  TraceSwitchPartImpl(threads[0]);
  check_thread(threads[0], 1, 1, 0, 0, 0);
  TraceSwitchPartImpl(threads[0]);
  check_thread(threads[0], 2, 2, 0, 0, 0);
  TraceSwitchPartImpl(threads[0]);
  check_thread(threads[0], 3, 3, 0, 0, 1);
  TraceSwitchPartImpl(threads[0]);
  check_thread(threads[0], 3, 3, 0, 0, 1);
  threads.Finish(0);
  CheckTraceState(3, 3, 0, 3);
  threads.Finish(1);
  CheckTraceState(3, 3, 0, 3);
}

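// Check accounting of finished threads' trace parts around the
// Trace::kFinishedThreadHi threshold.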
TRACE_TEST(TraceAlloc, FinishedThreadReuse) {
  TraceResetForTesting();
  constexpr uptr Hi = Trace::kFinishedThreadHi;
  constexpr uptr kThreads = 4 * Hi;
  ThreadArray<kThreads> threads;
  for (uptr i = 0; i < kThreads; i++) {
    Printf("thread %zu\n", i);
    TraceSwitchPartImpl(threads[i]);
    if (i <= Hi)
      CheckTraceState(i + 1, i, 0, i);
    else if (i <= 2 * Hi)
      CheckTraceState(Hi + 1, Hi, i - Hi, Hi);
    else
      CheckTraceState(Hi + 1, Hi, Hi, Hi);
    threads.Finish(i);
    if (i < Hi)
      CheckTraceState(i + 1, i + 1, 0, i + 1);
    else if (i < 2 * Hi)
      CheckTraceState(Hi + 1, Hi + 1, i - Hi + 1, Hi + 1);
    else
      CheckTraceState(Hi + 1, Hi + 1, Hi + 1, Hi + 1);
  }
}

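// Similar to the above, but each thread switches through several trace parts
// (kParts = 2 * kMinParts) before it is finished.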
TRACE_TEST(TraceAlloc, FinishedThreadReuse2) {
  TraceResetForTesting();
  // constexpr uptr Lo = Trace::kFinishedThreadLo;
  // constexpr uptr Hi = Trace::kFinishedThreadHi;
  constexpr uptr Min = Trace::kMinParts;
  constexpr uptr kThreads = 10;
  constexpr uptr kParts = 2 * Min;
  ThreadArray<kThreads> threads;
  for (uptr i = 0; i < kThreads; i++) {
    Printf("thread %zu\n", i);
    for (uptr j = 0; j < kParts; j++) TraceSwitchPartImpl(threads[i]);
    if (i == 0)
      CheckTraceState(Min, 0, 0, 1);
    else
      CheckTraceState(2 * Min, 0, Min, Min + 1);
    threads.Finish(i);
    if (i == 0)
      CheckTraceState(Min, Min, 0, Min);
    else
      CheckTraceState(2 * Min, 2 * Min, Min, 2 * Min);
  }
}

}  // namespace __tsan