//===-- tsan_trace.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#ifndef TSAN_TRACE_H
#define TSAN_TRACE_H

#include "tsan_defs.h"
#include "tsan_ilist.h"
#include "tsan_mutexset.h"
#include "tsan_stack_trace.h"

namespace __tsan {

enum class EventType : u64 {
  kAccessExt,
  kAccessRange,
  kLock,
  kRLock,
  kUnlock,
  kTime,
};
31 // "Base" type for all events for type dispatch.
33 // We use variable-length type encoding to give more bits to some event
34 // types that need them. If is_access is set, this is EventAccess.
35 // Otherwise, if is_func is set, this is EventFunc.
36 // Otherwise type denotes the type.
42 static_assert(sizeof(Event
) == 8, "bad Event size");
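
// Illustrative sketch only, not used by the runtime: how a reader could
// dispatch on the variable-length type encoding described above. The names
// RawEventKind and ClassifyEventSketch are hypothetical and exist purely for
// illustration.
enum class RawEventKind { kCompressedAccess, kFuncEntryExit, kTyped };
inline RawEventKind ClassifyEventSketch(const Event& ev) {
  if (ev.is_access)
    return RawEventKind::kCompressedAccess;  // decode as EventAccess
  if (ev.is_func)
    return RawEventKind::kFuncEntryExit;  // decode as EventFunc
  return RawEventKind::kTyped;  // ev.type selects one of the structs below
}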

// Nop event used as padding and does not affect state during replay.
static constexpr Event NopEvent = {1, 0, EventType::kAccessExt, 0};

// Compressed memory access can represent only some events with PCs
// close enough to each other. Otherwise we fall back to EventAccessExt.
struct EventAccess {
  static constexpr uptr kPCBits = 15;
  static_assert(kPCBits + kCompressedAddrBits + 5 == 64,
                "unused bits in EventAccess");

  u64 is_access : 1;  // = 1
  u64 is_read : 1;
  u64 is_atomic : 1;
  u64 size_log : 2;
  u64 pc_delta : kPCBits;  // signed delta from the previous memory access PC
  u64 addr : kCompressedAddrBits;
};
static_assert(sizeof(EventAccess) == 8, "bad EventAccess size");
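
// Illustrative sketch only (hypothetical helpers, not the runtime's actual
// encoder/decoder): one plausible way to round-trip a PC through pc_delta.
// It assumes the writer stores the delta to the previous access PC with a
// bias of 1 << (kPCBits - 1) so that small negative deltas fit as well.
inline bool TryEncodePCDeltaSketch(uptr pc, uptr prev_pc, u64* delta) {
  u64 biased = pc - prev_pc + (1ull << (EventAccess::kPCBits - 1));
  if (biased >= (1ull << EventAccess::kPCBits))
    return false;  // PCs too far apart, fall back to EventAccessExt
  *delta = biased;
  return true;
}
inline uptr DecodePCDeltaSketch(uptr prev_pc, u64 pc_delta) {
  return prev_pc + pc_delta - (1ull << (EventAccess::kPCBits - 1));
}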

// Function entry (pc != 0) or exit (pc == 0).
struct EventFunc {
  u64 is_access : 1;  // = 0
  u64 is_func : 1;    // = 1
  u64 pc : 62;
};
static_assert(sizeof(EventFunc) == 8, "bad EventFunc size");
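
// Illustrative sketch only (hypothetical helpers): per the comment above, a
// single event type encodes both directions, distinguished by whether pc is
// zero.
inline bool IsFuncEntrySketch(const EventFunc& ev) { return ev.pc != 0; }
inline bool IsFuncExitSketch(const EventFunc& ev) { return ev.pc == 0; }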

// Extended memory access with full PC.
struct EventAccessExt {
  // Note: precisely specifying the unused parts of the bitfield is critical
  // for performance. If we don't specify them, the compiler will generate code
  // to load the old value and shuffle it to extract the unused bits to apply
  // to the new value. If we specify the unused part and store 0 in there, all
  // that unnecessary code goes away (store of the 0 const is combined with
  // other constant parts).
  static constexpr uptr kUnusedBits = 11;
  static_assert(kCompressedAddrBits + kUnusedBits + 9 == 64,
                "unused bits in EventAccessExt");

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kAccessExt
  u64 is_read : 1;
  u64 is_atomic : 1;
  u64 size_log : 2;
  u64 _ : kUnusedBits;
  u64 addr : kCompressedAddrBits;
  u64 pc;
};
static_assert(sizeof(EventAccessExt) == 16, "bad EventAccessExt size");
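
// Illustrative sketch only (hypothetical helper): per the note above, a
// writer assigns every bitfield member, including the unused part, so the
// compiler can emit plain constant stores instead of read-modify-write
// sequences for the untouched bits. The concrete field values are examples.
inline void FillAccessExtSketch(EventAccessExt* ev, uptr compressed_addr,
                                uptr pc) {
  ev->is_access = 0;
  ev->is_func = 0;
  ev->type = EventType::kAccessExt;
  ev->is_read = 0;
  ev->is_atomic = 0;
  ev->size_log = 3;  // e.g. an 8-byte access
  ev->_ = 0;         // explicitly zero the unused bits (see note above)
  ev->addr = compressed_addr;  // assumed already compressed to kCompressedAddrBits
  ev->pc = pc;
}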

// Access to a memory range.
struct EventAccessRange {
  static constexpr uptr kSizeLoBits = 13;
  static_assert(kCompressedAddrBits + kSizeLoBits + 7 == 64,
                "unused bits in EventAccessRange");

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kAccessRange
  u64 is_read : 1;
  u64 is_free : 1;
  u64 size_lo : kSizeLoBits;
  u64 pc : kCompressedAddrBits;
  u64 addr : kCompressedAddrBits;
  u64 size_hi : 64 - kCompressedAddrBits;
};
static_assert(sizeof(EventAccessRange) == 16, "bad EventAccessRange size");
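
// Illustrative sketch only (hypothetical helper): the range size is split
// across the two words, so a reader reassembles it as hi:lo.
inline u64 AccessRangeSizeSketch(const EventAccessRange& ev) {
  return (u64(ev.size_hi) << EventAccessRange::kSizeLoBits) | ev.size_lo;
}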

// Mutex lock.
struct EventLock {
  static constexpr uptr kStackIDLoBits = 15;
  static constexpr uptr kStackIDHiBits =
      sizeof(StackID) * kByteBits - kStackIDLoBits;
  static constexpr uptr kUnusedBits = 3;
  static_assert(kCompressedAddrBits + kStackIDLoBits + 5 == 64,
                "unused bits in EventLock");
  static_assert(kCompressedAddrBits + kStackIDHiBits + kUnusedBits == 64,
                "unused bits in EventLock");

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kLock or EventType::kRLock
  u64 pc : kCompressedAddrBits;
  u64 stack_lo : kStackIDLoBits;
  u64 stack_hi : sizeof(StackID) * kByteBits - kStackIDLoBits;
  u64 _ : kUnusedBits;
  u64 addr : kCompressedAddrBits;
};
static_assert(sizeof(EventLock) == 16, "bad EventLock size");
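
// Illustrative sketch only (hypothetical helper): the lock stack ID is split
// into low/high halves so that pc and stack_lo pack into the first word; a
// reader stitches the StackID back together.
inline StackID LockStackIDSketch(const EventLock& ev) {
  return static_cast<StackID>((u64(ev.stack_hi) << EventLock::kStackIDLoBits) |
                              ev.stack_lo);
}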

// Mutex unlock.
struct EventUnlock {
  static constexpr uptr kUnusedBits = 15;
  static_assert(kCompressedAddrBits + kUnusedBits + 5 == 64,
                "unused bits in EventUnlock");

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kUnlock
  u64 _ : kUnusedBits;
  u64 addr : kCompressedAddrBits;
};
static_assert(sizeof(EventUnlock) == 8, "bad EventUnlock size");

// Time change event.
struct EventTime {
  static constexpr uptr kUnusedBits = 37;
  static_assert(kUnusedBits + sizeof(Sid) * kByteBits + kEpochBits + 5 == 64,
                "unused bits in EventTime");

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kTime
  u64 sid : sizeof(Sid) * kByteBits;
  u64 epoch : kEpochBits;
  u64 _ : kUnusedBits;
};
static_assert(sizeof(EventTime) == 8, "bad EventTime size");
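
// Illustrative sketch only (hypothetical helper): the raw sid/epoch bits map
// back onto the Sid and Epoch types from tsan_defs.h during replay.
inline void DecodeTimeSketch(const EventTime& ev, Sid* sid, Epoch* epoch) {
  *sid = static_cast<Sid>(ev.sid);
  *epoch = static_cast<Epoch>(ev.epoch);
}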

struct Trace;

struct TraceHeader {
  Trace* trace = nullptr;  // back-pointer to Trace containing this part
  INode trace_parts;       // in Trace::parts
  INode global;            // in Context::trace_part_recycle
};

struct TracePart : TraceHeader {
  // There are a lot of goroutines in Go, so we use smaller parts.
  static constexpr uptr kByteSize = (SANITIZER_GO ? 128 : 256) << 10;
  static constexpr uptr kSize =
      (kByteSize - sizeof(TraceHeader)) / sizeof(Event);
  // TraceAcquire does a fast event pointer overflow check by comparing
  // a pointer into TracePart::events with the kAlignment mask. Since
  // TraceParts are allocated page-aligned, this check detects the end of the
  // array (it also has false positives in the middle that are filtered
  // separately). This also requires events to be the last field.
  static constexpr uptr kAlignment = 0xff0;
  Event events[kSize];

  TracePart() {}
};
static_assert(sizeof(TracePart) == TracePart::kByteSize, "bad TracePart size");

struct Trace {
  Mutex mtx;
  IList<TraceHeader, &TraceHeader::trace_parts, TracePart> parts;
  // First node non-queued into ctx->trace_part_recycle.
  TracePart* local_head;
  // Final position in the last part for finished threads.
  Event* final_pos = nullptr;
  // Number of trace parts allocated on behalf of this trace specifically.
  // Total number of parts in this trace can be larger if we retake some
  // parts from other traces.
  uptr parts_allocated = 0;

  Trace() : mtx(MutexTypeTrace) {}

  // We need at least 3 parts per thread, because we want to keep at least
  // 2 parts per thread that are not queued into ctx->trace_part_recycle
  // (the current one being filled and one full part that ensures that
  // we always have at least one part worth of previous memory accesses).
  static constexpr uptr kMinParts = 3;

  static constexpr uptr kFinishedThreadLo = 16;
  static constexpr uptr kFinishedThreadHi = 64;
};

}  // namespace __tsan

#endif  // TSAN_TRACE_H