//===-- tsan_shadow.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef TSAN_SHADOW_H
#define TSAN_SHADOW_H

#include "tsan_defs.h"

namespace __tsan {

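// FastState holds the parts of the thread state that are copied into every
// shadow cell on each memory access: the thread's slot id (Sid), its current
// epoch, and the ignore-accesses flag, packed into 32 bits.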
class FastState {
 public:
  FastState() { Reset(); }

  void Reset() {
    part_.unused0_ = 0;
    part_.sid_ = static_cast<u8>(kFreeSid);
    part_.epoch_ = static_cast<u16>(kEpochLast);
    part_.unused1_ = 0;
    part_.ignore_accesses_ = false;
  }
  void SetSid(Sid sid) { part_.sid_ = static_cast<u8>(sid); }

  Sid sid() const { return static_cast<Sid>(part_.sid_); }

  Epoch epoch() const { return static_cast<Epoch>(part_.epoch_); }

  void SetEpoch(Epoch epoch) { part_.epoch_ = static_cast<u16>(epoch); }

  void SetIgnoreBit() { part_.ignore_accesses_ = 1; }
  void ClearIgnoreBit() { part_.ignore_accesses_ = 0; }
  bool GetIgnoreBit() const { return part_.ignore_accesses_; }

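  // Note: the bit layout below deliberately lines up with Shadow; in
  // particular ignore_accesses_ occupies the same bit as Shadow's is_atomic_
  // (see the note in the Shadow constructor).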
 private:
  friend class Shadow;
  struct Parts {
    u32 unused0_ : 8;
    u32 sid_ : 8;
    u32 epoch_ : kEpochBits;
    u32 unused1_ : 1;
    u32 ignore_accesses_ : 1;
  };
  union {
    Parts part_;
    u32 raw_;
  };
};

static_assert(sizeof(FastState) == kShadowSize, "bad FastState size");

class Shadow {
 public:
  static constexpr RawShadow kEmpty = static_cast<RawShadow>(0);

  Shadow(FastState state, u32 addr, u32 size, AccessType typ) {
    raw_ = state.raw_;
    DCHECK_GT(size, 0);
    DCHECK_LE(size, 8);
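    // The access mask has one bit per byte of the 8-byte shadow cell:
    // size consecutive bits starting at bit (addr & 7), clipped to 8 bits.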
    UNUSED Sid sid0 = part_.sid_;
    UNUSED u16 epoch0 = part_.epoch_;
    raw_ |= (!!(typ & kAccessAtomic) << kIsAtomicShift) |
            (!!(typ & kAccessRead) << kIsReadShift) |
            (((((1u << size) - 1) << (addr & 0x7)) & 0xff) << kAccessShift);
    // Note: we don't check kAccessAtomic because it overlaps with
    // FastState::ignore_accesses_ and it may be set spuriously.
    DCHECK_EQ(part_.is_read_, !!(typ & kAccessRead));
    DCHECK_EQ(sid(), sid0);
    DCHECK_EQ(epoch(), epoch0);
  }

  explicit Shadow(RawShadow x = Shadow::kEmpty) { raw_ = static_cast<u32>(x); }

  RawShadow raw() const { return static_cast<RawShadow>(raw_); }
  Sid sid() const { return part_.sid_; }
  Epoch epoch() const { return static_cast<Epoch>(part_.epoch_); }
  u8 access() const { return part_.access_; }

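  // Decodes the stored access back into the cell-relative address, access
  // size, and access type; null out-pointers are skipped.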
  void GetAccess(uptr *addr, uptr *size, AccessType *typ) const {
    DCHECK(part_.access_ != 0 || raw_ == static_cast<u32>(Shadow::kRodata));
    if (addr)
      *addr = part_.access_ ? __builtin_ffs(part_.access_) - 1 : 0;
    if (size)
      *size = part_.access_ == kFreeAccess ? kShadowCell
                                           : __builtin_popcount(part_.access_);
    if (typ) {
      *typ = part_.is_read_ ? kAccessRead : kAccessWrite;
      if (part_.is_atomic_)
        *typ |= kAccessAtomic;
      if (part_.access_ == kFreeAccess)
        *typ |= kAccessFree;
    }
  }

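  // Returns true if the stored access and the incoming access are both reads
  // or both atomic, i.e. the pair alone cannot constitute a race.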
  bool IsBothReadsOrAtomic(AccessType typ) const {
    u32 is_read = !!(typ & kAccessRead);
    u32 is_atomic = !!(typ & kAccessAtomic);
    bool res =
        raw_ & ((is_atomic << kIsAtomicShift) | (is_read << kIsReadShift));
    DCHECK_EQ(res,
              (part_.is_read_ && is_read) || (part_.is_atomic_ && is_atomic));
    return res;
  }

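  // Returns true if the stored access is weaker than or equal to the incoming
  // one, where atomic is weaker than plain and read is weaker than write. On
  // little-endian the two flag bits are compared with a single integer
  // comparison instead of two bitfield reads.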
  bool IsRWWeakerOrEqual(AccessType typ) const {
    u32 is_read = !!(typ & kAccessRead);
    u32 is_atomic = !!(typ & kAccessAtomic);
    UNUSED u32 res0 =
        (part_.is_atomic_ > is_atomic) ||
        (part_.is_atomic_ == is_atomic && part_.is_read_ >= is_read);
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
    const u32 kAtomicReadMask = (1 << kIsAtomicShift) | (1 << kIsReadShift);
    bool res = (raw_ & kAtomicReadMask) >=
               ((is_atomic << kIsAtomicShift) | (is_read << kIsReadShift));
    DCHECK_EQ(res, res0);
    return res;
#else
    return res0;
#endif
  }

  // The FreedMarker must not pass the "same access" check so that we don't
  // return from the race detection algorithm early.
  static RawShadow FreedMarker() {
    FastState fs;
    fs.SetSid(kFreeSid);
    fs.SetEpoch(kEpochLast);
    Shadow s(fs, 0, 8, kAccessWrite);
    return s.raw();
  }

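  // Preserves the freeing thread's sid/epoch with the kFreeAccess mask so
  // that a later use-after-free report can point at the free() stack.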
  static RawShadow FreedInfo(Sid sid, Epoch epoch) {
    Shadow s;
    s.part_.sid_ = sid;
    s.part_.epoch_ = static_cast<u16>(epoch);
    s.part_.access_ = kFreeAccess;
    return s.raw();
  }

 private:
  struct Parts {
    u8 access_;
    Sid sid_;
    u16 epoch_ : kEpochBits;
    u16 is_read_ : 1;
    u16 is_atomic_ : 1;
  };
  union {
    Parts part_;
    u32 raw_;
  };

  static constexpr u8 kFreeAccess = 0x81;
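  // 0x81 sets two non-adjacent bits, a pattern no real 1-8 byte access mask
  // (a contiguous run of bits) can produce, so freed cells stay distinct.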

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  static constexpr uptr kAccessShift = 0;
  static constexpr uptr kIsReadShift = 30;
  static constexpr uptr kIsAtomicShift = 31;
#else
  static constexpr uptr kAccessShift = 24;
  static constexpr uptr kIsReadShift = 1;
  static constexpr uptr kIsAtomicShift = 0;
#endif

 public:
  // .rodata shadow marker, see MapRodata and ContainsSameAccessFast.
  static constexpr RawShadow kRodata =
      static_cast<RawShadow>(1 << kIsReadShift);
};

static_assert(sizeof(Shadow) == kShadowSize, "bad Shadow size");

ALWAYS_INLINE RawShadow LoadShadow(RawShadow *p) {
  return static_cast<RawShadow>(
      atomic_load((atomic_uint32_t *)p, memory_order_relaxed));
}

ALWAYS_INLINE void StoreShadow(RawShadow *sp, RawShadow s) {
  atomic_store((atomic_uint32_t *)sp, static_cast<u32>(s),
               memory_order_relaxed);
}
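// A minimal encode/decode sketch (illustrative only; assumes the raw Sid and
// Epoch values below are in range for their enum types):
//   FastState fs;
//   fs.SetSid(static_cast<Sid>(1));
//   fs.SetEpoch(static_cast<Epoch>(5));
//   Shadow sh(fs, /*addr=*/2, /*size=*/4, kAccessRead);  // mask 0b00111100
//   uptr addr, size;
//   AccessType typ;
//   sh.GetAccess(&addr, &size, &typ);  // addr == 2, size == 4, typ is a read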

}  // namespace __tsan

#endif