//===-- sanitizer_stack_store.cpp -------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "sanitizer_stack_store.h"

#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_leb128.h"
#include "sanitizer_lzw.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_stacktrace.h"

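// StackStore keeps stack traces as flat arrays of frames inside large
// mmap-ed blocks; an Id encodes the position of a trace within the store.
// Fully filled blocks can be packed (Compression::Delta or Compression::LZW)
// and are transparently unpacked again on the next Load().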
namespace __sanitizer {

namespace {
struct StackTraceHeader {
  static constexpr u32 kStackSizeBits = 8;

  u8 size;
  u8 tag;
  explicit StackTraceHeader(const StackTrace &trace)
      : size(Min<uptr>(trace.size, (1u << 8) - 1)), tag(trace.tag) {
    CHECK_EQ(trace.tag, static_cast<uptr>(tag));
  }
  explicit StackTraceHeader(uptr h)
      : size(h & ((1 << kStackSizeBits) - 1)), tag(h >> kStackSizeBits) {}

  uptr ToUptr() const {
    return static_cast<uptr>(size) | (static_cast<uptr>(tag) << kStackSizeBits);
  }
};
}  // namespace

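// Illustrative call sequence (a sketch only; `store`, `trace` and `id` are
// hypothetical objects, not defined in this file):
//   uptr pack = 0;
//   StackStore::Id id = store.Store(trace, &pack);
//   if (pack)  // Some block became fully stored; packing it may free memory.
//     store.Pack(StackStore::Compression::Delta);
//   StackTrace loaded = store.Load(id);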
StackStore::Id StackStore::Store(const StackTrace &trace, uptr *pack) {
  if (!trace.size && !trace.tag)
    return 0;
  StackTraceHeader h(trace);
  uptr idx = 0;
  *pack = 0;
  uptr *stack_trace = Alloc(h.size + 1, &idx, pack);
  *stack_trace = h.ToUptr();
  internal_memcpy(stack_trace + 1, trace.trace, h.size * sizeof(uptr));
  *pack += blocks_[GetBlockIdx(idx)].Stored(h.size + 1);
  return OffsetToId(idx);
}

StackTrace StackStore::Load(Id id) {
  if (!id)
    return {};
  uptr idx = IdToOffset(id);
  uptr block_idx = GetBlockIdx(idx);
  CHECK_LT(block_idx, ARRAY_SIZE(blocks_));
  const uptr *stack_trace = blocks_[block_idx].GetOrUnpack(this);
  if (!stack_trace)
    return {};
  stack_trace += GetInBlockIdx(idx);
  StackTraceHeader h(*stack_trace);
  return StackTrace(stack_trace + 1, h.size, h.tag);
}

uptr StackStore::Allocated() const {
  return atomic_load_relaxed(&allocated_) + sizeof(*this);
}

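// Reserves `count` contiguous frame slots and returns a pointer to the first
// one; `*idx` receives the global frame offset of the reservation. If the
// reservation would straddle a block boundary, the unusable head/tail slots
// are credited via Stored() (so those blocks can still become packable) and
// the allocation is retried in the next block.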
uptr *StackStore::Alloc(uptr count, uptr *idx, uptr *pack) {
  for (;;) {
    // Optimistic lock-free allocation: essentially try to bump the
    // total_frames_.
    uptr start = atomic_fetch_add(&total_frames_, count, memory_order_relaxed);
    uptr block_idx = GetBlockIdx(start);
    uptr last_idx = GetBlockIdx(start + count - 1);
    if (LIKELY(block_idx == last_idx)) {
      // Fits into a single block.
      CHECK_LT(block_idx, ARRAY_SIZE(blocks_));
      *idx = start;
      return blocks_[block_idx].GetOrCreate(this) + GetInBlockIdx(start);
    }

    // Retry. We can't use a range allocated in two different blocks.
    CHECK_LE(count, kBlockSizeFrames);
    uptr in_first = kBlockSizeFrames - GetInBlockIdx(start);
    // Mark the tail/head of these blocks as "stored" to avoid waiting before
    // we can Pack() them.
    *pack += blocks_[block_idx].Stored(in_first);
    *pack += blocks_[last_idx].Stored(count - in_first);
  }
}

void *StackStore::Map(uptr size, const char *mem_type) {
  atomic_fetch_add(&allocated_, size, memory_order_relaxed);
  return MmapNoReserveOrDie(size, mem_type);
}

void StackStore::Unmap(void *addr, uptr size) {
  atomic_fetch_sub(&allocated_, size, memory_order_relaxed);
  UnmapOrDie(addr, size);
}

uptr StackStore::Pack(Compression type) {
  uptr res = 0;
  for (BlockInfo &b : blocks_) res += b.Pack(type, this);
  return res;
}

void StackStore::LockAll() {
  for (BlockInfo &b : blocks_) b.Lock();
}

void StackStore::UnlockAll() {
  for (BlockInfo &b : blocks_) b.Unlock();
}

void StackStore::TestOnlyUnmap() {
  for (BlockInfo &b : blocks_) b.TestOnlyUnmap(this);
  internal_memset(this, 0, sizeof(*this));
}

uptr *StackStore::BlockInfo::Get() const {
  // Idiomatic double-checked locking uses memory_order_acquire here. But
  // relaxed is fine for us, justification is similar to
  // TwoLevelMap::GetOrCreate.
  return reinterpret_cast<uptr *>(atomic_load_relaxed(&data_));
}

uptr *StackStore::BlockInfo::Create(StackStore *store) {
  SpinMutexLock l(&mtx_);
  uptr *ptr = Get();
  if (!ptr) {
    ptr = reinterpret_cast<uptr *>(store->Map(kBlockSizeBytes, "StackStore"));
    atomic_store(&data_, reinterpret_cast<uptr>(ptr), memory_order_release);
  }
  return ptr;
}

uptr *StackStore::BlockInfo::GetOrCreate(StackStore *store) {
  uptr *ptr = Get();
  if (LIKELY(ptr))
    return ptr;
  return Create(store);
}

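// Output iterator that delta-encodes a sequence of uptr values: each value is
// written as the SLEB128-encoded difference from the previous one. Adjacent
// frames of a stack trace are often numerically close, so most deltas encode
// into just a few bytes. The iterator interface allows the encoder to be fed
// either directly (CompressDelta) or through LzwEncode (CompressLzw).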
class SLeb128Encoder {
 public:
  SLeb128Encoder(u8 *begin, u8 *end) : begin(begin), end(end) {}

  bool operator==(const SLeb128Encoder &other) const {
    return begin == other.begin;
  }

  bool operator!=(const SLeb128Encoder &other) const {
    return begin != other.begin;
  }

  SLeb128Encoder &operator=(uptr v) {
    sptr diff = v - previous;
    begin = EncodeSLEB128(diff, begin, end);
    previous = v;
    return *this;
  }
  SLeb128Encoder &operator*() { return *this; }
  SLeb128Encoder &operator++() { return *this; }

  u8 *base() const { return begin; }

 private:
  u8 *begin;
  u8 *end;
  uptr previous = 0;
};

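// Input iterator counterpart of SLeb128Encoder: reads SLEB128-encoded deltas
// and reconstructs the original absolute values on dereference.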
class SLeb128Decoder {
 public:
  SLeb128Decoder(const u8 *begin, const u8 *end) : begin(begin), end(end) {}

  bool operator==(const SLeb128Decoder &other) const {
    return begin == other.begin;
  }

  bool operator!=(const SLeb128Decoder &other) const {
    return begin != other.begin;
  }

  uptr operator*() {
    sptr diff;
    begin = DecodeSLEB128(begin, end, &diff);
    previous += diff;
    return previous;
  }
  SLeb128Decoder &operator++() { return *this; }

  SLeb128Decoder operator++(int) { return *this; }

 private:
  const u8 *begin;
  const u8 *end;
  uptr previous = 0;
};

static u8 *CompressDelta(const uptr *from, const uptr *from_end, u8 *to,
                         u8 *to_end) {
  SLeb128Encoder encoder(to, to_end);
  for (; from != from_end; ++from, ++encoder) *encoder = *from;
  return encoder.base();
}

static uptr *UncompressDelta(const u8 *from, const u8 *from_end, uptr *to,
                             uptr *to_end) {
  SLeb128Decoder decoder(from, from_end);
  SLeb128Decoder end(from_end, from_end);
  for (; decoder != end; ++to, ++decoder) *to = *decoder;
  CHECK_EQ(to, to_end);
  return to;
}

static u8 *CompressLzw(const uptr *from, const uptr *from_end, u8 *to,
                       u8 *to_end) {
  SLeb128Encoder encoder(to, to_end);
  encoder = LzwEncode<uptr>(from, from_end, encoder);
  return encoder.base();
}

static uptr *UncompressLzw(const u8 *from, const u8 *from_end, uptr *to,
                           uptr *to_end) {
  SLeb128Decoder decoder(from, from_end);
  SLeb128Decoder end(from_end, from_end);
  to = LzwDecode<uptr>(decoder, end, to);
  CHECK_EQ(to, to_end);
  return to;
}

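// Layout of a packed block: a PackedHeader followed by the compressed byte
// stream in the flexible array member `data`. `size` is the total size of
// the packed representation including the header; `type` records which
// compression was used so that GetOrUnpack() can pick the matching decoder.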
#if defined(_MSC_VER) && !defined(__clang__)
# pragma warning(push)
// Disable 'nonstandard extension used: zero-sized array in struct/union'.
# pragma warning(disable : 4200)
#endif
namespace {
struct PackedHeader {
  uptr size;
  StackStore::Compression type;
  u8 data[];
};
}  // namespace
#if defined(_MSC_VER) && !defined(__clang__)
# pragma warning(pop)
#endif

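// Returns the block contents in unpacked (plain array-of-frames) form,
// decompressing them first if the block is currently packed. The freshly
// unpacked copy replaces the packed mapping, which is then unmapped.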
uptr *StackStore::BlockInfo::GetOrUnpack(StackStore *store) {
  SpinMutexLock l(&mtx_);
  switch (state) {
    case State::Storing:
      state = State::Unpacked;
      FALLTHROUGH;
    case State::Unpacked:
      return Get();
    case State::Packed:
      break;
  }

  u8 *ptr = reinterpret_cast<u8 *>(Get());
  CHECK_NE(nullptr, ptr);
  const PackedHeader *header = reinterpret_cast<const PackedHeader *>(ptr);
  CHECK_LE(header->size, kBlockSizeBytes);
  CHECK_GE(header->size, sizeof(PackedHeader));

  uptr packed_size_aligned = RoundUpTo(header->size, GetPageSizeCached());

  uptr *unpacked =
      reinterpret_cast<uptr *>(store->Map(kBlockSizeBytes, "StackStoreUnpack"));

  uptr *unpacked_end;
  switch (header->type) {
    case Compression::Delta:
      unpacked_end = UncompressDelta(header->data, ptr + header->size, unpacked,
                                     unpacked + kBlockSizeFrames);
      break;
    case Compression::LZW:
      unpacked_end = UncompressLzw(header->data, ptr + header->size, unpacked,
                                   unpacked + kBlockSizeFrames);
      break;
    default:
      UNREACHABLE("Unexpected type");
      break;
  }

  CHECK_EQ(kBlockSizeFrames, unpacked_end - unpacked);

  MprotectReadOnly(reinterpret_cast<uptr>(unpacked), kBlockSizeBytes);
  atomic_store(&data_, reinterpret_cast<uptr>(unpacked), memory_order_release);
  store->Unmap(ptr, packed_size_aligned);

  state = State::Unpacked;
  return Get();
}

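// Compresses a fully stored block with the requested algorithm. If packing
// does not save at least 1/8 of the block, the compressed copy is discarded
// and the block stays unpacked. Returns the number of bytes of memory
// released.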
uptr StackStore::BlockInfo::Pack(Compression type, StackStore *store) {
  if (type == Compression::None)
    return 0;

  SpinMutexLock l(&mtx_);
  switch (state) {
    case State::Unpacked:
    case State::Packed:
      return 0;
    case State::Storing:
      break;
  }

  uptr *ptr = Get();
  if (!ptr || !Stored(0))
    return 0;

  u8 *packed =
      reinterpret_cast<u8 *>(store->Map(kBlockSizeBytes, "StackStorePack"));
  PackedHeader *header = reinterpret_cast<PackedHeader *>(packed);
  u8 *alloc_end = packed + kBlockSizeBytes;

  u8 *packed_end = nullptr;
  switch (type) {
    case Compression::Delta:
      packed_end =
          CompressDelta(ptr, ptr + kBlockSizeFrames, header->data, alloc_end);
      break;
    case Compression::LZW:
      packed_end =
          CompressLzw(ptr, ptr + kBlockSizeFrames, header->data, alloc_end);
      break;
    default:
      UNREACHABLE("Unexpected type");
      break;
  }

  header->type = type;
  header->size = packed_end - packed;

  VPrintf(1, "Packed block of %zu KiB to %zu KiB\n", kBlockSizeBytes >> 10,
          header->size >> 10);

  if (kBlockSizeBytes - header->size < kBlockSizeBytes / 8) {
    VPrintf(1, "Undo and keep block unpacked\n");
    MprotectReadOnly(reinterpret_cast<uptr>(ptr), kBlockSizeBytes);
    store->Unmap(packed, kBlockSizeBytes);
    state = State::Unpacked;
    return 0;
  }

  uptr packed_size_aligned = RoundUpTo(header->size, GetPageSizeCached());
  store->Unmap(packed + packed_size_aligned,
               kBlockSizeBytes - packed_size_aligned);
  MprotectReadOnly(reinterpret_cast<uptr>(packed), packed_size_aligned);

  atomic_store(&data_, reinterpret_cast<uptr>(packed), memory_order_release);
  store->Unmap(ptr, kBlockSizeBytes);

  state = State::Packed;
  return kBlockSizeBytes - packed_size_aligned;
}

void StackStore::BlockInfo::TestOnlyUnmap(StackStore *store) {
  if (uptr *ptr = Get())
    store->Unmap(ptr, kBlockSizeBytes);
}

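// Accounts for `n` more frames written into this block; returns true exactly
// when the running total reaches kBlockSizeFrames, i.e. when the block
// becomes fully populated and eligible for packing.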
bool StackStore::BlockInfo::Stored(uptr n) {
  return n + atomic_fetch_add(&stored_, n, memory_order_release) ==
         kBlockSizeFrames;
}

bool StackStore::BlockInfo::IsPacked() const {
  SpinMutexLock l(&mtx_);
  return state == State::Packed;
}

379 } // namespace __sanitizer