//===-- stack_depot.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef SCUDO_STACK_DEPOT_H_
#define SCUDO_STACK_DEPOT_H_

#include "atomic_helpers.h"
#include "mutex.h"

namespace scudo {

class MurMur2HashBuilder {
  static const u32 M = 0x5bd1e995;
  static const u32 Seed = 0x9747b28c;
  static const u32 R = 24;
  u32 H;

public:
  explicit MurMur2HashBuilder(u32 Init = 0) { H = Seed ^ Init; }
  // Mix one 32-bit word into the running hash (the standard MurmurHash2
  // mixing steps, using the M and R constants above).
  void add(u32 K) {
    K *= M;
    K ^= K >> R;
    K *= M;
    H *= M;
    H ^= K;
  }
  // Finalize and return the hash value.
  u32 get() {
    u32 X = H;
    X ^= X >> 13;
    X *= M;
    X ^= X >> 15;
    return X;
  }
};

class StackDepot {
  HybridMutex RingEndMu;
  u32 RingEnd = 0;

  // This data structure stores a stack trace for each allocation and
  // deallocation when stack trace recording is enabled, that may be looked up
  // using a hash of the stack trace. The lower bits of the hash are an index
  // into the Tab array, which stores an index into the Ring array where the
  // stack traces are stored. As the name implies, Ring is a ring buffer, so a
  // stack trace may wrap around to the start of the array.
  //
  // Each stack trace in Ring is prefixed by a stack trace marker consisting of
  // a fixed 1 bit in bit 0 (this allows disambiguation between stack frames
  // and stack trace markers in the case where instruction pointers are 4-byte
  // aligned, as they are on arm64), the stack trace hash in bits 1-32, and the
  // size of the stack trace in bits 33-63.
  //
  // The insert() function is potentially racy in its accesses to the Tab and
  // Ring arrays, but find() is resilient to races in the sense that, barring
  // hash collisions, it will either return the correct stack trace or no stack
  // trace at all, even if two instances of insert() raced with one another.
  // This is achieved by re-checking the hash of the stack trace before
  // returning the trace.
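  //
  // As a sketch of the encoding described above (illustrative values, not
  // normative): a 3-frame trace with hash H stored starting at ring index P
  // would occupy Ring as follows:
  //
  //   Ring[P]                  = (u64(3) << 33) | (u64(H) << 1) | 1; // marker
  //   Ring[(P + 1) & RingMask] = Frame0;
  //   Ring[(P + 2) & RingMask] = Frame1;
  //   Ring[(P + 3) & RingMask] = Frame2;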

#if SCUDO_SMALL_STACK_DEPOT
  static const uptr TabBits = 4;
#else
  static const uptr TabBits = 16;
#endif
  static const uptr TabSize = 1 << TabBits;
  static const uptr TabMask = TabSize - 1;
  atomic_u32 Tab[TabSize] = {};

#if SCUDO_SMALL_STACK_DEPOT
  static const uptr RingBits = 4;
#else
  static const uptr RingBits = 19;
#endif
  static const uptr RingSize = 1 << RingBits;
  static const uptr RingMask = RingSize - 1;
  atomic_u64 Ring[RingSize] = {};

public:
  // Insert hash of the stack trace [Begin, End) into the stack depot, and
  // return the hash.
  u32 insert(uptr *Begin, uptr *End) {
    // Hash the stack trace, dropping the low 2 bits of each frame, which
    // carry no information for 4-byte-aligned instruction pointers.
    MurMur2HashBuilder B;
    for (uptr *I = Begin; I != End; ++I)
      B.add(u32(*I) >> 2);
    u32 Hash = B.get();

    // Fast path: if Tab already points at a marker entry matching this
    // trace's hash and size, the trace was inserted before; skip the
    // re-insertion and the lock.
    u32 Pos = Hash & TabMask;
    u32 RingPos = atomic_load_relaxed(&Tab[Pos]);
    u64 Entry = atomic_load_relaxed(&Ring[RingPos]);
    u64 Id = (u64(End - Begin) << 33) | (u64(Hash) << 1) | 1;
    if (Entry == Id)
      return Hash;

    // Slow path: serialize writers on RingEndMu and append the marker
    // followed by the frames at the current end of the ring, then publish
    // the trace's position in Tab.
    ScopedLock Lock(RingEndMu);
    RingPos = RingEnd;
    atomic_store_relaxed(&Tab[Pos], RingPos);
    atomic_store_relaxed(&Ring[RingPos], Id);
    for (uptr *I = Begin; I != End; ++I) {
      RingPos = (RingPos + 1) & RingMask;
      atomic_store_relaxed(&Ring[RingPos], *I);
    }
    RingEnd = (RingPos + 1) & RingMask;
    return Hash;
  }
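
  // For illustration (a sketch; `Depot`, `Frames`, and `NumFrames` are
  // caller-provided names, not part of this header):
  //
  //   u32 Hash = Depot.insert(Frames, Frames + NumFrames);
  //
  // The returned hash is what a caller would later pass to find().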

  // Look up a stack trace by hash. Returns true if successful. The trace may
  // be accessed via operator[] passing indexes between *RingPosPtr and
  // *RingPosPtr + *SizePtr.
  bool find(u32 Hash, uptr *RingPosPtr, uptr *SizePtr) const {
    // Validate the ring index and the marker entry before touching the
    // trace itself.
    u32 Pos = Hash & TabMask;
    u32 RingPos = atomic_load_relaxed(&Tab[Pos]);
    if (RingPos >= RingSize)
      return false;
    u64 Entry = atomic_load_relaxed(&Ring[RingPos]);
    u64 HashWithTagBit = (u64(Hash) << 1) | 1;
    if ((Entry & 0x1ffffffff) != HashWithTagBit)
      return false;
    u32 Size = u32(Entry >> 33);
    if (Size >= RingSize)
      return false;
    *RingPosPtr = (RingPos + 1) & RingMask;
    *SizePtr = Size;

    // Re-hash the trace as stored in Ring and compare against the requested
    // hash; this rejects entries partially overwritten by a racing insert().
    MurMur2HashBuilder B;
    for (uptr I = 0; I != Size; ++I) {
      RingPos = (RingPos + 1) & RingMask;
      B.add(u32(atomic_load_relaxed(&Ring[RingPos])) >> 2);
    }
    return B.get() == Hash;
  }
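
  // A hypothetical round trip (illustrative only; `Depot`, `Frames`,
  // `NumFrames`, and `processFrame` are assumed caller-side names):
  //
  //   u32 Hash = Depot.insert(Frames, Frames + NumFrames);
  //   uptr RingPos, Size;
  //   if (Depot.find(Hash, &RingPos, &Size))
  //     for (uptr I = RingPos; I != RingPos + Size; ++I)
  //       processFrame(Depot[I]);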

  u64 operator[](uptr RingPos) const {
    return atomic_load_relaxed(&Ring[RingPos & RingMask]);
  }
};

} // namespace scudo

#endif // SCUDO_STACK_DEPOT_H_