//===-- sanitizer_stackdepotbase.h ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementation of a mapping from arbitrary values to unique 32-bit
// identifiers.
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_STACKDEPOTBASE_H
#define SANITIZER_STACKDEPOTBASE_H

#include "sanitizer_atomic.h"
#include "sanitizer_flat_map.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"

namespace __sanitizer {

template <class Node, int kReservedBits, int kTabSizeLog>
class StackDepotBase {
  static constexpr u32 kIdSizeLog =
      sizeof(u32) * 8 - Max(kReservedBits, 1 /* At least 1 bit for locking. */);
  static constexpr u32 kNodesSize1Log = kIdSizeLog / 2;
  static constexpr u32 kNodesSize2Log = kIdSizeLog - kNodesSize1Log;
  static constexpr int kTabSize = 1 << kTabSizeLog;  // Hash table size.
  static constexpr u32 kUnlockMask = (1ull << kIdSizeLog) - 1;
  static constexpr u32 kLockMask = ~kUnlockMask;
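
  // Each bucket word in `tab[]` packs two fields: the low kIdSizeLog bits
  // (kUnlockMask) hold the id of the head node of the bucket's chain in
  // `nodes`, while the high bits (kLockMask) stay clear except while a thread
  // holds the bucket spinlock (see lock()/unlock() below).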

 public:
  typedef typename Node::args_type args_type;
  typedef typename Node::handle_type handle_type;
  typedef typename Node::hash_type hash_type;

  static constexpr u64 kNodesSize1 = 1ull << kNodesSize1Log;
  static constexpr u64 kNodesSize2 = 1ull << kNodesSize2Log;

  // Maps a stack trace to a unique id.
  u32 Put(args_type args, bool *inserted = nullptr);
  // Retrieves a stored stack trace by the id.
  args_type Get(u32 id);

  StackDepotStats GetStats() const {
    return {
        atomic_load_relaxed(&n_uniq_ids),
        nodes.MemoryUsage() + Node::allocated(),
    };
  }

  void LockBeforeFork();
  void UnlockAfterFork(bool fork_child);
  void PrintAll();

  void TestOnlyUnmap() {
    nodes.TestOnlyUnmap();
    internal_memset(this, 0, sizeof(*this));
  }

 private:
  u32 find(u32 s, args_type args, hash_type hash) const;
  static u32 lock(atomic_uint32_t *p);
  static void unlock(atomic_uint32_t *p, u32 s);
  atomic_uint32_t tab[kTabSize];  // Hash table of Nodes.

  atomic_uint32_t n_uniq_ids;

  TwoLevelMap<Node, kNodesSize1, kNodesSize2> nodes;

  friend class StackDepotReverseMap;
};
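
// Usage sketch. A concrete depot is an instantiation of this template with a
// node type providing args_type/handle_type/hash_type, is_valid(), hash(),
// eq(), store(), load(), a `link` field and static allocated(). The names
// below (`MyStackNode`, `kMyReservedBits`, `kMyTabSizeLog`) are placeholders:
//
//   static StackDepotBase<MyStackNode, kMyReservedBits, kMyTabSizeLog> depot;
//   bool inserted = false;
//   u32 id = depot.Put(args, &inserted);          // Deduplicating insert.
//   MyStackNode::args_type same = depot.Get(id);  // Round-trips the args.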

template <class Node, int kReservedBits, int kTabSizeLog>
u32 StackDepotBase<Node, kReservedBits, kTabSizeLog>::find(
    u32 s, args_type args, hash_type hash) const {
  // Searches linked list s for the stack, returns its id.
  for (; s;) {
    const Node &node = nodes[s];
    if (node.eq(hash, args))
      return s;
    s = node.link;
  }
  return 0;
}

template <class Node, int kReservedBits, int kTabSizeLog>
u32 StackDepotBase<Node, kReservedBits, kTabSizeLog>::lock(
    atomic_uint32_t *p) {
  // Uses the upper (reserved) bits of the bucket word as a spinlock.
  for (int i = 0;; i++) {
    u32 cmp = atomic_load(p, memory_order_relaxed);
    if ((cmp & kLockMask) == 0 &&
        atomic_compare_exchange_weak(p, &cmp, cmp | kLockMask,
                                     memory_order_acquire))
      return cmp;
    if (i < 10)
      proc_yield(10);
    else
      internal_sched_yield();
  }
}
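
// Note: lock() acquires a bucket with memory_order_acquire and unlock()
// publishes the new chain head with memory_order_release, so nodes linked
// into a bucket while it was held are visible to the next thread that locks
// (or consume-loads) the same bucket word.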

template <class Node, int kReservedBits, int kTabSizeLog>
void StackDepotBase<Node, kReservedBits, kTabSizeLog>::unlock(
    atomic_uint32_t *p, u32 s) {
  DCHECK_EQ(s & kLockMask, 0);
  atomic_store(p, s, memory_order_release);
}

template <class Node, int kReservedBits, int kTabSizeLog>
u32 StackDepotBase<Node, kReservedBits, kTabSizeLog>::Put(args_type args,
                                                          bool *inserted) {
  if (inserted)
    *inserted = false;
  if (!LIKELY(Node::is_valid(args)))
    return 0;
  hash_type h = Node::hash(args);
  atomic_uint32_t *p = &tab[h % kTabSize];
  u32 v = atomic_load(p, memory_order_consume);
  u32 s = v & kUnlockMask;
  // First, try to find the existing stack.
  u32 node = find(s, args, h);
  if (LIKELY(node))
    return node;

  // If failed, lock, retry and insert new.
  u32 s2 = lock(p);
  if (s2 != s) {
    node = find(s2, args, h);
    if (node) {
      unlock(p, s2);
      return node;
    }
  }
  s = atomic_fetch_add(&n_uniq_ids, 1, memory_order_relaxed) + 1;
  CHECK_EQ(s & kUnlockMask, s);
  CHECK_EQ(s & (((u32)-1) >> kReservedBits), s);
  Node &new_node = nodes[s];
  new_node.store(s, args, h);
  new_node.link = s2;
  unlock(p, s);
  if (inserted) *inserted = true;
  return s;
}
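
// Note: Put() is double-checked on purpose. The first find() runs without the
// bucket lock; on a miss the bucket is locked and, if the chain head changed
// in the meantime (s2 != s), find() is repeated under the lock so a stack
// inserted concurrently by another thread is reused rather than stored twice.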

template <class Node, int kReservedBits, int kTabSizeLog>
typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::args_type
StackDepotBase<Node, kReservedBits, kTabSizeLog>::Get(u32 id) {
  if (id == 0)
    return args_type();
  CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
  if (!nodes.contains(id))
    return args_type();
  const Node &node = nodes[id];
  return node.load(id);
}

template <class Node, int kReservedBits, int kTabSizeLog>
void StackDepotBase<Node, kReservedBits, kTabSizeLog>::LockBeforeFork() {
  // Do not lock the hash table. It is very expensive and not really needed:
  // the parent process will neither lock nor unlock, and the child process
  // only risks deadlocking on buckets that were already locked at fork time.
  // To avoid that, `UnlockAfterFork` unlocks every locked bucket. This may
  // affect consistency of the hash table, but the only consequence is that a
  // few items inserted by the parent may not be found by the child, which may
  // then insert them again, wasting some space in `stackStore`.

  // We still need to lock `nodes`.
  nodes.Lock();
}

template <class Node, int kReservedBits, int kTabSizeLog>
void StackDepotBase<Node, kReservedBits, kTabSizeLog>::UnlockAfterFork(
    bool fork_child) {
  nodes.Unlock();

  // Only unlock the hash table in the child process to avoid deadlock. See
  // `LockBeforeFork`.
  if (!fork_child)
    return;

  for (int i = 0; i < kTabSize; ++i) {
    atomic_uint32_t *p = &tab[i];
    uptr s = atomic_load(p, memory_order_relaxed);
    if (s & kLockMask)
      unlock(p, s & kUnlockMask);
  }
}

template <class Node, int kReservedBits, int kTabSizeLog>
void StackDepotBase<Node, kReservedBits, kTabSizeLog>::PrintAll() {
  for (int i = 0; i < kTabSize; ++i) {
    atomic_uint32_t *p = &tab[i];
    u32 s = atomic_load(p, memory_order_consume) & kUnlockMask;
    for (; s;) {
      const Node &node = nodes[s];
      Printf("Stack for id %u:\n", s);
      node.load(s).Print();
      s = node.link;
    }
  }
}

}  // namespace __sanitizer

#endif  // SANITIZER_STACKDEPOTBASE_H