//===-- tsd_test.cpp --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "tsd_exclusive.h"
#include "tsd_shared.h"

#include <stdlib.h>

#include <condition_variable>
#include <mutex>
#include <set>
#include <thread>
#include <type_traits>

// We mock out an allocator with a TSD registry, mostly using empty stubs. The
// cache contains a single volatile uptr so that the tests can check that
// concurrent threads never access or modify the same cache at the same time.
template <class Config> class MockAllocator {
public:
  using ThisT = MockAllocator<Config>;
  using TSDRegistryT = typename Config::template TSDRegistryT<ThisT>;
  using CacheT = struct MockCache {
    volatile scudo::uptr Canary;
  };
  using QuarantineCacheT = struct MockQuarantine {};

  void init() {
    // This should only be called once by the registry.
    EXPECT_FALSE(Initialized);
    Initialized = true;
  }

  void unmapTestOnly() { TSDRegistry.unmapTestOnly(this); }
  void initCache(CacheT *Cache) { *Cache = {}; }
  void commitBack(UNUSED scudo::TSD<MockAllocator> *TSD) {}
  TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
  void callPostInitCallback() {}

  bool isInitialized() { return Initialized; }
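
  // Note: the registry member may carry alignment requirements beyond what a
  // plain ::operator new guarantees, which is presumably why allocation goes
  // through posix_memalign below (our reading; the original has no comment).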
  void *operator new(size_t Size) {
    void *P = nullptr;
    EXPECT_EQ(0, posix_memalign(&P, alignof(ThisT), Size));
    return P;
  }
  void operator delete(void *P) { free(P); }

private:
  bool Initialized = false;
  TSDRegistryT TSDRegistry;
};

struct OneCache {
  template <class Allocator>
  using TSDRegistryT = scudo::TSDRegistrySharedT<Allocator, 1U, 1U>;
};

struct SharedCaches {
  template <class Allocator>
  using TSDRegistryT = scudo::TSDRegistrySharedT<Allocator, 16U, 8U>;
};

struct ExclusiveCaches {
  template <class Allocator>
  using TSDRegistryT = scudo::TSDRegistryExT<Allocator>;
};
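
// For the shared registry, the trailing template parameters are taken to be
// the TSD array size and the default TSD count: OneCache pins both to a
// single TSD, while SharedCaches allows up to 16 TSDs with 8 enabled by
// default. ExclusiveCaches gives each thread its own TSD instead. (Parameter
// meaning inferred from how the tests below use them.)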

TEST(ScudoTSDTest, TSDRegistryInit) {
  using AllocatorT = MockAllocator<OneCache>;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  EXPECT_FALSE(Allocator->isInitialized());

  auto Registry = Allocator->getTSDRegistry();
  Registry->initOnceMaybe(Allocator.get());
  EXPECT_TRUE(Allocator->isInitialized());
}

template <class AllocatorT>
static void testRegistry() NO_THREAD_SAFETY_ANALYSIS {
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  EXPECT_FALSE(Allocator->isInitialized());

  auto Registry = Allocator->getTSDRegistry();
  Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/true);
  EXPECT_TRUE(Allocator->isInitialized());

  {
    typename AllocatorT::TSDRegistryT::ScopedTSD TSD(*Registry);
    EXPECT_EQ(TSD->getCache().Canary, 0U);
  }

  Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/false);
  {
    typename AllocatorT::TSDRegistryT::ScopedTSD TSD(*Registry);
    EXPECT_EQ(TSD->getCache().Canary, 0U);
    memset(&TSD->getCache(), 0x42, sizeof(TSD->getCache()));
  }
}
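
// A minimal sketch of the ScopedTSD pattern exercised above, assuming (as the
// runtime checks suggest) that the constructor locks the TSD and the
// destructor unlocks it:
//
//   {
//     typename AllocatorT::TSDRegistryT::ScopedTSD TSD(*Registry);
//     TSD->getCache(); // Safe: this thread holds the TSD for the full scope.
//   } // Destructor runs: the TSD is released for other threads to grab.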

TEST(ScudoTSDTest, TSDRegistryBasic) {
  testRegistry<MockAllocator<OneCache>>();
  testRegistry<MockAllocator<SharedCaches>>();
#if !SCUDO_FUCHSIA
  testRegistry<MockAllocator<ExclusiveCaches>>();
#endif
}

static std::mutex Mutex;
static std::condition_variable Cv;
static bool Ready;

// Accessing `TSD->getCache()` requires holding `TSD::Mutex`, which isn't easy
// to express through thread-safety analysis. Instead, we verify thread safety
// with a runtime check in ScopedTSD and mark the test bodies with
// NO_THREAD_SAFETY_ANALYSIS.
template <typename AllocatorT>
static void stressCache(AllocatorT *Allocator) NO_THREAD_SAFETY_ANALYSIS {
  auto Registry = Allocator->getTSDRegistry();
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    while (!Ready)
      Cv.wait(Lock);
  }
  Registry->initThreadMaybe(Allocator, /*MinimalInit=*/false);
  typename AllocatorT::TSDRegistryT::ScopedTSD TSD(*Registry);
  // For an exclusive TSD, the cache should be empty. We cannot guarantee the
  // same for a shared TSD.
  if (std::is_same<typename AllocatorT::TSDRegistryT,
                   scudo::TSDRegistryExT<AllocatorT>>()) {
    EXPECT_EQ(TSD->getCache().Canary, 0U);
  }
  // Transform the thread id to a uptr to use it as a canary.
  const scudo::uptr Canary = static_cast<scudo::uptr>(
      std::hash<std::thread::id>{}(std::this_thread::get_id()));
  TSD->getCache().Canary = Canary;
  // Loop a few times to make sure that a concurrent thread isn't modifying it.
  for (scudo::uptr I = 0; I < 4096U; I++)
    EXPECT_EQ(TSD->getCache().Canary, Canary);
}
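
// Thread ids are distinct, so (hash collisions aside) each thread writes a
// unique canary; if two threads ever shared a cache concurrently, one of the
// EXPECT_EQ checks above would observe the other thread's value and fail.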

template <class AllocatorT> static void testRegistryThreaded() {
  Ready = false;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread(stressCache<AllocatorT>, Allocator.get());
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
}

TEST(ScudoTSDTest, TSDRegistryThreaded) {
  testRegistryThreaded<MockAllocator<OneCache>>();
  testRegistryThreaded<MockAllocator<SharedCaches>>();
#if !SCUDO_FUCHSIA
  testRegistryThreaded<MockAllocator<ExclusiveCaches>>();
#endif
}

static std::set<void *> Pointers;

static void stressSharedRegistry(MockAllocator<SharedCaches> *Allocator) {
  std::set<void *> Set;
  auto Registry = Allocator->getTSDRegistry();
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    while (!Ready)
      Cv.wait(Lock);
  }
  Registry->initThreadMaybe(Allocator, /*MinimalInit=*/false);
  for (scudo::uptr I = 0; I < 4096U; I++) {
    typename MockAllocator<SharedCaches>::TSDRegistryT::ScopedTSD TSD(
        *Registry);
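    // &*TSD is the address of the underlying TSD behind the scoped wrapper;
    // collecting these lets the test count how many distinct TSDs were used.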
    Set.insert(reinterpret_cast<void *>(&*TSD));
  }
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Pointers.insert(Set.begin(), Set.end());
  }
}

TEST(ScudoTSDTest, TSDRegistryTSDsCount) {
  Ready = false;
  Pointers.clear();
  using AllocatorT = MockAllocator<SharedCaches>;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  // We attempt to use as many TSDs as the shared cache offers by creating a
  // decent number of threads that run concurrently and attempt to get and
  // lock TSDs. We put them all in a set and count the number of entries after
  // we are done.
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread(stressSharedRegistry, Allocator.get());
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  // The initial number of TSDs we get will be the minimum of the default count
  // and the number of CPUs.
  EXPECT_LE(Pointers.size(), 8U);
  Pointers.clear();
  auto Registry = Allocator->getTSDRegistry();
  // Increase the number of TSDs to 16.
  Registry->setOption(scudo::Option::MaxTSDsCount, 16);
  Ready = false;
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread(stressSharedRegistry, Allocator.get());
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  // We should get 16 distinct TSDs back.
  EXPECT_EQ(Pointers.size(), 16U);
}
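
// A note on the count assertions above (our reading of the registry): the
// shared registry initially enables min(DefaultTSDCount, NumberOfCPUs) TSDs,
// hence the EXPECT_LE against 8 rather than an exact match; once MaxTSDsCount
// is raised to 16, the 32 contending threads each acquiring a TSD 4096 times
// should drive all 16 into use, which is what makes the exact EXPECT_EQ
// dependable.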