//===-- tsd_test.cpp --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "tsd_exclusive.h"
#include "tsd_shared.h"

#include <stdlib.h>

#include <condition_variable>
#include <mutex>
#include <set>
#include <thread>
// We mock out an allocator with a TSD registry, mostly using empty stubs. The
// cache contains a single volatile uptr, to be able to test that several
// concurrent threads will not access or modify the same cache at the same time.
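// A TSD (thread-specific data structure) owns one such cache; the exclusive
// registry gives each thread its own TSD, while the shared registries hand
// out, and lock, TSDs from a fixed-size pool.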
template <class Config> class MockAllocator {
public:
  using ThisT = MockAllocator<Config>;
  using TSDRegistryT = typename Config::template TSDRegistryT<ThisT>;
  using CacheT = struct MockCache {
    volatile scudo::uptr Canary;
  };
  using QuarantineCacheT = struct MockQuarantine {};

  void init() {
    // This should only be called once by the registry.
    EXPECT_FALSE(Initialized);
    Initialized = true;
  }

  void unmapTestOnly() { TSDRegistry.unmapTestOnly(this); }
  void initCache(CacheT *Cache) { *Cache = {}; }
  void commitBack(UNUSED scudo::TSD<MockAllocator> *TSD) {}
  TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
  void callPostInitCallback() {}

  bool isInitialized() { return Initialized; }
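
  // Likely why posix_memalign is used below: the embedded TSD registry can
  // raise alignof(ThisT) above what the default heap guarantees.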
  void *operator new(size_t Size) {
    void *P = nullptr;
    EXPECT_EQ(0, posix_memalign(&P, alignof(ThisT), Size));
    return P;
  }
  void operator delete(void *P) { free(P); }

private:
  bool Initialized = false;
  TSDRegistryT TSDRegistry;
};
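
// Configs for the three registry flavors under test: OneCache caps the shared
// registry at a single TSD, SharedCaches allows up to 16 TSDs (8 by default),
// and ExclusiveCaches gives every thread its own TSD.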
struct OneCache {
  template <class Allocator>
  using TSDRegistryT = scudo::TSDRegistrySharedT<Allocator, 1U, 1U>;
};

struct SharedCaches {
  template <class Allocator>
  using TSDRegistryT = scudo::TSDRegistrySharedT<Allocator, 16U, 8U>;
};

struct ExclusiveCaches {
  template <class Allocator>
  using TSDRegistryT = scudo::TSDRegistryExT<Allocator>;
};

TEST(ScudoTSDTest, TSDRegistryInit) {
  using AllocatorT = MockAllocator<OneCache>;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  EXPECT_FALSE(Allocator->isInitialized());

  auto Registry = Allocator->getTSDRegistry();
  Registry->initOnceMaybe(Allocator.get());
  EXPECT_TRUE(Allocator->isInitialized());
}
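
// Note: initOnceMaybe performs only the one-time allocator initialization;
// initThreadMaybe, used in testRegistry below, also sets up the calling
// thread's TSD.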

template <class AllocatorT> static void testRegistry() {
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  EXPECT_FALSE(Allocator->isInitialized());

  auto Registry = Allocator->getTSDRegistry();
  Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/true);
  EXPECT_TRUE(Allocator->isInitialized());
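
  // getTSDAndLock sets UnlockRequired when the returned TSD comes from a
  // shared, mutex-protected pool; an exclusive thread-local TSD needs no
  // unlock.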
  bool UnlockRequired;
  auto TSD = Registry->getTSDAndLock(&UnlockRequired);
  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
  EXPECT_NE(TSD, nullptr);
  EXPECT_EQ(TSD->getCache().Canary, 0U);
  if (UnlockRequired)
    TSD->unlock();

  Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/false);
  TSD = Registry->getTSDAndLock(&UnlockRequired);
  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
  EXPECT_NE(TSD, nullptr);
  EXPECT_EQ(TSD->getCache().Canary, 0U);
  memset(&TSD->getCache(), 0x42, sizeof(TSD->getCache()));
  if (UnlockRequired)
    TSD->unlock();
}

TEST(ScudoTSDTest, TSDRegistryBasic) {
  testRegistry<MockAllocator<OneCache>>();
  testRegistry<MockAllocator<SharedCaches>>();
#if !SCUDO_FUCHSIA
  testRegistry<MockAllocator<ExclusiveCaches>>();
#endif
}
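
// The stress threads below block on Cv until the main thread flips Ready, so
// they all hit the registry concurrently instead of trickling in one by one.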
static std::mutex Mutex;
static std::condition_variable Cv;
static bool Ready;

template <typename AllocatorT> static void stressCache(AllocatorT *Allocator) {
  auto Registry = Allocator->getTSDRegistry();
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    while (!Ready)
      Cv.wait(Lock);
  }
  Registry->initThreadMaybe(Allocator, /*MinimalInit=*/false);
  bool UnlockRequired;
  auto TSD = Registry->getTSDAndLock(&UnlockRequired);
  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
  EXPECT_NE(TSD, nullptr);
  // For an exclusive TSD, the cache should be empty. We cannot guarantee the
  // same for a shared TSD.
  if (!UnlockRequired)
    EXPECT_EQ(TSD->getCache().Canary, 0U);
  // Transform the thread id to a uptr to use it as canary.
  const scudo::uptr Canary = static_cast<scudo::uptr>(
      std::hash<std::thread::id>{}(std::this_thread::get_id()));
  TSD->getCache().Canary = Canary;
  // Loop a few times to make sure that a concurrent thread isn't modifying it.
  for (scudo::uptr I = 0; I < 4096U; I++)
    EXPECT_EQ(TSD->getCache().Canary, Canary);
  if (UnlockRequired)
    TSD->unlock();
}

template <class AllocatorT> static void testRegistryThreaded() {
  Ready = false;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread(stressCache<AllocatorT>, Allocator.get());
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
}

TEST(ScudoTSDTest, TSDRegistryThreaded) {
  testRegistryThreaded<MockAllocator<OneCache>>();
  testRegistryThreaded<MockAllocator<SharedCaches>>();
#if !SCUDO_FUCHSIA
  testRegistryThreaded<MockAllocator<ExclusiveCaches>>();
#endif
}
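
// Distinct TSD pointers observed across all stress threads; guarded by Mutex.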
static std::set<void *> Pointers;

static void stressSharedRegistry(MockAllocator<SharedCaches> *Allocator) {
  std::set<void *> Set;
  auto Registry = Allocator->getTSDRegistry();
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    while (!Ready)
      Cv.wait(Lock);
  }
  Registry->initThreadMaybe(Allocator, /*MinimalInit=*/false);
  bool UnlockRequired;
  for (scudo::uptr I = 0; I < 4096U; I++) {
    auto TSD = Registry->getTSDAndLock(&UnlockRequired);
    TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
    EXPECT_NE(TSD, nullptr);
    Set.insert(reinterpret_cast<void *>(TSD));
    if (UnlockRequired)
      TSD->unlock();
  }
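  // Merge the thread-local set into the global one in a single critical
  // section rather than taking Mutex on every iteration.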
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Pointers.insert(Set.begin(), Set.end());
  }
}

TEST(ScudoTSDTest, TSDRegistryTSDsCount) {
  Ready = false;
  Pointers.clear();
  using AllocatorT = MockAllocator<SharedCaches>;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  // We attempt to use as many TSDs as the shared cache offers by creating a
  // decent number of threads that will run concurrently and attempt to get
  // and lock TSDs. We put them all in a set and count the number of entries
  // after we are done.
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread(stressSharedRegistry, Allocator.get());
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  // The initial number of TSDs we get will be the minimum of the default count
  // and the number of CPUs.
  EXPECT_LE(Pointers.size(), 8U);
  Pointers.clear();
  auto Registry = Allocator->getTSDRegistry();
  // Increase the number of TSDs to 16.
  Registry->setOption(scudo::Option::MaxTSDsCount, 16);
  Ready = false;
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread(stressSharedRegistry, Allocator.get());
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  // We should get 16 distinct TSDs back.
  EXPECT_EQ(Pointers.size(), 16U);
}