1 //===-- tsd_test.cpp --------------------------------------------*- C++ -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
#include "tests/scudo_unit_test.h"

#include "tsd_exclusive.h"
#include "tsd_shared.h"

#include <stdlib.h>

#include <condition_variable>
#include <cstring>
#include <memory>
#include <mutex>
#include <set>
#include <thread>
21 // We mock out an allocator with a TSD registry, mostly using empty stubs. The
22 // cache contains a single volatile uptr, to be able to test that several
23 // concurrent threads will not access or modify the same cache at the same time.
24 template <class Config
> class MockAllocator
{
26 using ThisT
= MockAllocator
<Config
>;
27 using TSDRegistryT
= typename
Config::template TSDRegistryT
<ThisT
>;
28 using CacheT
= struct MockCache
{
29 volatile scudo::uptr Canary
;
31 using QuarantineCacheT
= struct MockQuarantine
{};
34 // This should only be called once by the registry.
35 EXPECT_FALSE(Initialized
);
39 void unmapTestOnly() { TSDRegistry
.unmapTestOnly(this); }
40 void initCache(CacheT
*Cache
) { *Cache
= {}; }
41 void commitBack(UNUSED
scudo::TSD
<MockAllocator
> *TSD
) {}
42 TSDRegistryT
*getTSDRegistry() { return &TSDRegistry
; }
43 void callPostInitCallback() {}
45 bool isInitialized() { return Initialized
; }
47 void *operator new(size_t Size
) {
49 EXPECT_EQ(0, posix_memalign(&P
, alignof(ThisT
), Size
));
52 void operator delete(void *P
) { free(P
); }
55 bool Initialized
= false;
56 TSDRegistryT TSDRegistry
;
60 template <class Allocator
>
61 using TSDRegistryT
= scudo::TSDRegistrySharedT
<Allocator
, 1U, 1U>;
65 template <class Allocator
>
66 using TSDRegistryT
= scudo::TSDRegistrySharedT
<Allocator
, 16U, 8U>;
69 struct ExclusiveCaches
{
70 template <class Allocator
>
71 using TSDRegistryT
= scudo::TSDRegistryExT
<Allocator
>;
74 TEST(ScudoTSDTest
, TSDRegistryInit
) {
75 using AllocatorT
= MockAllocator
<OneCache
>;
76 auto Deleter
= [](AllocatorT
*A
) {
80 std::unique_ptr
<AllocatorT
, decltype(Deleter
)> Allocator(new AllocatorT
,
82 EXPECT_FALSE(Allocator
->isInitialized());
84 auto Registry
= Allocator
->getTSDRegistry();
85 Registry
->initOnceMaybe(Allocator
.get());
86 EXPECT_TRUE(Allocator
->isInitialized());
// Exercises the basic registry life cycle for a given allocator
// configuration: minimal then full thread initialization, and TSD
// acquisition/release.
template <class AllocatorT> static void testRegistry() {
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  EXPECT_FALSE(Allocator->isInitialized());

  auto Registry = Allocator->getTSDRegistry();
  Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/true);
  EXPECT_TRUE(Allocator->isInitialized());

  bool UnlockRequired;
  auto TSD = Registry->getTSDAndLock(&UnlockRequired);
  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
  EXPECT_NE(TSD, nullptr);
  EXPECT_EQ(TSD->getCache().Canary, 0U);
  if (UnlockRequired)
    TSD->unlock();

  Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/false);
  TSD = Registry->getTSDAndLock(&UnlockRequired);
  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
  EXPECT_NE(TSD, nullptr);
  EXPECT_EQ(TSD->getCache().Canary, 0U);
  // Scribble over the cache so that a fresh (zeroed) cache is
  // distinguishable from a reused one in later checks.
  memset(&TSD->getCache(), 0x42, sizeof(TSD->getCache()));
  if (UnlockRequired)
    TSD->unlock();
}
120 TEST(ScudoTSDTest
, TSDRegistryBasic
) {
121 testRegistry
<MockAllocator
<OneCache
>>();
122 testRegistry
<MockAllocator
<SharedCaches
>>();
124 testRegistry
<MockAllocator
<ExclusiveCaches
>>();
// Synchronization used to line up the stress threads so they all hammer the
// registry concurrently: threads block on Cv until Ready is set under Mutex.
static std::mutex Mutex;
static std::condition_variable Cv;
static bool Ready;
132 template <typename AllocatorT
> static void stressCache(AllocatorT
*Allocator
) {
133 auto Registry
= Allocator
->getTSDRegistry();
135 std::unique_lock
<std::mutex
> Lock(Mutex
);
139 Registry
->initThreadMaybe(Allocator
, /*MinimalInit=*/false);
141 auto TSD
= Registry
->getTSDAndLock(&UnlockRequired
);
142 TSD
->assertLocked(/*BypassCheck=*/!UnlockRequired
);
143 EXPECT_NE(TSD
, nullptr);
144 // For an exclusive TSD, the cache should be empty. We cannot guarantee the
145 // same for a shared TSD.
147 EXPECT_EQ(TSD
->getCache().Canary
, 0U);
148 // Transform the thread id to a uptr to use it as canary.
149 const scudo::uptr Canary
= static_cast<scudo::uptr
>(
150 std::hash
<std::thread::id
>{}(std::this_thread::get_id()));
151 TSD
->getCache().Canary
= Canary
;
152 // Loop a few times to make sure that a concurrent thread isn't modifying it.
153 for (scudo::uptr I
= 0; I
< 4096U; I
++)
154 EXPECT_EQ(TSD
->getCache().Canary
, Canary
);
159 template <class AllocatorT
> static void testRegistryThreaded() {
161 auto Deleter
= [](AllocatorT
*A
) {
165 std::unique_ptr
<AllocatorT
, decltype(Deleter
)> Allocator(new AllocatorT
,
167 std::thread Threads
[32];
168 for (scudo::uptr I
= 0; I
< ARRAY_SIZE(Threads
); I
++)
169 Threads
[I
] = std::thread(stressCache
<AllocatorT
>, Allocator
.get());
171 std::unique_lock
<std::mutex
> Lock(Mutex
);
175 for (auto &T
: Threads
)
179 TEST(ScudoTSDTest
, TSDRegistryThreaded
) {
180 testRegistryThreaded
<MockAllocator
<OneCache
>>();
181 testRegistryThreaded
<MockAllocator
<SharedCaches
>>();
183 testRegistryThreaded
<MockAllocator
<ExclusiveCaches
>>();
// Set of all distinct TSD pointers observed by the stress threads; accessed
// under Mutex.
static std::set<void *> Pointers;
189 static void stressSharedRegistry(MockAllocator
<SharedCaches
> *Allocator
) {
190 std::set
<void *> Set
;
191 auto Registry
= Allocator
->getTSDRegistry();
193 std::unique_lock
<std::mutex
> Lock(Mutex
);
197 Registry
->initThreadMaybe(Allocator
, /*MinimalInit=*/false);
199 for (scudo::uptr I
= 0; I
< 4096U; I
++) {
200 auto TSD
= Registry
->getTSDAndLock(&UnlockRequired
);
201 TSD
->assertLocked(/*BypassCheck=*/!UnlockRequired
);
202 EXPECT_NE(TSD
, nullptr);
203 Set
.insert(reinterpret_cast<void *>(TSD
));
208 std::unique_lock
<std::mutex
> Lock(Mutex
);
209 Pointers
.insert(Set
.begin(), Set
.end());
213 TEST(ScudoTSDTest
, TSDRegistryTSDsCount
) {
216 using AllocatorT
= MockAllocator
<SharedCaches
>;
217 auto Deleter
= [](AllocatorT
*A
) {
221 std::unique_ptr
<AllocatorT
, decltype(Deleter
)> Allocator(new AllocatorT
,
223 // We attempt to use as many TSDs as the shared cache offers by creating a
224 // decent amount of threads that will be run concurrently and attempt to get
225 // and lock TSDs. We put them all in a set and count the number of entries
226 // after we are done.
227 std::thread Threads
[32];
228 for (scudo::uptr I
= 0; I
< ARRAY_SIZE(Threads
); I
++)
229 Threads
[I
] = std::thread(stressSharedRegistry
, Allocator
.get());
231 std::unique_lock
<std::mutex
> Lock(Mutex
);
235 for (auto &T
: Threads
)
237 // The initial number of TSDs we get will be the minimum of the default count
238 // and the number of CPUs.
239 EXPECT_LE(Pointers
.size(), 8U);
241 auto Registry
= Allocator
->getTSDRegistry();
242 // Increase the number of TSDs to 16.
243 Registry
->setOption(scudo::Option::MaxTSDsCount
, 16);
245 for (scudo::uptr I
= 0; I
< ARRAY_SIZE(Threads
); I
++)
246 Threads
[I
] = std::thread(stressSharedRegistry
, Allocator
.get());
248 std::unique_lock
<std::mutex
> Lock(Mutex
);
252 for (auto &T
: Threads
)
254 // We should get 16 distinct TSDs back.
255 EXPECT_EQ(Pointers
.size(), 16U);