//===-- tsd_test.cpp --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "tests/scudo_unit_test.h"

#include "tsd_exclusive.h"
#include "tsd_shared.h"

#include <stdlib.h>

#include <condition_variable>
#include <mutex>
#include <set>
#include <thread>
#include <type_traits>
// We mock out an allocator with a TSD registry, mostly using empty stubs. The
// cache contains a single volatile uptr, to be able to test that several
// concurrent threads will not access or modify the same cache at the same time.
25 template <class Config
> class MockAllocator
{
27 using ThisT
= MockAllocator
<Config
>;
28 using TSDRegistryT
= typename
Config::template TSDRegistryT
<ThisT
>;
29 using CacheT
= struct MockCache
{
30 volatile scudo::uptr Canary
;
32 using QuarantineCacheT
= struct MockQuarantine
{};
35 // This should only be called once by the registry.
36 EXPECT_FALSE(Initialized
);
40 void unmapTestOnly() { TSDRegistry
.unmapTestOnly(this); }
41 void initCache(CacheT
*Cache
) { *Cache
= {}; }
42 void commitBack(UNUSED
scudo::TSD
<MockAllocator
> *TSD
) {}
43 TSDRegistryT
*getTSDRegistry() { return &TSDRegistry
; }
44 void callPostInitCallback() {}
46 bool isInitialized() { return Initialized
; }
48 void *operator new(size_t Size
) {
50 EXPECT_EQ(0, posix_memalign(&P
, alignof(ThisT
), Size
));
53 void operator delete(void *P
) { free(P
); }
56 bool Initialized
= false;
57 TSDRegistryT TSDRegistry
;
61 template <class Allocator
>
62 using TSDRegistryT
= scudo::TSDRegistrySharedT
<Allocator
, 1U, 1U>;
66 template <class Allocator
>
67 using TSDRegistryT
= scudo::TSDRegistrySharedT
<Allocator
, 16U, 8U>;
70 struct ExclusiveCaches
{
71 template <class Allocator
>
72 using TSDRegistryT
= scudo::TSDRegistryExT
<Allocator
>;
75 TEST(ScudoTSDTest
, TSDRegistryInit
) {
76 using AllocatorT
= MockAllocator
<OneCache
>;
77 auto Deleter
= [](AllocatorT
*A
) {
81 std::unique_ptr
<AllocatorT
, decltype(Deleter
)> Allocator(new AllocatorT
,
83 EXPECT_FALSE(Allocator
->isInitialized());
85 auto Registry
= Allocator
->getTSDRegistry();
86 Registry
->initOnceMaybe(Allocator
.get());
87 EXPECT_TRUE(Allocator
->isInitialized());
90 template <class AllocatorT
>
91 static void testRegistry() NO_THREAD_SAFETY_ANALYSIS
{
92 auto Deleter
= [](AllocatorT
*A
) {
96 std::unique_ptr
<AllocatorT
, decltype(Deleter
)> Allocator(new AllocatorT
,
98 EXPECT_FALSE(Allocator
->isInitialized());
100 auto Registry
= Allocator
->getTSDRegistry();
101 Registry
->initThreadMaybe(Allocator
.get(), /*MinimalInit=*/true);
102 EXPECT_TRUE(Allocator
->isInitialized());
105 typename
AllocatorT::TSDRegistryT::ScopedTSD
TSD(*Registry
);
106 EXPECT_EQ(TSD
->getCache().Canary
, 0U);
109 Registry
->initThreadMaybe(Allocator
.get(), /*MinimalInit=*/false);
111 typename
AllocatorT::TSDRegistryT::ScopedTSD
TSD(*Registry
);
112 EXPECT_EQ(TSD
->getCache().Canary
, 0U);
113 memset(&TSD
->getCache(), 0x42, sizeof(TSD
->getCache()));
117 TEST(ScudoTSDTest
, TSDRegistryBasic
) {
118 testRegistry
<MockAllocator
<OneCache
>>();
119 testRegistry
<MockAllocator
<SharedCaches
>>();
121 testRegistry
<MockAllocator
<ExclusiveCaches
>>();
// Rendezvous state for the stress tests: worker threads block on `Cv` until
// the main thread flips `Ready` under `Mutex`, so all workers start together.
static std::mutex Mutex;
static std::condition_variable Cv;
static bool Ready;
129 // Accessing `TSD->getCache()` requires `TSD::Mutex` which isn't easy to test
130 // using thread-safety analysis. Alternatively, we verify the thread safety
131 // through a runtime check in ScopedTSD and mark the test body with
132 // NO_THREAD_SAFETY_ANALYSIS.
133 template <typename AllocatorT
>
134 static void stressCache(AllocatorT
*Allocator
) NO_THREAD_SAFETY_ANALYSIS
{
135 auto Registry
= Allocator
->getTSDRegistry();
137 std::unique_lock
<std::mutex
> Lock(Mutex
);
141 Registry
->initThreadMaybe(Allocator
, /*MinimalInit=*/false);
142 typename
AllocatorT::TSDRegistryT::ScopedTSD
TSD(*Registry
);
143 // For an exclusive TSD, the cache should be empty. We cannot guarantee the
144 // same for a shared TSD.
145 if (std::is_same
<typename
AllocatorT::TSDRegistryT
,
146 scudo::TSDRegistryExT
<AllocatorT
>>()) {
147 EXPECT_EQ(TSD
->getCache().Canary
, 0U);
149 // Transform the thread id to a uptr to use it as canary.
150 const scudo::uptr Canary
= static_cast<scudo::uptr
>(
151 std::hash
<std::thread::id
>{}(std::this_thread::get_id()));
152 TSD
->getCache().Canary
= Canary
;
153 // Loop a few times to make sure that a concurrent thread isn't modifying it.
154 for (scudo::uptr I
= 0; I
< 4096U; I
++)
155 EXPECT_EQ(TSD
->getCache().Canary
, Canary
);
158 template <class AllocatorT
> static void testRegistryThreaded() {
160 auto Deleter
= [](AllocatorT
*A
) {
164 std::unique_ptr
<AllocatorT
, decltype(Deleter
)> Allocator(new AllocatorT
,
166 std::thread Threads
[32];
167 for (scudo::uptr I
= 0; I
< ARRAY_SIZE(Threads
); I
++)
168 Threads
[I
] = std::thread(stressCache
<AllocatorT
>, Allocator
.get());
170 std::unique_lock
<std::mutex
> Lock(Mutex
);
174 for (auto &T
: Threads
)
178 TEST(ScudoTSDTest
, TSDRegistryThreaded
) {
179 testRegistryThreaded
<MockAllocator
<OneCache
>>();
180 testRegistryThreaded
<MockAllocator
<SharedCaches
>>();
182 testRegistryThreaded
<MockAllocator
<ExclusiveCaches
>>();
// All distinct TSD pointers observed by the stress threads, merged under
// `Mutex` at the end of each worker's run.
static std::set<void *> Pointers;
188 static void stressSharedRegistry(MockAllocator
<SharedCaches
> *Allocator
) {
189 std::set
<void *> Set
;
190 auto Registry
= Allocator
->getTSDRegistry();
192 std::unique_lock
<std::mutex
> Lock(Mutex
);
196 Registry
->initThreadMaybe(Allocator
, /*MinimalInit=*/false);
197 for (scudo::uptr I
= 0; I
< 4096U; I
++) {
198 typename MockAllocator
<SharedCaches
>::TSDRegistryT::ScopedTSD
TSD(
200 Set
.insert(reinterpret_cast<void *>(&*TSD
));
203 std::unique_lock
<std::mutex
> Lock(Mutex
);
204 Pointers
.insert(Set
.begin(), Set
.end());
208 TEST(ScudoTSDTest
, TSDRegistryTSDsCount
) {
211 using AllocatorT
= MockAllocator
<SharedCaches
>;
212 auto Deleter
= [](AllocatorT
*A
) {
216 std::unique_ptr
<AllocatorT
, decltype(Deleter
)> Allocator(new AllocatorT
,
218 // We attempt to use as many TSDs as the shared cache offers by creating a
219 // decent amount of threads that will be run concurrently and attempt to get
220 // and lock TSDs. We put them all in a set and count the number of entries
221 // after we are done.
222 std::thread Threads
[32];
223 for (scudo::uptr I
= 0; I
< ARRAY_SIZE(Threads
); I
++)
224 Threads
[I
] = std::thread(stressSharedRegistry
, Allocator
.get());
226 std::unique_lock
<std::mutex
> Lock(Mutex
);
230 for (auto &T
: Threads
)
232 // The initial number of TSDs we get will be the minimum of the default count
233 // and the number of CPUs.
234 EXPECT_LE(Pointers
.size(), 8U);
236 auto Registry
= Allocator
->getTSDRegistry();
237 // Increase the number of TSDs to 16.
238 Registry
->setOption(scudo::Option::MaxTSDsCount
, 16);
240 for (scudo::uptr I
= 0; I
< ARRAY_SIZE(Threads
); I
++)
241 Threads
[I
] = std::thread(stressSharedRegistry
, Allocator
.get());
243 std::unique_lock
<std::mutex
> Lock(Mutex
);
247 for (auto &T
: Threads
)
249 // We should get 16 distinct TSDs back.
250 EXPECT_EQ(Pointers
.size(), 16U);