//===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;
// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on 64-bit arch, but there it is less
// efficient than SizeClassAllocator64.
//
// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
// be returned by MmapOrDie().
//
// Region:
//   a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,
//                                                             kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
// a ByteMap possible_regions to store the size classes of each Region.
// 0 size class means the region is not used by the allocator.
//
// One Region is used to allocate chunks of a single size class.
// A Region looks like this:
// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
//
// In order to avoid false sharing the objects of this class should be
// cache-line aligned.
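//
// A minimal sketch of a Params struct, with hypothetical values used purely
// for illustration (each tool defines its own AP32 with its own size class
// map, metadata size and map/unmap callback):
//
//   struct ExampleAP32 {
//     static const uptr kSpaceBeg = 0;
//     static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
//     static const uptr kMetadataSize = 16;
//     typedef DefaultSizeClassMap SizeClassMap;
//     static const uptr kRegionSizeLog = 20;
//     using AddressSpaceView = LocalAddressSpaceView;
//     typedef NoOpMapUnmapCallback MapUnmapCallback;
//     static const uptr kFlags = 0;
//   };
//   typedef SizeClassAllocator32<ExampleAP32> ExamplePrimaryAllocator;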

struct SizeClassAllocator32FlagMasks {  //  Bit masks.
  enum {
    kRandomShuffleChunks = 1,
    kUseSeparateSizeClassForBatch = 2,
  };
};

template <class Params>
class SizeClassAllocator32 {
 private:
  static const u64 kTwoLevelByteMapSize1 =
      (Params::kSpaceSize >> Params::kRegionSizeLog) >> 12;
  static const u64 kMinFirstMapSizeTwoLevelByteMap = 4;

 public:
  using AddressSpaceView = typename Params::AddressSpaceView;
  static const uptr kSpaceBeg = Params::kSpaceBeg;
  static const u64 kSpaceSize = Params::kSpaceSize;
  static const uptr kMetadataSize = Params::kMetadataSize;
  typedef typename Params::SizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = Params::kRegionSizeLog;
  typedef typename Params::MapUnmapCallback MapUnmapCallback;
  using ByteMap = typename conditional<
      (kTwoLevelByteMapSize1 < kMinFirstMapSizeTwoLevelByteMap),
      FlatByteMap<(Params::kSpaceSize >> Params::kRegionSizeLog),
                  AddressSpaceView>,
      TwoLevelByteMap<kTwoLevelByteMapSize1, 1 << 12, AddressSpaceView>>::type;
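  // Which ByteMap is used depends only on how many regions the space can
  // hold. As an illustrative example (values assumed, not mandated by this
  // header): with kSpaceSize = 1ULL << 32 and kRegionSizeLog = 20,
  // kTwoLevelByteMapSize1 = (2^32 >> 20) >> 12 = 1 < 4, so a FlatByteMap with
  // 4096 entries is selected; with kSpaceSize = 1ULL << 47 the same formula
  // gives 32768 >= 4, so a TwoLevelByteMap<32768, 4096> is selected instead.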

  COMPILER_CHECK(!SANITIZER_SIGN_EXTENDED_ADDRESSES ||
                 (kSpaceSize & (kSpaceSize - 1)) == 0);

  static const bool kRandomShuffleChunks = Params::kFlags &
      SizeClassAllocator32FlagMasks::kRandomShuffleChunks;
  static const bool kUseSeparateSizeClassForBatch = Params::kFlags &
      SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;

  struct TransferBatch {
    static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
    void SetFromArray(void *batch[], uptr count) {
      DCHECK_LE(count, kMaxNumCached);
      count_ = count;
      for (uptr i = 0; i < count; i++)
        batch_[i] = batch[i];
    }
    uptr Count() const { return count_; }
    void Clear() { count_ = 0; }
    void Add(void *ptr) {
      batch_[count_++] = ptr;
      DCHECK_LE(count_, kMaxNumCached);
    }
    void CopyToArray(void *to_batch[]) const {
      for (uptr i = 0, n = Count(); i < n; i++)
        to_batch[i] = batch_[i];
    }

    // How much memory do we need for a batch containing n elements.
    static uptr AllocationSizeRequiredForNElements(uptr n) {
      return sizeof(uptr) * 2 + sizeof(void *) * n;
    }
    static uptr MaxCached(uptr size) {
      return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(size));
    }

    TransferBatch *next;

   private:
    uptr count_;
    void *batch_[kMaxNumCached];
  };

  static const uptr kBatchSize = sizeof(TransferBatch);
  COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
  COMPILER_CHECK(kBatchSize == SizeClassMap::kMaxNumCachedHint * sizeof(uptr));
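  // Layout note (derived from the checks above): a TransferBatch stores two
  // uptr-sized header fields (next and count_) plus kMaxNumCached pointers,
  // which is why kMaxNumCached is kMaxNumCachedHint - 2 and why
  // sizeof(TransferBatch) comes out to exactly
  // SizeClassMap::kMaxNumCachedHint * sizeof(uptr).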

  static uptr ClassIdToSize(uptr class_id) {
    return (class_id == SizeClassMap::kBatchClassID) ?
        kBatchSize : SizeClassMap::Size(class_id);
  }

  typedef SizeClassAllocator32<Params> ThisT;
  typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;

  void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
    CHECK(!heap_start);
    possible_regions.Init();
    internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
  }

  s32 ReleaseToOSIntervalMs() const {
    return kReleaseToOSIntervalNever;
  }

  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
    // This is empty here. Currently only implemented in the 64-bit allocator.
  }

  void ForceReleaseToOS() {
    // Currently implemented in the 64-bit allocator only.
  }
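  // Note: the 32-bit primary never returns memory to the OS; the interval
  // reported above is always kReleaseToOSIntervalNever, and both the setter
  // and ForceReleaseToOS() are deliberate no-ops. Memory release is only
  // implemented in SizeClassAllocator64.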

  void *MapWithCallback(uptr size) {
    void *res = MmapOrDie(size, PrimaryAllocatorName);
    MapUnmapCallback().OnMap((uptr)res, size);
    return res;
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
      alignment <= SizeClassMap::kMaxSize;
  }

  void *GetMetaData(const void *p) {
    CHECK(kMetadataSize);
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    uptr n = offset / (u32)size;  // 32-bit division
    uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
    return reinterpret_cast<void*>(meta);
  }
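  // Worked example of the metadata math above, with assumed (not prescribed)
  // values kRegionSizeLog = 20 (kRegionSize = 1 MiB), kMetadataSize = 16 and a
  // 128-byte size class: a pointer at region offset 4096 is chunk n = 32, so
  // its metadata lives at beg + kRegionSize - 33 * 16. Metadata records grow
  // downward from the end of the region while user chunks grow upward from
  // its start.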

  NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                        uptr class_id) {
    DCHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    if (sci->free_list.empty()) {
      if (UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
        return nullptr;
      DCHECK(!sci->free_list.empty());
    }
    TransferBatch *b = sci->free_list.front();
    sci->free_list.pop_front();
    return b;
  }

  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
                                TransferBatch *b) {
    DCHECK_LT(class_id, kNumClasses);
    CHECK_GT(b->Count(), 0);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    sci->free_list.push_front(b);
  }

  bool PointerIsMine(const void *p) const {
    uptr mem = reinterpret_cast<uptr>(p);
    if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
      mem &= (kSpaceSize - 1);
    if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
      return false;
    return GetSizeClass(p) != 0;
  }

  uptr GetSizeClass(const void *p) const {
    uptr id = ComputeRegionId(reinterpret_cast<uptr>(p));
    return possible_regions.contains(id) ? possible_regions[id] : 0;
  }

  void *GetBlockBegin(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    u32 n = offset / (u32)size;  // 32-bit division
    uptr res = beg + (n * (u32)size);
    return reinterpret_cast<void*>(res);
  }
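  // Continuing the illustrative numbers used for GetMetaData() above (128-byte
  // class): an interior pointer at region offset 4100 rounds down via
  // n = 4100 / 128 = 32 to the block beginning at offset 32 * 128 = 4096.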

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return ClassIdToSize(GetSizeClass(p));
  }

  static uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  uptr TotalMemoryUsed() {
    // No need to lock here.
    uptr res = 0;
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        res += kRegionSize;
    return res;
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        UnmapWithCallback((i * kRegionSize), kRegionSize);
  }

  // ForceLock() and ForceUnlock() are needed to implement the Darwin malloc
  // zone introspection API.
  void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetSizeClassInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    for (int i = kNumClasses - 1; i >= 0; i--) {
      GetSizeClassInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) const {
    for (uptr region = 0; region < kNumPossibleRegions; region++)
      if (possible_regions.contains(region) && possible_regions[region]) {
        uptr chunk_size = ClassIdToSize(possible_regions[region]);
        uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
        uptr region_beg = region * kRegionSize;
        for (uptr chunk = region_beg;
             chunk < region_beg + max_chunks_in_region * chunk_size;
             chunk += chunk_size) {
          // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
          callback(chunk, arg);
        }
      }
  }
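  // A minimal usage sketch (the callback name is hypothetical; ForEachChunk
  // visits every chunk slot in every mapped region, whether currently in use
  // or not, so callers typically filter by their own chunk state):
  //
  //   static void CountChunk(uptr chunk, void *arg) {
  //     ++*reinterpret_cast<uptr *>(arg);
  //   }
  //   ...
  //   allocator.ForceLock();
  //   uptr n_chunks = 0;
  //   allocator.ForEachChunk(CountChunk, &n_chunks);
  //   allocator.ForceUnlock();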

  static uptr AdditionalSize() { return 0; }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;

 private:
  static const uptr kRegionSize = 1 << kRegionSizeLog;
  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;

  struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) SizeClassInfo {
    StaticSpinMutex mutex;
    IntrusiveList<TransferBatch> free_list;
    u32 rand_state;
  };
  COMPILER_CHECK(sizeof(SizeClassInfo) % kCacheLineSize == 0);
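  // Each size class gets its own mutex, free list and shuffle seed; the
  // cache-line alignment keeps adjacent SizeClassInfo entries from sharing a
  // line, so contention on one class's mutex does not cause false sharing
  // with its neighbors.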

  uptr ComputeRegionId(uptr mem) const {
    if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
      mem &= (kSpaceSize - 1);
    const uptr res = mem >> kRegionSizeLog;
    CHECK_LT(res, kNumPossibleRegions);
    return res;
  }

  uptr ComputeRegionBeg(uptr mem) const { return mem & ~(kRegionSize - 1); }

  uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
    DCHECK_LT(class_id, kNumClasses);
    const uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
        kRegionSize, kRegionSize, PrimaryAllocatorName));
    if (UNLIKELY(!res))
      return 0;
    MapUnmapCallback().OnMap(res, kRegionSize);
    stat->Add(AllocatorStatMapped, kRegionSize);
    CHECK(IsAligned(res, kRegionSize));
    possible_regions[ComputeRegionId(res)] = class_id;
    return res;
  }

  SizeClassInfo *GetSizeClassInfo(uptr class_id) {
    DCHECK_LT(class_id, kNumClasses);
    return &size_class_info_array[class_id];
  }

  bool PopulateBatches(AllocatorCache *c, SizeClassInfo *sci, uptr class_id,
                       TransferBatch **current_batch, uptr max_count,
                       uptr *pointers_array, uptr count) {
    // If using a separate class for batches, we do not need to shuffle it.
    if (kRandomShuffleChunks && (!kUseSeparateSizeClassForBatch ||
        class_id != SizeClassMap::kBatchClassID))
      RandomShuffle(pointers_array, count, &sci->rand_state);
    TransferBatch *b = *current_batch;
    for (uptr i = 0; i < count; i++) {
      if (!b) {
        b = c->CreateBatch(class_id, this, (TransferBatch*)pointers_array[i]);
        if (UNLIKELY(!b))
          return false;
        b->Clear();
      }
      b->Add((void*)pointers_array[i]);
      if (b->Count() == max_count) {
        sci->free_list.push_back(b);
        b = nullptr;
      }
    }
    *current_batch = b;
    return true;
  }
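  // PopulateBatches() packs the freshly carved chunk addresses into
  // TransferBatches of up to max_count pointers each; a partially filled
  // batch is handed back through *current_batch so the next call can keep
  // filling it instead of starting a new one.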

  bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                        SizeClassInfo *sci, uptr class_id) {
    const uptr region = AllocateRegion(stat, class_id);
    if (UNLIKELY(!region))
      return false;
    if (kRandomShuffleChunks)
      if (UNLIKELY(sci->rand_state == 0))
        // The random state is initialized from ASLR (PIE) and time.
        sci->rand_state = reinterpret_cast<uptr>(sci) ^ NanoTime();
    const uptr size = ClassIdToSize(class_id);
    const uptr n_chunks = kRegionSize / (size + kMetadataSize);
    const uptr max_count = TransferBatch::MaxCached(size);
    DCHECK_GT(max_count, 0);
    TransferBatch *b = nullptr;
    constexpr uptr kShuffleArraySize = 48;
    UNINITIALIZED uptr shuffle_array[kShuffleArraySize];
    uptr count = 0;
    for (uptr i = region; i < region + n_chunks * size; i += size) {
      shuffle_array[count++] = i;
      if (count == kShuffleArraySize) {
        if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
                                      shuffle_array, count)))
          return false;
        count = 0;
      }
    }
    if (count) {
      if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
                                    shuffle_array, count)))
        return false;
    }
    if (b) {
      CHECK_GT(b->Count(), 0);
      sci->free_list.push_back(b);
    }
    return true;
  }
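  // In short: PopulateFreeList() maps one fresh region for the class, carves
  // it into kRegionSize / (size + kMetadataSize) chunks, gathers their
  // addresses into a 48-entry scratch array (shuffling each batch of
  // addresses when kRandomShuffleChunks is set), and pushes the resulting
  // TransferBatches onto the class's free list.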

  ByteMap possible_regions;
  SizeClassInfo size_class_info_array[kNumClasses];
};