//===-- sanitizer_allocator_primary64.h -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

template <class SizeClassAllocator>
struct SizeClassAllocator64LocalCache;

// SizeClassAllocator64 -- allocator for 64-bit address space.
// The template parameter Params is a class containing the actual parameters.
//
// Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg.
// If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically by mmap.
// Otherwise SpaceBeg=kSpaceBeg (fixed address).
// kSpaceSize is a power of two.
// At the beginning the entire space is mprotect-ed, then small parts of it
// are mapped on demand.
//
// Region: a part of Space dedicated to a single size class.
// There are kNumClasses Regions of equal size.
//
// UserChunk: a piece of memory returned to user.
// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
//
// FreeArray is an array of free-d chunks (stored as 4-byte offsets).
//
// A Region looks like this:
// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1 FreeArray
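//
// Illustrative example (sizes assumed, not taken from any real SizeClassMap):
// for a 64-byte size class, UserChunk i starts at region_beg + i * 64, its
// MetaChunk occupies the kMetadataSize bytes ending at
// region_beg + kRegionSize - kFreeArraySize - i * kMetadataSize, and the
// FreeArray occupies the last kRegionSize / 8 bytes of the region.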

struct SizeClassAllocator64FlagMasks {  //  Bit masks.
  enum {
    kRandomShuffleChunks = 1,
  };
};

template <typename Allocator>
class MemoryMapper {
 public:
  typedef typename Allocator::CompactPtrT CompactPtrT;

  explicit MemoryMapper(const Allocator &allocator) : allocator_(allocator) {}

  bool GetAndResetStats(uptr &ranges, uptr &bytes) {
    ranges = released_ranges_count_;
    released_ranges_count_ = 0;
    bytes = released_bytes_;
    released_bytes_ = 0;
    return ranges != 0;
  }

  u64 *MapPackedCounterArrayBuffer(uptr count) {
    buffer_.clear();
    buffer_.resize(count);
    return buffer_.data();
  }

  // Releases [from, to) range of pages back to OS.
  void ReleasePageRangeToOS(uptr class_id, CompactPtrT from, CompactPtrT to) {
    const uptr region_base = allocator_.GetRegionBeginBySizeClass(class_id);
    const uptr from_page = allocator_.CompactPtrToPointer(region_base, from);
    const uptr to_page = allocator_.CompactPtrToPointer(region_base, to);
    ReleaseMemoryPagesToOS(from_page, to_page);
    released_ranges_count_++;
    released_bytes_ += to_page - from_page;
  }

 private:
  const Allocator &allocator_;
  uptr released_ranges_count_ = 0;
  uptr released_bytes_ = 0;
  InternalMmapVector<u64> buffer_;
};

template <class Params>
class SizeClassAllocator64 {
 public:
  using AddressSpaceView = typename Params::AddressSpaceView;
  static const uptr kSpaceBeg = Params::kSpaceBeg;
  static const uptr kSpaceSize = Params::kSpaceSize;
  static const uptr kMetadataSize = Params::kMetadataSize;
  typedef typename Params::SizeClassMap SizeClassMap;
  typedef typename Params::MapUnmapCallback MapUnmapCallback;

  static const bool kRandomShuffleChunks =
      Params::kFlags & SizeClassAllocator64FlagMasks::kRandomShuffleChunks;

  typedef SizeClassAllocator64<Params> ThisT;
  typedef SizeClassAllocator64LocalCache<ThisT> AllocatorCache;
  typedef MemoryMapper<ThisT> MemoryMapperT;

  // When we know the size class (the region base) we can represent a pointer
  // as a 4-byte integer (offset from the region start shifted right by 4).
  typedef u32 CompactPtrT;
  static const uptr kCompactPtrScale = 4;
  CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) const {
    return static_cast<CompactPtrT>((ptr - base) >> kCompactPtrScale);
  }
  uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) const {
    return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
  }
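
  // Example (illustrative values): for a chunk at base + 0x120,
  // PointerToCompactPtr(base, base + 0x120) == 0x12 and
  // CompactPtrToPointer(base, 0x12) == base + 0x120. Chunks are at least
  // 16-byte aligned, so shifting right by kCompactPtrScale loses no
  // information, and a 4-byte CompactPtrT can address up to 2^36 bytes of
  // region.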

  // If heap_start is nonzero, assumes kSpaceSize bytes are already mapped R/W
  // at heap_start and places the heap there. This mode requires kSpaceBeg ==
  // ~(uptr)0.
  void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
    uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
    PremappedHeap = heap_start != 0;
    if (PremappedHeap) {
      CHECK(!kUsingConstantSpaceBeg);
      NonConstSpaceBeg = heap_start;
      uptr RegionInfoSize = AdditionalSize();
      RegionInfoSpace =
          address_range.Init(RegionInfoSize, PrimaryAllocatorName);
      CHECK_NE(RegionInfoSpace, ~(uptr)0);
      CHECK_EQ(RegionInfoSpace,
               address_range.MapOrDie(RegionInfoSpace, RegionInfoSize,
                                      "SizeClassAllocator: region info"));
      MapUnmapCallback().OnMap(RegionInfoSpace, RegionInfoSize);
    } else {
      if (kUsingConstantSpaceBeg) {
        CHECK(IsAligned(kSpaceBeg, SizeClassMap::kMaxSize));
        CHECK_EQ(kSpaceBeg,
                 address_range.Init(TotalSpaceSize, PrimaryAllocatorName,
                                    kSpaceBeg));
      } else {
        // Combined allocator expects that a 2^N allocation is always aligned
        // to 2^N. For this to work, the start of the space needs to be aligned
        // as high as the largest size class (which also needs to be a power of
        // 2).
        NonConstSpaceBeg = address_range.InitAligned(
            TotalSpaceSize, SizeClassMap::kMaxSize, PrimaryAllocatorName);
        CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
      }
      RegionInfoSpace = SpaceEnd();
      MapWithCallbackOrDie(RegionInfoSpace, AdditionalSize(),
                           "SizeClassAllocator: region info");
    }
    SetReleaseToOSIntervalMs(release_to_os_interval_ms);
    // Check that the RegionInfo array is aligned on the CacheLine size.
    DCHECK_EQ(RegionInfoSpace % kCacheLineSize, 0);
  }
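
  // Typical usage (illustrative; the Params type name below is an assumption,
  // not part of this header):
  //   typedef SizeClassAllocator64<MyAllocatorParams> PrimaryAllocator;
  //   static PrimaryAllocator primary;
  //   primary.Init(/*release_to_os_interval_ms=*/5000);
  // Passing a nonzero heap_start instead places the heap in memory the caller
  // has already mapped R/W, which requires kSpaceBeg == ~(uptr)0.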

  s32 ReleaseToOSIntervalMs() const {
    return atomic_load(&release_to_os_interval_ms_, memory_order_relaxed);
  }

  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
    atomic_store(&release_to_os_interval_ms_, release_to_os_interval_ms,
                 memory_order_relaxed);
  }
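
  // For example, SetReleaseToOSIntervalMs(5000) lets MaybeReleaseToOS return
  // freed pages of a region at most once every five seconds (unless forced),
  // while a negative interval disables interval-based release entirely.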

  void ForceReleaseToOS() {
    MemoryMapperT memory_mapper(*this);
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      Lock l(&GetRegionInfo(class_id)->mutex);
      MaybeReleaseToOS(&memory_mapper, class_id, true /*force*/);
    }
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
           alignment <= SizeClassMap::kMaxSize;
  }

  NOINLINE void ReturnToAllocator(MemoryMapperT *memory_mapper,
                                  AllocatorStats *stat, uptr class_id,
                                  const CompactPtrT *chunks, uptr n_chunks) {
    RegionInfo *region = GetRegionInfo(class_id);
    uptr region_beg = GetRegionBeginBySizeClass(class_id);
    CompactPtrT *free_array = GetFreeArray(region_beg);

    Lock l(&region->mutex);
    uptr old_num_chunks = region->num_freed_chunks;
    uptr new_num_freed_chunks = old_num_chunks + n_chunks;
    // Failure to allocate free array space while releasing memory is
    // non-recoverable.
    if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg,
                                       new_num_freed_chunks))) {
      Report("FATAL: Internal error: %s's allocator exhausted the free list "
             "space for size class %zd (%zd bytes).\n", SanitizerToolName,
             class_id, ClassIdToSize(class_id));
      Die();
    }
    for (uptr i = 0; i < n_chunks; i++)
      free_array[old_num_chunks + i] = chunks[i];
    region->num_freed_chunks = new_num_freed_chunks;
    region->stats.n_freed += n_chunks;

    MaybeReleaseToOS(memory_mapper, class_id, false /*force*/);
  }

  NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
                                 CompactPtrT *chunks, uptr n_chunks) {
    RegionInfo *region = GetRegionInfo(class_id);
    uptr region_beg = GetRegionBeginBySizeClass(class_id);
    CompactPtrT *free_array = GetFreeArray(region_beg);

    Lock l(&region->mutex);
#if SANITIZER_WINDOWS
    /* On Windows unmapping of memory during __sanitizer_purge_allocator is
    explicit and immediate, so unmapped regions must be explicitly mapped back
    in when they are accessed again. */
    if (region->rtoi.last_released_bytes > 0) {
      MmapFixedOrDie(region_beg, region->mapped_user,
                     "SizeClassAllocator: region data");
      region->rtoi.n_freed_at_last_release = 0;
      region->rtoi.last_released_bytes = 0;
    }
#endif
    if (UNLIKELY(region->num_freed_chunks < n_chunks)) {
      if (UNLIKELY(!PopulateFreeArray(stat, class_id, region,
                                      n_chunks - region->num_freed_chunks)))
        return false;
      CHECK_GE(region->num_freed_chunks, n_chunks);
    }
    region->num_freed_chunks -= n_chunks;
    uptr base_idx = region->num_freed_chunks;
    for (uptr i = 0; i < n_chunks; i++)
      chunks[i] = free_array[base_idx + i];
    region->stats.n_allocated += n_chunks;
    return true;
  }

  bool PointerIsMine(const void *p) const {
    uptr P = reinterpret_cast<uptr>(p);
    if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
      return P / kSpaceSize == kSpaceBeg / kSpaceSize;
    return P >= SpaceBeg() && P < SpaceEnd();
  }

  uptr GetRegionBegin(const void *p) {
    if (kUsingConstantSpaceBeg)
      return reinterpret_cast<uptr>(p) & ~(kRegionSize - 1);
    uptr space_beg = SpaceBeg();
    return ((reinterpret_cast<uptr>(p) - space_beg) & ~(kRegionSize - 1)) +
           space_beg;
  }

  uptr GetRegionBeginBySizeClass(uptr class_id) const {
    return SpaceBeg() + kRegionSize * class_id;
  }

  uptr GetSizeClass(const void *p) {
    if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
      return ((reinterpret_cast<uptr>(p)) / kRegionSize) % kNumClassesRounded;
    return ((reinterpret_cast<uptr>(p) - SpaceBeg()) / kRegionSize) %
           kNumClassesRounded;
  }

  void *GetBlockBegin(const void *p) {
    uptr class_id = GetSizeClass(p);
    if (class_id >= kNumClasses) return nullptr;
    uptr size = ClassIdToSize(class_id);
    if (!size) return nullptr;
    uptr chunk_idx = GetChunkIdx((uptr)p, size);
    uptr reg_beg = GetRegionBegin(p);
    uptr beg = chunk_idx * size;
    uptr next_beg = beg + size;
    const RegionInfo *region = AddressSpaceView::Load(GetRegionInfo(class_id));
    if (region->mapped_user >= next_beg)
      return reinterpret_cast<void *>(reg_beg + beg);
    return nullptr;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return ClassIdToSize(GetSizeClass(p));
  }

  static uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  void *GetMetaData(const void *p) {
    CHECK(kMetadataSize);
    uptr class_id = GetSizeClass(p);
    uptr size = ClassIdToSize(class_id);
    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
    uptr region_beg = GetRegionBeginBySizeClass(class_id);
    return reinterpret_cast<void *>(GetMetadataEnd(region_beg) -
                                    (1 + chunk_idx) * kMetadataSize);
  }
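
  // Worked example (illustrative sizes): with kMetadataSize == 16 and
  // chunk_idx == 3, the metadata is the 16 bytes ending at
  // GetMetadataEnd(region_beg) - 3 * 16; metadata grows downwards from the
  // end of the region (just below the FreeArray), mirroring the upward growth
  // of user chunks from region_beg.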

  uptr TotalMemoryUsed() {
    uptr res = 0;
    for (uptr i = 0; i < kNumClasses; i++)
      res += GetRegionInfo(i)->allocated_user;
    return res;
  }

  void TestOnlyUnmap() {
    UnmapWithCallbackOrDie((uptr)address_range.base(), address_range.size());
  }

  static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats) {
    for (uptr class_id = 0; class_id < kNumClasses; class_id++)
      if (stats[class_id] == start)
        stats[class_id] = rss;
  }

  void PrintStats(uptr class_id, uptr rss) {
    RegionInfo *region = GetRegionInfo(class_id);
    if (region->mapped_user == 0) return;
    uptr in_use = region->stats.n_allocated - region->stats.n_freed;
    uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id);
    Printf(
        "%s %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd "
        "num_freed_chunks %7zd avail: %6zd rss: %6zdK releases: %6zd "
        "last released: %6lldK region: 0x%zx\n",
        region->exhausted ? "F" : " ", class_id, ClassIdToSize(class_id),
        region->mapped_user >> 10, region->stats.n_allocated,
        region->stats.n_freed, in_use, region->num_freed_chunks, avail_chunks,
        rss >> 10, region->rtoi.num_releases,
        region->rtoi.last_released_bytes >> 10,
        SpaceBeg() + kRegionSize * class_id);
  }

  void PrintStats() {
    uptr rss_stats[kNumClasses];
    for (uptr class_id = 0; class_id < kNumClasses; class_id++)
      rss_stats[class_id] = SpaceBeg() + kRegionSize * class_id;
    GetMemoryProfile(FillMemoryProfile, rss_stats);

    uptr total_mapped = 0;
    uptr total_rss = 0;
    uptr n_allocated = 0;
    uptr n_freed = 0;
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      if (region->mapped_user != 0) {
        total_mapped += region->mapped_user;
        total_rss += rss_stats[class_id];
      }
      n_allocated += region->stats.n_allocated;
      n_freed += region->stats.n_freed;
    }

    Printf("Stats: SizeClassAllocator64: %zdM mapped (%zdM rss) in "
           "%zd allocations; remains %zd\n", total_mapped >> 20,
           total_rss >> 20, n_allocated, n_allocated - n_freed);
    for (uptr class_id = 1; class_id < kNumClasses; class_id++)
      PrintStats(class_id, rss_stats[class_id]);
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetRegionInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    for (int i = (int)kNumClasses - 1; i >= 0; i--) {
      GetRegionInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      uptr chunk_size = ClassIdToSize(class_id);
      uptr region_beg = SpaceBeg() + class_id * kRegionSize;
      uptr region_allocated_user_size =
          AddressSpaceView::Load(region)->allocated_user;
      for (uptr chunk = region_beg;
           chunk < region_beg + region_allocated_user_size;
           chunk += chunk_size) {
        // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
        callback(chunk, arg);
      }
    }
  }

  static uptr ClassIdToSize(uptr class_id) {
    return SizeClassMap::Size(class_id);
  }

  static uptr AdditionalSize() {
    return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
                     GetPageSizeCached());
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;

  // A packed array of counters. Each counter occupies 2^n bits, enough to
  // store counter's max_value. Ctor will try to allocate the required buffer
  // via mapper->MapPackedCounterArrayBuffer and the caller is expected to
  // check whether the initialization was successful by checking IsAllocated()
  // result. For performance, none of the accessors check the validity of the
  // arguments; it is assumed that index is always in the [0, n) range and the
  // value is not incremented past max_value.
  class PackedCounterArray {
   public:
    template <typename MemoryMapper>
    PackedCounterArray(u64 num_counters, u64 max_value, MemoryMapper *mapper)
        : n(num_counters) {
      CHECK_GT(num_counters, 0);
      CHECK_GT(max_value, 0);
      constexpr u64 kMaxCounterBits = sizeof(*buffer) * 8ULL;
      // Rounding counter storage size up to the power of two allows for using
      // bit shifts calculating particular counter's index and offset.
      uptr counter_size_bits =
          RoundUpToPowerOfTwo(MostSignificantSetBitIndex(max_value) + 1);
      CHECK_LE(counter_size_bits, kMaxCounterBits);
      counter_size_bits_log = Log2(counter_size_bits);
      counter_mask = ~0ULL >> (kMaxCounterBits - counter_size_bits);

      uptr packing_ratio = kMaxCounterBits >> counter_size_bits_log;
      CHECK_GT(packing_ratio, 0);
      packing_ratio_log = Log2(packing_ratio);
      bit_offset_mask = packing_ratio - 1;

      buffer = mapper->MapPackedCounterArrayBuffer(
          RoundUpTo(n, 1ULL << packing_ratio_log) >> packing_ratio_log);
    }

    bool IsAllocated() const {
      return !!buffer;
    }

    u64 GetCount() const {
      return n;
    }

    uptr Get(uptr i) const {
      uptr index = i >> packing_ratio_log;
      uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
      return (buffer[index] >> bit_offset) & counter_mask;
    }

    void Inc(uptr i) const {
      DCHECK_LT(Get(i), counter_mask);
      uptr index = i >> packing_ratio_log;
      uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
      buffer[index] += 1ULL << bit_offset;
    }

    void IncRange(uptr from, uptr to) const {
      for (uptr i = from; i <= to; i++)
        Inc(i);
    }

   private:
    const u64 n;
    u64 counter_size_bits_log;
    u64 counter_mask;
    u64 packing_ratio_log;
    u64 bit_offset_mask;
    u64 *buffer;
  };

  template <class MemoryMapperT>
  class FreePagesRangeTracker {
   public:
    FreePagesRangeTracker(MemoryMapperT *mapper, uptr class_id)
        : memory_mapper(mapper),
          class_id(class_id),
          page_size_scaled_log(Log2(GetPageSizeCached() >> kCompactPtrScale)) {}

    void NextPage(bool freed) {
      if (freed) {
        if (!in_the_range) {
          current_range_start_page = current_page;
          in_the_range = true;
        }
      } else {
        CloseOpenedRange();
      }
      current_page++;
    }

    void Done() {
      CloseOpenedRange();
    }

   private:
    void CloseOpenedRange() {
      if (in_the_range) {
        memory_mapper->ReleasePageRangeToOS(
            class_id, current_range_start_page << page_size_scaled_log,
            current_page << page_size_scaled_log);
        in_the_range = false;
      }
    }

    MemoryMapperT *const memory_mapper = nullptr;
    const uptr class_id = 0;
    const uptr page_size_scaled_log = 0;
    bool in_the_range = false;
    uptr current_page = 0;
    uptr current_range_start_page = 0;
  };
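
  // Example (illustrative): feeding NextPage() the per-page sequence
  // freed, freed, not-freed, freed for pages 0..3 releases the range covering
  // pages 0-1 when the non-freed page is seen, and Done() then releases the
  // range covering page 3; only maximal runs of fully-freed pages are
  // returned to the OS.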

  // Iterates over the free_array to identify memory pages containing freed
  // chunks only and returns these pages back to OS.
  // allocated_pages_count is the total number of pages allocated for the
  // current bucket.
  template <typename MemoryMapper>
  static void ReleaseFreeMemoryToOS(CompactPtrT *free_array,
                                    uptr free_array_count, uptr chunk_size,
                                    uptr allocated_pages_count,
                                    MemoryMapper *memory_mapper,
                                    uptr class_id) {
    const uptr page_size = GetPageSizeCached();

    // Figure out the number of chunks per page and whether we can take a fast
    // path (the number of chunks per page is the same for all pages).
    uptr full_pages_chunk_count_max;
    bool same_chunk_count_per_page;
    if (chunk_size <= page_size && page_size % chunk_size == 0) {
      // Same number of chunks per page, no cross overs.
      full_pages_chunk_count_max = page_size / chunk_size;
      same_chunk_count_per_page = true;
    } else if (chunk_size <= page_size && page_size % chunk_size != 0 &&
               chunk_size % (page_size % chunk_size) == 0) {
      // Some chunks are crossing page boundaries, which means that the page
      // contains one or two partial chunks, but all pages contain the same
      // number of full chunks.
      full_pages_chunk_count_max = page_size / chunk_size + 1;
      same_chunk_count_per_page = true;
    } else if (chunk_size <= page_size) {
      // Some chunks are crossing page boundaries, which means that the page
      // contains one or two partial chunks.
      full_pages_chunk_count_max = page_size / chunk_size + 2;
      same_chunk_count_per_page = false;
    } else if (chunk_size > page_size && chunk_size % page_size == 0) {
      // One chunk covers multiple pages, no cross overs.
      full_pages_chunk_count_max = 1;
      same_chunk_count_per_page = true;
    } else if (chunk_size > page_size) {
      // One chunk covers multiple pages, and some chunks are crossing page
      // boundaries. Some pages contain one chunk, some contain two.
      full_pages_chunk_count_max = 2;
      same_chunk_count_per_page = false;
    } else {
      UNREACHABLE("All chunk_size/page_size ratios must be handled.");
    }
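
    // Illustrative examples with page_size == 4096: chunk_size == 64 takes
    // the first branch (64 chunks per page, uniform); chunk_size == 48 takes
    // the second (4096 % 48 == 16 and 48 % 16 == 0, at most 86 chunks per
    // page, still uniform); chunk_size == 96 takes the third (at most 44,
    // varying per page); chunk_size == 8192 takes the fourth (one chunk per
    // page); chunk_size == 10240 takes the fifth (one or two per page).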

    PackedCounterArray counters(allocated_pages_count,
                                full_pages_chunk_count_max, memory_mapper);
    if (!counters.IsAllocated())
      return;

    const uptr chunk_size_scaled = chunk_size >> kCompactPtrScale;
    const uptr page_size_scaled = page_size >> kCompactPtrScale;
    const uptr page_size_scaled_log = Log2(page_size_scaled);

    // Iterate over free chunks and count how many free chunks affect each
    // allocated page.
    if (chunk_size <= page_size && page_size % chunk_size == 0) {
      // Each chunk affects one page only.
      for (uptr i = 0; i < free_array_count; i++)
        counters.Inc(free_array[i] >> page_size_scaled_log);
    } else {
      // In all other cases chunks might affect more than one page.
      for (uptr i = 0; i < free_array_count; i++) {
        counters.IncRange(
            free_array[i] >> page_size_scaled_log,
            (free_array[i] + chunk_size_scaled - 1) >> page_size_scaled_log);
      }
    }

    // Iterate over pages detecting ranges of pages with chunk counters equal
    // to the expected number of chunks for the particular page.
    FreePagesRangeTracker<MemoryMapper> range_tracker(memory_mapper, class_id);
    if (same_chunk_count_per_page) {
      // Fast path, every page has the same number of chunks affecting it.
      for (uptr i = 0; i < counters.GetCount(); i++)
        range_tracker.NextPage(counters.Get(i) == full_pages_chunk_count_max);
    } else {
      // Slow path, go through the pages keeping count how many chunks affect
      // each page.
      const uptr pn =
          chunk_size < page_size ? page_size_scaled / chunk_size_scaled : 1;
      const uptr pnc = pn * chunk_size_scaled;
      // The idea is to increment the current page pointer by the first chunk
      // size, middle portion size (the portion of the page covered by chunks
      // except the first and the last one) and then the last chunk size,
      // adding up the number of chunks on the current page and checking on
      // every step whether the page boundary was crossed.
      uptr prev_page_boundary = 0;
      uptr current_boundary = 0;
      for (uptr i = 0; i < counters.GetCount(); i++) {
        uptr page_boundary = prev_page_boundary + page_size_scaled;
        uptr chunks_per_page = pn;
        if (current_boundary < page_boundary) {
          if (current_boundary > prev_page_boundary)
            chunks_per_page++;
          current_boundary += pnc;
          if (current_boundary < page_boundary) {
            chunks_per_page++;
            current_boundary += chunk_size_scaled;
          }
        }
        prev_page_boundary = page_boundary;
        range_tracker.NextPage(counters.Get(i) == chunks_per_page);
      }
    }
    range_tracker.Done();
  }

 private:
  friend class MemoryMapper<ThisT>;

  ReservedAddressRange address_range;

  static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
  // FreeArray is the array of free-d chunks (stored as 4-byte offsets).
  // In the worst case it may require kRegionSize/SizeClassMap::kMinSize
  // elements, but in reality this will not happen. For simplicity we
  // dedicate 1/8 of the region's virtual space to FreeArray.
  static const uptr kFreeArraySize = kRegionSize / 8;
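
  // Illustrative arithmetic (assuming kSpaceSize == 2^40 and
  // kNumClassesRounded == 64): kRegionSize == 2^34 bytes, so kFreeArraySize ==
  // 2^31 bytes, enough for 2^29 CompactPtrT entries per region, far more than
  // is ever needed in practice.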

  static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0;
  uptr NonConstSpaceBeg;
  uptr SpaceBeg() const {
    return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg;
  }
  uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
  // kRegionSize should be able to satisfy the largest size class.
  static_assert(kRegionSize >= SizeClassMap::kMaxSize,
                "Region size must be at least the largest size class");
  // kRegionSize must be <= 2^36, see CompactPtrT.
  COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4)));
  // Call mmap for user memory with at least this size.
  static const uptr kUserMapSize = 1 << 16;
  // Call mmap for metadata memory with at least this size.
  static const uptr kMetaMapSize = 1 << 16;
  // Call mmap for free array memory with at least this size.
  static const uptr kFreeArrayMapSize = 1 << 16;

  atomic_sint32_t release_to_os_interval_ms_;

  uptr RegionInfoSpace;

  // True if the user has already mapped the entire heap R/W.
  bool PremappedHeap;

  struct Stats {
    uptr n_allocated;
    uptr n_freed;
  };

  struct ReleaseToOsInfo {
    uptr n_freed_at_last_release;
    uptr num_releases;
    u64 last_release_at_ns;
    u64 last_released_bytes;
  };

  struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) RegionInfo {
    Mutex mutex;
    uptr num_freed_chunks;   // Number of elements in the freearray.
    uptr mapped_free_array;  // Bytes mapped for freearray.
    uptr allocated_user;     // Bytes allocated for user memory.
    uptr allocated_meta;     // Bytes allocated for metadata.
    uptr mapped_user;        // Bytes mapped for user memory.
    uptr mapped_meta;        // Bytes mapped for metadata.
    u32 rand_state;  // Seed for random shuffle, used if kRandomShuffleChunks.
    bool exhausted;  // Whether region is out of space for new chunks.
    Stats stats;
    ReleaseToOsInfo rtoi;
  };
  COMPILER_CHECK(sizeof(RegionInfo) % kCacheLineSize == 0);

  RegionInfo *GetRegionInfo(uptr class_id) const {
    DCHECK_LT(class_id, kNumClasses);
    RegionInfo *regions = reinterpret_cast<RegionInfo *>(RegionInfoSpace);
    return &regions[class_id];
  }

  uptr GetMetadataEnd(uptr region_beg) const {
    return region_beg + kRegionSize - kFreeArraySize;
  }

  uptr GetChunkIdx(uptr chunk, uptr size) const {
    if (!kUsingConstantSpaceBeg)
      chunk -= SpaceBeg();

    uptr offset = chunk % kRegionSize;
    // Here we divide by a non-constant. This is costly.
    // size always fits into 32-bits. If the offset fits too, use 32-bit div.
    if (offset >> (SANITIZER_WORDSIZE / 2))
      return offset / size;
    return (u32)offset / (u32)size;
  }
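
  // Note (illustrative): for a 200-byte size class and a pointer at region
  // offset 4096, both values fit in 32 bits, so the cheaper u32 division is
  // used and GetChunkIdx returns 4096 / 200 == 20; the full-width division is
  // only needed when the offset does not fit into SANITIZER_WORDSIZE / 2 bits.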

  CompactPtrT *GetFreeArray(uptr region_beg) const {
    return reinterpret_cast<CompactPtrT *>(GetMetadataEnd(region_beg));
  }

  bool MapWithCallback(uptr beg, uptr size, const char *name) {
    if (PremappedHeap)
      return beg >= NonConstSpaceBeg &&
             beg + size <= NonConstSpaceBeg + kSpaceSize;
    uptr mapped = address_range.Map(beg, size, name);
    if (UNLIKELY(!mapped))
      return false;
    CHECK_EQ(beg, mapped);
    MapUnmapCallback().OnMap(beg, size);
    return true;
  }

  void MapWithCallbackOrDie(uptr beg, uptr size, const char *name) {
    if (PremappedHeap) {
      CHECK_GE(beg, NonConstSpaceBeg);
      CHECK_LE(beg + size, NonConstSpaceBeg + kSpaceSize);
      return;
    }
    CHECK_EQ(beg, address_range.MapOrDie(beg, size, name));
    MapUnmapCallback().OnMap(beg, size);
  }

  void UnmapWithCallbackOrDie(uptr beg, uptr size) {
    if (PremappedHeap)
      return;
    MapUnmapCallback().OnUnmap(beg, size);
    address_range.Unmap(beg, size);
  }

  bool EnsureFreeArraySpace(RegionInfo *region, uptr region_beg,
                            uptr num_freed_chunks) {
    uptr needed_space = num_freed_chunks * sizeof(CompactPtrT);
    if (region->mapped_free_array < needed_space) {
      uptr new_mapped_free_array = RoundUpTo(needed_space, kFreeArrayMapSize);
      CHECK_LE(new_mapped_free_array, kFreeArraySize);
      uptr current_map_end = reinterpret_cast<uptr>(GetFreeArray(region_beg)) +
                             region->mapped_free_array;
      uptr new_map_size = new_mapped_free_array - region->mapped_free_array;
      if (UNLIKELY(!MapWithCallback(current_map_end, new_map_size,
                                    "SizeClassAllocator: freearray")))
        return false;
      region->mapped_free_array = new_mapped_free_array;
    }
    return true;
  }

  // Check whether this size class is exhausted.
  bool IsRegionExhausted(RegionInfo *region, uptr class_id,
                         uptr additional_map_size) {
    if (LIKELY(region->mapped_user + region->mapped_meta +
               additional_map_size <= kRegionSize - kFreeArraySize))
      return false;
    if (!region->exhausted) {
      region->exhausted = true;
      Printf("%s: Out of memory. ", SanitizerToolName);
      Printf("The process has exhausted %zuMB for size class %zu.\n",
             kRegionSize >> 20, ClassIdToSize(class_id));
    }
    return true;
  }

  NOINLINE bool PopulateFreeArray(AllocatorStats *stat, uptr class_id,
                                  RegionInfo *region, uptr requested_count) {
    // region->mutex is held.
    const uptr region_beg = GetRegionBeginBySizeClass(class_id);
    const uptr size = ClassIdToSize(class_id);

    const uptr total_user_bytes =
        region->allocated_user + requested_count * size;
    // Map more space for chunks, if necessary.
    if (LIKELY(total_user_bytes > region->mapped_user)) {
      if (UNLIKELY(region->mapped_user == 0)) {
        if (!kUsingConstantSpaceBeg && kRandomShuffleChunks)
          // The random state is initialized from ASLR.
          region->rand_state = static_cast<u32>(region_beg >> 12);
        // Postpone the first release to OS attempt for ReleaseToOSIntervalMs,
        // preventing just allocated memory from being released sooner than
        // necessary and also preventing extraneous ReleaseMemoryPagesToOS
        // calls for short lived processes.
        // Do it only when the feature is turned on, to avoid a potentially
        // extraneous syscall.
        if (ReleaseToOSIntervalMs() >= 0)
          region->rtoi.last_release_at_ns = MonotonicNanoTime();
      }
      // Do the mmap for the user memory.
      const uptr user_map_size =
          RoundUpTo(total_user_bytes - region->mapped_user, kUserMapSize);
      if (UNLIKELY(IsRegionExhausted(region, class_id, user_map_size)))
        return false;
      if (UNLIKELY(!MapWithCallback(region_beg + region->mapped_user,
                                    user_map_size,
                                    "SizeClassAllocator: region data")))
        return false;
      stat->Add(AllocatorStatMapped, user_map_size);
      region->mapped_user += user_map_size;
    }
    const uptr new_chunks_count =
        (region->mapped_user - region->allocated_user) / size;

    // Calculate the required space for metadata.
    const uptr total_meta_bytes =
        region->allocated_meta + new_chunks_count * kMetadataSize;
    const uptr meta_map_size = (total_meta_bytes > region->mapped_meta) ?
        RoundUpTo(total_meta_bytes - region->mapped_meta, kMetaMapSize) : 0;
    // Map more space for metadata, if necessary.
    if (meta_map_size) {
      if (UNLIKELY(IsRegionExhausted(region, class_id, meta_map_size)))
        return false;
      if (UNLIKELY(!MapWithCallback(
              GetMetadataEnd(region_beg) - region->mapped_meta - meta_map_size,
              meta_map_size, "SizeClassAllocator: region metadata")))
        return false;
      region->mapped_meta += meta_map_size;
    }

    // If necessary, allocate more space for the free array and populate it
    // with newly allocated chunks.
    const uptr total_freed_chunks = region->num_freed_chunks + new_chunks_count;
    if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg, total_freed_chunks)))
      return false;
    CompactPtrT *free_array = GetFreeArray(region_beg);
    for (uptr i = 0, chunk = region->allocated_user; i < new_chunks_count;
         i++, chunk += size)
      free_array[total_freed_chunks - 1 - i] = PointerToCompactPtr(0, chunk);
    if (kRandomShuffleChunks)
      RandomShuffle(&free_array[region->num_freed_chunks], new_chunks_count,
                    &region->rand_state);

    // All necessary memory is mapped and now it is safe to advance all
    // 'allocated_*' counters.
    region->num_freed_chunks += new_chunks_count;
    region->allocated_user += new_chunks_count * size;
    CHECK_LE(region->allocated_user, region->mapped_user);
    region->allocated_meta += new_chunks_count * kMetadataSize;
    CHECK_LE(region->allocated_meta, region->mapped_meta);
    region->exhausted = false;

    // TODO(alekseyshl): Consider bumping last_release_at_ns here to prevent
    // MaybeReleaseToOS from releasing just allocated pages or protect these
    // not yet used chunks some other way.

    return true;
  }

  // Attempts to release RAM occupied by freed chunks back to OS. The region is
  // expected to be locked.
  //
  // TODO(morehouse): Support a callback on memory release so HWASan can
  // release aliases as well.
  void MaybeReleaseToOS(MemoryMapperT *memory_mapper, uptr class_id,
                        bool force) {
    RegionInfo *region = GetRegionInfo(class_id);
    const uptr chunk_size = ClassIdToSize(class_id);
    const uptr page_size = GetPageSizeCached();

    uptr n = region->num_freed_chunks;
    if (n * chunk_size < page_size)
      return;  // No chance to release anything.
    if ((region->stats.n_freed -
         region->rtoi.n_freed_at_last_release) * chunk_size < page_size) {
      return;  // Nothing new to release.
    }

    if (!force) {
      s32 interval_ms = ReleaseToOSIntervalMs();
      if (interval_ms < 0)
        return;

      if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL >
          MonotonicNanoTime()) {
        return;  // Memory was returned recently.
      }
    }

    ReleaseFreeMemoryToOS(
        GetFreeArray(GetRegionBeginBySizeClass(class_id)), n, chunk_size,
        RoundUpTo(region->allocated_user, page_size) / page_size, memory_mapper,
        class_id);

    uptr ranges, bytes;
    if (memory_mapper->GetAndResetStats(ranges, bytes)) {
      region->rtoi.n_freed_at_last_release = region->stats.n_freed;
      region->rtoi.num_releases += ranges;
      region->rtoi.last_released_bytes = bytes;
    }
    region->rtoi.last_release_at_ns = MonotonicNanoTime();
  }
};