//===-- secondary.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_SECONDARY_H_
#define SCUDO_SECONDARY_H_

#include "chunk.h"
#include "common.h"
#include "list.h"
#include "mem_map.h"
#include "memtag.h"
#include "mutex.h"
#include "options.h"
#include "stats.h"
#include "string_utils.h"
#include "thread_annotations.h"

namespace scudo {

// This allocator wraps the platform allocation primitives, and as such is on
// the slower side and should preferably be used for larger sized allocations.
// Blocks allocated will be preceded and followed by a guard page, and hold
// their own header that is not checksummed: the guard pages and the Combined
// header should be enough for our purpose.
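//
// For illustration only, a frontend would typically drive this allocator
// roughly as follows (names as declared further down in this file; error
// handling omitted):
//
//   MapAllocator<Config> Secondary;
//   Secondary.init(/*S=*/nullptr, /*ReleaseToOsInterval=*/-1);
//   uptr BlockEnd;
//   void *Ptr = Secondary.allocate(Options, /*Size=*/1U << 20,
//                                  /*AlignmentHint=*/0, &BlockEnd);
//   ...
//   Secondary.deallocate(Options, Ptr);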

namespace LargeBlock {

struct alignas(Max<uptr>(archSupportsMemoryTagging()
                             ? archMemoryTagGranuleSize()
                             : 1U << SCUDO_MIN_ALIGNMENT_LOG)) Header {
  LargeBlock::Header *Prev;
  LargeBlock::Header *Next;
  uptr CommitBase;
  uptr CommitSize;
  MemMapT MemMap;
};

static_assert(sizeof(Header) % (1U << SCUDO_MIN_ALIGNMENT_LOG) == 0, "");
static_assert(!archSupportsMemoryTagging() ||
                  sizeof(Header) % archMemoryTagGranuleSize() == 0,
              "");
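
// Note (illustrative): on AArch64 with MTE the tag granule is 16 bytes, so the
// alignas above together with the static_asserts keeps the Header a whole
// number of granules, which is required for the header to live in tagged
// memory.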

constexpr uptr getHeaderSize() { return sizeof(Header); }

template <typename Config> static uptr addHeaderTag(uptr Ptr) {
  if (allocatorSupportsMemoryTagging<Config>())
    return addFixedTag(Ptr, 1);
  return Ptr;
}

template <typename Config> static Header *getHeader(uptr Ptr) {
  return reinterpret_cast<Header *>(addHeaderTag<Config>(Ptr)) - 1;
}

template <typename Config> static Header *getHeader(const void *Ptr) {
  return getHeader<Config>(reinterpret_cast<uptr>(Ptr));
}

} // namespace LargeBlock

static inline void unmap(LargeBlock::Header *H) {
  // Note that the `H->MemMap` is stored on the pages managed by itself. Take
  // over the ownership before unmap() so that any operation along with unmap()
  // won't touch inaccessible pages.
  MemMapT MemMap = H->MemMap;
  MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
}

struct CachedBlock {
  uptr CommitBase = 0;
  uptr CommitSize = 0;
  uptr BlockBegin = 0;
  MemMapT MemMap = {};
  u64 Time = 0;

  bool isValid() { return CommitBase != 0; }

  void invalidate() { CommitBase = 0; }
};
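
// Note (illustrative): a Time of 0 is used as a marker that the entry's pages
// have already been released or made inaccessible; retrieve() reports such
// entries as zeroed, and getStats() prints them with the "[R]" suffix.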

template <typename Config> class MapAllocatorNoCache {
public:
  void init(UNUSED s32 ReleaseToOsInterval) {}
  bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
                UNUSED uptr HeadersSize, UNUSED LargeBlock::Header **H,
                UNUSED bool *Zeroed) {
    return false;
  }
  void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
  bool canCache(UNUSED uptr Size) { return false; }
  void disable() {}
  void enable() {}
  void releaseToOS() {}
  void disableMemoryTagging() {}
  void unmapTestOnly() {}
  bool setOption(Option O, UNUSED sptr Value) {
    if (O == Option::ReleaseInterval || O == Option::MaxCacheEntriesCount ||
        O == Option::MaxCacheEntrySize)
      return false;
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void getStats(UNUSED ScopedString *Str) {
    Str->append("Secondary Cache Disabled\n");
  }
};

static const uptr MaxUnusedCachePages = 4U;

template <typename Config>
bool mapSecondary(const Options &Options, uptr CommitBase, uptr CommitSize,
                  uptr AllocPos, uptr Flags, MemMapT &MemMap) {
  Flags |= MAP_RESIZABLE;
  Flags |= MAP_ALLOWNOMEM;

  const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * getPageSizeCached();
  if (useMemoryTagging<Config>(Options) && CommitSize > MaxUnusedCacheBytes) {
    const uptr UntaggedPos = Max(AllocPos, CommitBase + MaxUnusedCacheBytes);
    return MemMap.remap(CommitBase, UntaggedPos - CommitBase, "scudo:secondary",
                        MAP_MEMTAG | Flags) &&
           MemMap.remap(UntaggedPos, CommitBase + CommitSize - UntaggedPos,
                        "scudo:secondary", Flags);
  } else {
    const uptr RemapFlags =
        (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags;
    return MemMap.remap(CommitBase, CommitSize, "scudo:secondary", RemapFlags);
  }
}
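
// For illustration, assuming a 4 KiB page size, MaxUnusedCacheBytes is 16 KiB:
// a large MTE-enabled commit is split so that only the front of the region (up
// to UntaggedPos, which covers the headers and any slack a later cache
// retrieval may use) is mapped with MAP_MEMTAG, while the bulk of the user
// block is mapped untagged.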

// Template specialization to avoid producing zero-length array
template <typename T, size_t Size> class NonZeroLengthArray {
public:
  T &operator[](uptr Idx) { return values[Idx]; }

private:
  T values[Size];
};
template <typename T> class NonZeroLengthArray<T, 0> {
public:
  T &operator[](uptr UNUSED Idx) { UNREACHABLE("Unsupported!"); }
};
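
// For example, the Quarantine member further down is declared as
// NonZeroLengthArray<CachedBlock, CacheConfig::QuarantineSize>, which must
// still compile when a configuration sets QuarantineSize to 0.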

template <typename Config> class MapAllocatorCache {
public:
  using CacheConfig = typename Config::Secondary::Cache;

  void getStats(ScopedString *Str) {
    ScopedLock L(Mutex);
    uptr Integral;
    uptr Fractional;
    computePercentage(SuccessfulRetrieves, CallsToRetrieve, &Integral,
                      &Fractional);
    Str->append("Stats: MapAllocatorCache: EntriesCount: %d, "
                "MaxEntriesCount: %u, MaxEntrySize: %zu\n",
                EntriesCount, atomic_load_relaxed(&MaxEntriesCount),
                atomic_load_relaxed(&MaxEntrySize));
    Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
                "(%zu.%02zu%%)\n",
                SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
    for (CachedBlock Entry : Entries) {
      if (!Entry.isValid())
        continue;
      Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
                  "BlockSize: %zu %s\n",
                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
                  Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
    }
  }

  // Ensure the default maximum specified fits the array.
  static_assert(CacheConfig::DefaultMaxEntriesCount <=
                    CacheConfig::EntriesArraySize,
                "");

  void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(EntriesCount, 0U);
    setOption(Option::MaxCacheEntriesCount,
              static_cast<sptr>(CacheConfig::DefaultMaxEntriesCount));
    setOption(Option::MaxCacheEntrySize,
              static_cast<sptr>(CacheConfig::DefaultMaxEntrySize));
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }

  void store(const Options &Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
    if (!canCache(H->CommitSize))
      return unmap(H);

    bool EntryCached = false;
    bool EmptyCache = false;
    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
    const u64 Time = getMonotonicTimeFast();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    CachedBlock Entry;
    Entry.CommitBase = H->CommitBase;
    Entry.CommitSize = H->CommitSize;
    Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
    Entry.MemMap = H->MemMap;
    Entry.Time = Time;
    if (useMemoryTagging<Config>(Options)) {
      if (Interval == 0 && !SCUDO_FUCHSIA) {
        // Release the memory and make it inaccessible at the same time by
        // creating a new MAP_NOACCESS mapping on top of the existing mapping.
        // Fuchsia does not support replacing mappings by creating a new mapping
        // on top so we just do the two syscalls there.
        Entry.Time = 0;
        mapSecondary<Config>(Options, Entry.CommitBase, Entry.CommitSize,
                             Entry.CommitBase, MAP_NOACCESS, Entry.MemMap);
      } else {
        Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
                                         MAP_NOACCESS);
      }
    } else if (Interval == 0) {
      Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
      Entry.Time = 0;
    }
    do {
      ScopedLock L(Mutex);
      if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
        // If we get here then memory tagging was disabled in between when we
        // read Options and when we locked Mutex. We can't insert our entry into
        // the quarantine or the cache because the permissions would be wrong so
        // just unmap it.
        break;
      }
      if (CacheConfig::QuarantineSize && useMemoryTagging<Config>(Options)) {
        QuarantinePos =
            (QuarantinePos + 1) % Max(CacheConfig::QuarantineSize, 1u);
        if (!Quarantine[QuarantinePos].isValid()) {
          Quarantine[QuarantinePos] = Entry;
          return;
        }
        CachedBlock PrevEntry = Quarantine[QuarantinePos];
        Quarantine[QuarantinePos] = Entry;
        if (OldestTime == 0)
          OldestTime = Entry.Time;
        Entry = PrevEntry;
      }
      if (EntriesCount >= MaxCount) {
        if (IsFullEvents++ == 4U)
          EmptyCache = true;
      } else {
        for (u32 I = 0; I < MaxCount; I++) {
          if (Entries[I].isValid())
            continue;
          if (I != 0)
            Entries[I] = Entries[0];
          Entries[0] = Entry;
          EntriesCount++;
          if (OldestTime == 0)
            OldestTime = Entry.Time;
          EntryCached = true;
          break;
        }
      }
    } while (0);
    if (EmptyCache)
      empty();
    else if (Interval >= 0)
      releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
    if (!EntryCached)
      Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
  }
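
  // For illustration, Interval is expressed in milliseconds while Time comes
  // from getMonotonicTimeFast() in nanoseconds, hence the factor of 1000000
  // above: with Interval = 1000, entries that have sat in the cache for more
  // than one second become eligible for release.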

  bool retrieve(Options Options, uptr Size, uptr Alignment, uptr HeadersSize,
                LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
    const uptr PageSize = getPageSizeCached();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    // 10% of the requested size proved to be the optimal choice for
    // retrieving cached blocks after testing several options.
    constexpr u32 FragmentedBytesDivisor = 10;
    bool Found = false;
    CachedBlock Entry;
    uptr EntryHeaderPos = 0;
    {
      ScopedLock L(Mutex);
      CallsToRetrieve++;
      if (EntriesCount == 0)
        return false;
      u32 OptimalFitIndex = 0;
      uptr MinDiff = UINTPTR_MAX;
      for (u32 I = 0; I < MaxCount; I++) {
        if (!Entries[I].isValid())
          continue;
        const uptr CommitBase = Entries[I].CommitBase;
        const uptr CommitSize = Entries[I].CommitSize;
        const uptr AllocPos =
            roundDown(CommitBase + CommitSize - Size, Alignment);
        const uptr HeaderPos = AllocPos - HeadersSize;
        if (HeaderPos > CommitBase + CommitSize)
          continue;
        if (HeaderPos < CommitBase ||
            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
          continue;
        }
        Found = true;
        const uptr Diff = HeaderPos - CommitBase;
        // Immediately use a cached block if its size is close enough to the
        // requested size.
        const uptr MaxAllowedFragmentedBytes =
            (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
        if (Diff <= MaxAllowedFragmentedBytes) {
          OptimalFitIndex = I;
          EntryHeaderPos = HeaderPos;
          break;
        }
        // Keep track of the smallest cached block
        // that is greater than (AllocSize + HeaderSize).
        if (Diff > MinDiff)
          continue;
        OptimalFitIndex = I;
        MinDiff = Diff;
        EntryHeaderPos = HeaderPos;
      }
      if (Found) {
        Entry = Entries[OptimalFitIndex];
        Entries[OptimalFitIndex].invalidate();
        EntriesCount--;
        SuccessfulRetrieves++;
      }
    }
    if (!Found)
      return false;

    *H = reinterpret_cast<LargeBlock::Header *>(
        LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
    *Zeroed = Entry.Time == 0;
    if (useMemoryTagging<Config>(Options))
      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
    uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
    if (useMemoryTagging<Config>(Options)) {
      if (*Zeroed) {
        storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
                  NewBlockBegin);
      } else if (Entry.BlockBegin < NewBlockBegin) {
        storeTags(Entry.BlockBegin, NewBlockBegin);
      } else {
        storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
      }
    }
    (*H)->CommitBase = Entry.CommitBase;
    (*H)->CommitSize = Entry.CommitSize;
    (*H)->MemMap = Entry.MemMap;
    return true;
  }
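
  // For illustration (assuming 4 KiB pages, small alignment and ignoring the
  // header sizes): a 128 KiB cached block serves a 120 KiB request on the
  // spot, since the ~8 KiB gap between CommitBase and HeaderPos is within the
  // ~12 KiB allowance derived from FragmentedBytesDivisor; a 114 KiB request
  // leaves a ~14 KiB gap, which exceeds its ~11 KiB allowance, so that entry
  // is only remembered as the current best fit via MinDiff / OptimalFitIndex.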

  bool canCache(uptr Size) {
    return atomic_load_relaxed(&MaxEntriesCount) != 0U &&
           Size <= atomic_load_relaxed(&MaxEntrySize);
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval = Max(
          Min(static_cast<s32>(Value), CacheConfig::MaxReleaseToOsIntervalMs),
          CacheConfig::MinReleaseToOsIntervalMs);
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    }
    if (O == Option::MaxCacheEntriesCount) {
      const u32 MaxCount = static_cast<u32>(Value);
      if (MaxCount > CacheConfig::EntriesArraySize)
        return false;
      atomic_store_relaxed(&MaxEntriesCount, MaxCount);
      return true;
    }
    if (O == Option::MaxCacheEntrySize) {
      atomic_store_relaxed(&MaxEntrySize, static_cast<uptr>(Value));
      return true;
    }
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }
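
  // For illustration, setting Option::MaxCacheEntriesCount to 0 effectively
  // disables the cache, since canCache() then rejects every size; values
  // larger than CacheConfig::EntriesArraySize are rejected outright.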

  void releaseToOS() { releaseOlderThan(UINT64_MAX); }

  void disableMemoryTagging() EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    for (u32 I = 0; I != CacheConfig::QuarantineSize; ++I) {
      if (Quarantine[I].isValid()) {
        MemMapT &MemMap = Quarantine[I].MemMap;
        MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
        Quarantine[I].invalidate();
      }
    }
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    for (u32 I = 0; I < MaxCount; I++) {
      if (Entries[I].isValid()) {
        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
                                              Entries[I].CommitSize, 0);
      }
    }
    QuarantinePos = -1U;
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS { Mutex.lock(); }

  void enable() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }

  void unmapTestOnly() { empty(); }

private:
  void empty() {
    MemMapT MapInfo[CacheConfig::EntriesArraySize];
    uptr N = 0;
    {
      ScopedLock L(Mutex);
      for (uptr I = 0; I < CacheConfig::EntriesArraySize; I++) {
        if (!Entries[I].isValid())
          continue;
        MapInfo[N] = Entries[I].MemMap;
        Entries[I].invalidate();
        N++;
      }
      EntriesCount = 0;
      IsFullEvents = 0;
    }
    for (uptr I = 0; I < N; I++) {
      MemMapT &MemMap = MapInfo[I];
      MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
    }
  }

  void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
    if (!Entry.isValid() || !Entry.Time)
      return;
    if (Entry.Time > Time) {
      if (OldestTime == 0 || Entry.Time < OldestTime)
        OldestTime = Entry.Time;
      return;
    }
    Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
    Entry.Time = 0;
  }

  void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
      return;
    OldestTime = 0;
    for (uptr I = 0; I < CacheConfig::QuarantineSize; I++)
      releaseIfOlderThan(Quarantine[I], Time);
    for (uptr I = 0; I < CacheConfig::EntriesArraySize; I++)
      releaseIfOlderThan(Entries[I], Time);
  }

  HybridMutex Mutex;
  u32 EntriesCount GUARDED_BY(Mutex) = 0;
  u32 QuarantinePos GUARDED_BY(Mutex) = 0;
  atomic_u32 MaxEntriesCount = {};
  atomic_uptr MaxEntrySize = {};
  u64 OldestTime GUARDED_BY(Mutex) = 0;
  u32 IsFullEvents GUARDED_BY(Mutex) = 0;
  atomic_s32 ReleaseToOsIntervalMs = {};
  u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
  u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;

  CachedBlock Entries[CacheConfig::EntriesArraySize] GUARDED_BY(Mutex) = {};
  NonZeroLengthArray<CachedBlock, CacheConfig::QuarantineSize>
      Quarantine GUARDED_BY(Mutex) = {};
};

template <typename Config> class MapAllocator {
public:
  void init(GlobalStats *S,
            s32 ReleaseToOsInterval = -1) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(AllocatedBytes, 0U);
    DCHECK_EQ(FreedBytes, 0U);
    Cache.init(ReleaseToOsInterval);
    Stats.init();
    if (LIKELY(S))
      S->link(&Stats);
  }

  void *allocate(const Options &Options, uptr Size, uptr AlignmentHint = 0,
                 uptr *BlockEnd = nullptr,
                 FillContentsMode FillContents = NoFill);

  void deallocate(const Options &Options, void *Ptr);

  static uptr getBlockEnd(void *Ptr) {
    auto *B = LargeBlock::getHeader<Config>(Ptr);
    return B->CommitBase + B->CommitSize;
  }

  static uptr getBlockSize(void *Ptr) {
    return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
  }

  static constexpr uptr getHeadersSize() {
    return Chunk::getHeaderSize() + LargeBlock::getHeaderSize();
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS {
    Mutex.lock();
    Cache.disable();
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    Cache.enable();
    Mutex.unlock();
  }

  template <typename F> void iterateOverBlocks(F Callback) const {
    Mutex.assertHeld();

    for (const auto &H : InUseBlocks) {
      uptr Ptr = reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize();
      if (allocatorSupportsMemoryTagging<Config>())
        Ptr = untagPointer(Ptr);
      Callback(Ptr);
    }
  }

  bool canCache(uptr Size) { return Cache.canCache(Size); }

  bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }

  void releaseToOS() { Cache.releaseToOS(); }

  void disableMemoryTagging() { Cache.disableMemoryTagging(); }

  void unmapTestOnly() { Cache.unmapTestOnly(); }

  void getStats(ScopedString *Str);

private:
  typename Config::Secondary::template CacheT<Config> Cache;

  mutable HybridMutex Mutex;
  DoublyLinkedList<LargeBlock::Header> InUseBlocks GUARDED_BY(Mutex);
  uptr AllocatedBytes GUARDED_BY(Mutex) = 0;
  uptr FreedBytes GUARDED_BY(Mutex) = 0;
  uptr FragmentedBytes GUARDED_BY(Mutex) = 0;
  uptr LargestSize GUARDED_BY(Mutex) = 0;
  u32 NumberOfAllocs GUARDED_BY(Mutex) = 0;
  u32 NumberOfFrees GUARDED_BY(Mutex) = 0;
  LocalStats Stats GUARDED_BY(Mutex);
};

// As with the Primary, the size passed to this function includes any desired
// alignment, so that the frontend can align the user allocation. The hint
// parameter allows us to unmap spurious memory when dealing with larger
// (greater than a page) alignments on 32-bit platforms.
// Due to the sparsity of address space available on those platforms, requesting
// an allocation from the Secondary with a large alignment would end up wasting
// VA space (even though we are not committing the whole thing), hence the need
// to trim off some of the reserved space.
// For allocations requested with an alignment greater than or equal to a page,
// the committed memory will amount to something close to Size - AlignmentHint
// (pending rounding and headers).
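//
// For illustration (assuming a 4 KiB page size): a 32-bit request for
// Size = 1 MiB + 64 KiB with AlignmentHint = 64 KiB reserves the fully padded
// region up front, then the trimming below unmaps the leading and trailing
// excess so that roughly Size - AlignmentHint (about 1 MiB, plus headers and
// rounding) stays mapped and committed.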
template <typename Config>
void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
                                     uptr Alignment, uptr *BlockEndPtr,
                                     FillContentsMode FillContents) {
  if (Options.get(OptionBit::AddLargeAllocationSlack))
    Size += 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
  const uptr PageSize = getPageSizeCached();

  // Note that cached blocks may already have an aligned address. Thus we simply
  // pass the required size (`Size` + `getHeadersSize()`) to do the cache lookup.
  const uptr MinNeededSizeForCache = roundUp(Size + getHeadersSize(), PageSize);

  if (Alignment < PageSize && Cache.canCache(MinNeededSizeForCache)) {
    LargeBlock::Header *H;
    bool Zeroed;
    if (Cache.retrieve(Options, Size, Alignment, getHeadersSize(), &H,
                       &Zeroed)) {
      const uptr BlockEnd = H->CommitBase + H->CommitSize;
      if (BlockEndPtr)
        *BlockEndPtr = BlockEnd;
      uptr HInt = reinterpret_cast<uptr>(H);
      if (allocatorSupportsMemoryTagging<Config>())
        HInt = untagPointer(HInt);
      const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
      void *Ptr = reinterpret_cast<void *>(PtrInt);
      if (FillContents && !Zeroed)
        memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
               BlockEnd - PtrInt);
      {
        ScopedLock L(Mutex);
        InUseBlocks.push_back(H);
        AllocatedBytes += H->CommitSize;
        FragmentedBytes += H->MemMap.getCapacity() - H->CommitSize;
        NumberOfAllocs++;
        Stats.add(StatAllocated, H->CommitSize);
        Stats.add(StatMapped, H->MemMap.getCapacity());
      }
      return Ptr;
    }
  }

  uptr RoundedSize =
      roundUp(roundUp(Size, Alignment) + getHeadersSize(), PageSize);
  if (Alignment > PageSize)
    RoundedSize += Alignment - PageSize;

  ReservedMemoryT ReservedMemory;
  const uptr MapSize = RoundedSize + 2 * PageSize;
  if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr,
                                      MAP_ALLOWNOMEM))) {
    return nullptr;
  }

  // Take the entire ownership of reserved region.
  MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
                                           ReservedMemory.getCapacity());
  uptr MapBase = MemMap.getBase();
  uptr CommitBase = MapBase + PageSize;
  uptr MapEnd = MapBase + MapSize;

  // In the unlikely event of alignments larger than a page, adjust the amount
  // of memory we want to commit, and trim the extra memory.
  if (UNLIKELY(Alignment >= PageSize)) {
    // For alignments greater than or equal to a page, the user pointer (e.g.,
    // the pointer that is returned by the C or C++ allocation APIs) ends up on
    // a page boundary, and our headers will live in the preceding page.
    CommitBase = roundUp(MapBase + PageSize + 1, Alignment) - PageSize;
    const uptr NewMapBase = CommitBase - PageSize;
    DCHECK_GE(NewMapBase, MapBase);
    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
    // are less constrained memory-wise, and that saves us two syscalls.
    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
      MemMap.unmap(MapBase, NewMapBase - MapBase);
      MapBase = NewMapBase;
    }
    const uptr NewMapEnd =
        CommitBase + PageSize + roundUp(Size, PageSize) + PageSize;
    DCHECK_LE(NewMapEnd, MapEnd);
    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
      MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
      MapEnd = NewMapEnd;
    }
  }

  const uptr CommitSize = MapEnd - PageSize - CommitBase;
  const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
  if (!mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0,
                            MemMap)) {
    MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
    return nullptr;
  }
  const uptr HeaderPos = AllocPos - getHeadersSize();
  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
      LargeBlock::addHeaderTag<Config>(HeaderPos));
  if (useMemoryTagging<Config>(Options))
    storeTags(LargeBlock::addHeaderTag<Config>(CommitBase),
              reinterpret_cast<uptr>(H + 1));
  H->CommitBase = CommitBase;
  H->CommitSize = CommitSize;
  H->MemMap = MemMap;
  if (BlockEndPtr)
    *BlockEndPtr = CommitBase + CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.push_back(H);
    AllocatedBytes += CommitSize;
    FragmentedBytes += H->MemMap.getCapacity() - CommitSize;
    if (LargestSize < CommitSize)
      LargestSize = CommitSize;
    NumberOfAllocs++;
    Stats.add(StatAllocated, CommitSize);
    Stats.add(StatMapped, H->MemMap.getCapacity());
  }
  return reinterpret_cast<void *>(HeaderPos + LargeBlock::getHeaderSize());
}
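
// For illustration, with the default (sub-page) alignment the mapping created
// above is laid out as [ guard page | committed region | guard page ], where
// CommitBase = MapBase + PageSize and CommitSize = MapSize - 2 * PageSize.
// AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment) places the
// user block at the very end of the committed region (modulo alignment
// rounding), right below the trailing guard page, with the LargeBlock and
// Chunk headers immediately below it at HeaderPos = AllocPos - getHeadersSize().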

template <typename Config>
void MapAllocator<Config>::deallocate(const Options &Options, void *Ptr)
    EXCLUDES(Mutex) {
  LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr);
  const uptr CommitSize = H->CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.remove(H);
    FreedBytes += CommitSize;
    FragmentedBytes -= H->MemMap.getCapacity() - CommitSize;
    NumberOfFrees++;
    Stats.sub(StatAllocated, CommitSize);
    Stats.sub(StatMapped, H->MemMap.getCapacity());
  }
  Cache.store(Options, H);
}

template <typename Config>
void MapAllocator<Config>::getStats(ScopedString *Str) EXCLUDES(Mutex) {
  ScopedLock L(Mutex);
  Str->append("Stats: MapAllocator: allocated %u times (%zuK), freed %u times "
              "(%zuK), remains %u (%zuK) max %zuM, Fragmented %zuK\n",
              NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
              FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
              (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20,
              FragmentedBytes >> 10);
}

} // namespace scudo

#endif // SCUDO_SECONDARY_H_