//===-- secondary.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_SECONDARY_H_
#define SCUDO_SECONDARY_H_
#include "chunk.h"
#include "common.h"
#include "list.h"
#include "mem_map.h"
#include "memtag.h"
#include "mutex.h"
#include "options.h"
#include "stats.h"
#include "string_utils.h"
#include "thread_annotations.h"

namespace scudo {

// This allocator wraps the platform allocation primitives, and as such is on
// the slower side and should preferably be used for larger sized allocations.
// Blocks allocated will be preceded and followed by a guard page, and hold
// their own header that is not checksummed: the guard pages and the Combined
// header should be enough for our purpose.
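
// A rough sketch of the layout produced by allocate() below (not to scale;
// exact offsets depend on the requested size and alignment):
//
//   MapBase       CommitBase              HeaderPos   AllocPos         MapEnd
//   |guard page|   ...unused slack...    | headers |  allocation  |guard page|
//
// allocate() returns HeaderPos + LargeBlock::getHeaderSize(), i.e. the address
// right past the LargeBlock::Header; the frontend's Combined allocator then
// adds its own Chunk header there before handing out the user pointer.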

namespace LargeBlock {

struct alignas(Max<uptr>(archSupportsMemoryTagging()
                             ? archMemoryTagGranuleSize()
                             : 1U << SCUDO_MIN_ALIGNMENT_LOG)) Header {
  LargeBlock::Header *Prev;
  LargeBlock::Header *Next;
  uptr CommitBase;
  uptr CommitSize;
  MemMapT MemMap;
};

static_assert(sizeof(Header) % (1U << SCUDO_MIN_ALIGNMENT_LOG) == 0, "");
static_assert(!archSupportsMemoryTagging() ||
                  sizeof(Header) % archMemoryTagGranuleSize() == 0,
              "");

constexpr uptr getHeaderSize() { return sizeof(Header); }

template <typename Config> static uptr addHeaderTag(uptr Ptr) {
  if (allocatorSupportsMemoryTagging<Config>())
    return addFixedTag(Ptr, 1);
  return Ptr;
}

template <typename Config> static Header *getHeader(uptr Ptr) {
  return reinterpret_cast<Header *>(addHeaderTag<Config>(Ptr)) - 1;
}

template <typename Config> static Header *getHeader(const void *Ptr) {
  return getHeader<Config>(reinterpret_cast<uptr>(Ptr));
}

} // namespace LargeBlock

static inline void unmap(LargeBlock::Header *H) {
  // Note that the `H->MemMap` is stored on the pages managed by itself. Take
  // over the ownership before unmap() so that any operation along with unmap()
  // won't touch inaccessible pages.
  MemMapT MemMap = H->MemMap;
  MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
}

struct CachedBlock {
  uptr CommitBase = 0;
  uptr CommitSize = 0;
  uptr BlockBegin = 0;
  MemMapT MemMap = {};
  u64 Time = 0;

  bool isValid() { return CommitBase != 0; }

  void invalidate() { CommitBase = 0; }
};

template <typename Config> class MapAllocatorNoCache {
public:
  void init(UNUSED s32 ReleaseToOsInterval) {}
  bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
                UNUSED uptr HeadersSize, UNUSED LargeBlock::Header **H,
                UNUSED bool *Zeroed) {
    return false;
  }
  void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
  bool canCache(UNUSED uptr Size) { return false; }
  void disable() {}
  void enable() {}
  void releaseToOS() {}
  void disableMemoryTagging() {}
  void unmapTestOnly() {}
  bool setOption(Option O, UNUSED sptr Value) {
    if (O == Option::ReleaseInterval || O == Option::MaxCacheEntriesCount ||
        O == Option::MaxCacheEntrySize)
      return false;
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void getStats(UNUSED ScopedString *Str) {
    Str->append("Secondary Cache Disabled\n");
  }
};

static const uptr MaxUnusedCachePages = 4U;

template <typename Config>
bool mapSecondary(const Options &Options, uptr CommitBase, uptr CommitSize,
                  uptr AllocPos, uptr Flags, MemMapT &MemMap) {
  Flags |= MAP_RESIZABLE;
  Flags |= MAP_ALLOWNOMEM;

  const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * getPageSizeCached();
  if (useMemoryTagging<Config>(Options) && CommitSize > MaxUnusedCacheBytes) {
    const uptr UntaggedPos = Max(AllocPos, CommitBase + MaxUnusedCacheBytes);
    return MemMap.remap(CommitBase, UntaggedPos - CommitBase, "scudo:secondary",
                        MAP_MEMTAG | Flags) &&
           MemMap.remap(UntaggedPos, CommitBase + CommitSize - UntaggedPos,
                        "scudo:secondary", Flags);
  } else {
    const uptr RemapFlags =
        (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags;
    return MemMap.remap(CommitBase, CommitSize, "scudo:secondary", RemapFlags);
  }
}
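
// For example (illustrative numbers): with 4 KiB pages, MaxUnusedCacheBytes is
// 16 KiB. Committing a 64 KiB block whose AllocPos lies 8 KiB past CommitBase
// yields UntaggedPos = CommitBase + 16 KiB, so the leading 16 KiB are mapped
// with MAP_MEMTAG and the remaining 48 KiB without it; presumably only the
// headers and the pages that may be retagged while the block sits in the cache
// need a taggable mapping.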

// Template specialization to avoid producing a zero-length array.
template <typename T, size_t Size> class NonZeroLengthArray {
public:
  T &operator[](uptr Idx) { return values[Idx]; }

private:
  T values[Size];
};
template <typename T> class NonZeroLengthArray<T, 0> {
public:
  T &operator[](uptr UNUSED Idx) { UNREACHABLE("Unsupported!"); }
};
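
// Illustrative only: when CacheConfig::QuarantineSize is 0, the Quarantine
// member of MapAllocatorCache below instantiates
// NonZeroLengthArray<CachedBlock, 0>, so no storage is emitted (a zero-length
// array is not valid standard C++) and any unexpected indexing trips
// UNREACHABLE instead.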

template <typename Config> class MapAllocatorCache {
public:
  using CacheConfig = typename Config::Secondary::Cache;

  void getStats(ScopedString *Str) {
    ScopedLock L(Mutex);
    u32 Integral = 0;
    u32 Fractional = 0;
    if (CallsToRetrieve != 0) {
      Integral = SuccessfulRetrieves * 100 / CallsToRetrieve;
      Fractional = (((SuccessfulRetrieves * 100) % CallsToRetrieve) * 100 +
                    CallsToRetrieve / 2) /
                   CallsToRetrieve;
    }
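    // For example, 7 successful retrievals out of 9 calls gives
    // Integral = 700 / 9 = 77 and Fractional = ((700 % 9) * 100 + 4) / 9 = 78,
    // so the SuccessRate line below reads "7/9 (77.78%)".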
    Str->append("Stats: MapAllocatorCache: EntriesCount: %d, "
                "MaxEntriesCount: %u, MaxEntrySize: %zu\n",
                EntriesCount, atomic_load_relaxed(&MaxEntriesCount),
                atomic_load_relaxed(&MaxEntrySize));
    Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
                "(%u.%02u%%)\n",
                SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
    for (CachedBlock Entry : Entries) {
      if (!Entry.isValid())
        continue;
      Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
                  "BlockSize: %zu %s\n",
                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
                  Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
    }
  }

  // Ensure the default maximum specified fits the array.
  static_assert(CacheConfig::DefaultMaxEntriesCount <=
                    CacheConfig::EntriesArraySize,
                "");

  void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(EntriesCount, 0U);
    setOption(Option::MaxCacheEntriesCount,
              static_cast<sptr>(CacheConfig::DefaultMaxEntriesCount));
    setOption(Option::MaxCacheEntrySize,
              static_cast<sptr>(CacheConfig::DefaultMaxEntrySize));
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }

  void store(const Options &Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
    if (!canCache(H->CommitSize))
      return unmap(H);

    bool EntryCached = false;
    bool EmptyCache = false;
    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
    const u64 Time = getMonotonicTimeFast();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    CachedBlock Entry;
    Entry.CommitBase = H->CommitBase;
    Entry.CommitSize = H->CommitSize;
    Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
    Entry.MemMap = H->MemMap;
    Entry.Time = Time;
    if (useMemoryTagging<Config>(Options)) {
      if (Interval == 0 && !SCUDO_FUCHSIA) {
        // Release the memory and make it inaccessible at the same time by
        // creating a new MAP_NOACCESS mapping on top of the existing mapping.
        // Fuchsia does not support replacing mappings by creating a new mapping
        // on top so we just do the two syscalls there.
        Entry.Time = 0;
        mapSecondary<Config>(Options, Entry.CommitBase, Entry.CommitSize,
                             Entry.CommitBase, MAP_NOACCESS, Entry.MemMap);
      } else {
        Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
                                         MAP_NOACCESS);
      }
    } else if (Interval == 0) {
      Entry.MemMap.releasePagesToOS(Entry.CommitBase, Entry.CommitSize);
      Entry.Time = 0;
    }
    do {
      ScopedLock L(Mutex);
      if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
        // If we get here then memory tagging was disabled in between when we
        // read Options and when we locked Mutex. We can't insert our entry into
        // the quarantine or the cache because the permissions would be wrong so
        // just unmap it.
        break;
      }
      if (CacheConfig::QuarantineSize && useMemoryTagging<Config>(Options)) {
        QuarantinePos =
            (QuarantinePos + 1) % Max(CacheConfig::QuarantineSize, 1u);
        if (!Quarantine[QuarantinePos].isValid()) {
          Quarantine[QuarantinePos] = Entry;
          return;
        }
        CachedBlock PrevEntry = Quarantine[QuarantinePos];
        Quarantine[QuarantinePos] = Entry;
        if (OldestTime == 0)
          OldestTime = Entry.Time;
        Entry = PrevEntry;
      }
      if (EntriesCount >= MaxCount) {
        if (IsFullEvents++ == 4U)
          EmptyCache = true;
      } else {
        for (u32 I = 0; I < MaxCount; I++) {
          if (Entries[I].isValid())
            continue;
          if (I != 0)
            Entries[I] = Entries[0];
          Entries[0] = Entry;
          EntriesCount++;
          if (OldestTime == 0)
            OldestTime = Entry.Time;
          EntryCached = true;
          break;
        }
      }
    } while (0);
    if (EmptyCache)
      empty();
    else if (Interval >= 0)
      releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
    if (!EntryCached)
      Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
  }

  bool retrieve(Options Options, uptr Size, uptr Alignment, uptr HeadersSize,
                LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
    const uptr PageSize = getPageSizeCached();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    // 10% of the requested size proved to be the optimal choice for
    // retrieving cached blocks after testing several options.
    constexpr u32 FragmentedBytesDivisor = 10;
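    // For example (illustrative numbers): a cached block with 1 MiB of usable
    // space past HeaderPos is returned immediately as long as at most about
    // 100 KiB (1 MiB / FragmentedBytesDivisor) would sit unused in front of
    // the headers; otherwise the loop below keeps looking for a tighter fit.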
    bool Found = false;
    CachedBlock Entry;
    uptr EntryHeaderPos = 0;
    {
      ScopedLock L(Mutex);
      CallsToRetrieve++;
      if (EntriesCount == 0)
        return false;
      u32 OptimalFitIndex = 0;
      uptr MinDiff = UINTPTR_MAX;
      for (u32 I = 0; I < MaxCount; I++) {
        if (!Entries[I].isValid())
          continue;
        const uptr CommitBase = Entries[I].CommitBase;
        const uptr CommitSize = Entries[I].CommitSize;
        const uptr AllocPos =
            roundDown(CommitBase + CommitSize - Size, Alignment);
        const uptr HeaderPos = AllocPos - HeadersSize;
        if (HeaderPos > CommitBase + CommitSize)
          continue;
        if (HeaderPos < CommitBase ||
            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
          continue;
        }
        Found = true;
        const uptr Diff = HeaderPos - CommitBase;
        // Immediately use a cached block if its size is close enough to the
        // requested size.
        const uptr MaxAllowedFragmentedBytes =
            (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
        if (Diff <= MaxAllowedFragmentedBytes) {
          OptimalFitIndex = I;
          EntryHeaderPos = HeaderPos;
          break;
        }
        // Keep track of the smallest cached block
        // that is greater than (AllocSize + HeaderSize).
        if (Diff > MinDiff)
          continue;
        OptimalFitIndex = I;
        MinDiff = Diff;
        EntryHeaderPos = HeaderPos;
      }
      if (Found) {
        Entry = Entries[OptimalFitIndex];
        Entries[OptimalFitIndex].invalidate();
        EntriesCount--;
        SuccessfulRetrieves++;
      }
    }
    if (!Found)
      return false;

    *H = reinterpret_cast<LargeBlock::Header *>(
        LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
    *Zeroed = Entry.Time == 0;
    if (useMemoryTagging<Config>(Options))
      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
    uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
    if (useMemoryTagging<Config>(Options)) {
      if (*Zeroed) {
        storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
                  NewBlockBegin);
      } else if (Entry.BlockBegin < NewBlockBegin) {
        storeTags(Entry.BlockBegin, NewBlockBegin);
      } else {
        storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
      }
    }
    (*H)->CommitBase = Entry.CommitBase;
    (*H)->CommitSize = Entry.CommitSize;
    (*H)->MemMap = Entry.MemMap;
    return true;
  }

  bool canCache(uptr Size) {
    return atomic_load_relaxed(&MaxEntriesCount) != 0U &&
           Size <= atomic_load_relaxed(&MaxEntrySize);
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval = Max(
          Min(static_cast<s32>(Value), CacheConfig::MaxReleaseToOsIntervalMs),
          CacheConfig::MinReleaseToOsIntervalMs);
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    }
    if (O == Option::MaxCacheEntriesCount) {
      const u32 MaxCount = static_cast<u32>(Value);
      if (MaxCount > CacheConfig::EntriesArraySize)
        return false;
      atomic_store_relaxed(&MaxEntriesCount, MaxCount);
      return true;
    }
    if (O == Option::MaxCacheEntrySize) {
      atomic_store_relaxed(&MaxEntrySize, static_cast<uptr>(Value));
      return true;
    }
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void releaseToOS() { releaseOlderThan(UINT64_MAX); }

  void disableMemoryTagging() EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    for (u32 I = 0; I != CacheConfig::QuarantineSize; ++I) {
      if (Quarantine[I].isValid()) {
        MemMapT &MemMap = Quarantine[I].MemMap;
        MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
        Quarantine[I].invalidate();
      }
    }
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    for (u32 I = 0; I < MaxCount; I++) {
      if (Entries[I].isValid()) {
        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
                                              Entries[I].CommitSize, 0);
      }
    }
    QuarantinePos = -1U;
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS { Mutex.lock(); }

  void enable() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }

  void unmapTestOnly() { empty(); }

private:
  void empty() {
    MemMapT MapInfo[CacheConfig::EntriesArraySize];
    uptr N = 0;
    {
      ScopedLock L(Mutex);
      for (uptr I = 0; I < CacheConfig::EntriesArraySize; I++) {
        if (!Entries[I].isValid())
          continue;
        MapInfo[N] = Entries[I].MemMap;
        Entries[I].invalidate();
        N++;
      }
      EntriesCount = 0;
      IsFullEvents = 0;
    }
    for (uptr I = 0; I < N; I++) {
      MemMapT &MemMap = MapInfo[I];
      MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
    }
  }

  void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
    if (!Entry.isValid() || !Entry.Time)
      return;
    if (Entry.Time > Time) {
      if (OldestTime == 0 || Entry.Time < OldestTime)
        OldestTime = Entry.Time;
      return;
    }
    Entry.MemMap.releasePagesToOS(Entry.CommitBase, Entry.CommitSize);
    Entry.Time = 0;
  }

  void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
      return;
    OldestTime = 0;
    for (uptr I = 0; I < CacheConfig::QuarantineSize; I++)
      releaseIfOlderThan(Quarantine[I], Time);
    for (uptr I = 0; I < CacheConfig::EntriesArraySize; I++)
      releaseIfOlderThan(Entries[I], Time);
  }

  HybridMutex Mutex;
  u32 EntriesCount GUARDED_BY(Mutex) = 0;
  u32 QuarantinePos GUARDED_BY(Mutex) = 0;
  atomic_u32 MaxEntriesCount = {};
  atomic_uptr MaxEntrySize = {};
  u64 OldestTime GUARDED_BY(Mutex) = 0;
  u32 IsFullEvents GUARDED_BY(Mutex) = 0;
  atomic_s32 ReleaseToOsIntervalMs = {};
  u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
  u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;

  CachedBlock Entries[CacheConfig::EntriesArraySize] GUARDED_BY(Mutex) = {};
  NonZeroLengthArray<CachedBlock, CacheConfig::QuarantineSize>
      Quarantine GUARDED_BY(Mutex) = {};
};

template <typename Config> class MapAllocator {
public:
  void init(GlobalStats *S,
            s32 ReleaseToOsInterval = -1) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(AllocatedBytes, 0U);
    DCHECK_EQ(FreedBytes, 0U);
    Cache.init(ReleaseToOsInterval);
    Stats.init();
    if (LIKELY(S))
      S->link(&Stats);
  }

  void *allocate(const Options &Options, uptr Size, uptr AlignmentHint = 0,
                 uptr *BlockEnd = nullptr,
                 FillContentsMode FillContents = NoFill);

  void deallocate(const Options &Options, void *Ptr);

  static uptr getBlockEnd(void *Ptr) {
    auto *B = LargeBlock::getHeader<Config>(Ptr);
    return B->CommitBase + B->CommitSize;
  }

  static uptr getBlockSize(void *Ptr) {
    return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
  }

  static constexpr uptr getHeadersSize() {
    return Chunk::getHeaderSize() + LargeBlock::getHeaderSize();
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS {
    Mutex.lock();
    Cache.disable();
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    Cache.enable();
    Mutex.unlock();
  }

  template <typename F> void iterateOverBlocks(F Callback) const {
    Mutex.assertHeld();

    for (const auto &H : InUseBlocks) {
      uptr Ptr = reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize();
      if (allocatorSupportsMemoryTagging<Config>())
        Ptr = untagPointer(Ptr);
      Callback(Ptr);
    }
  }

  bool canCache(uptr Size) { return Cache.canCache(Size); }

  bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }

  void releaseToOS() { Cache.releaseToOS(); }

  void disableMemoryTagging() { Cache.disableMemoryTagging(); }

  void unmapTestOnly() { Cache.unmapTestOnly(); }

  void getStats(ScopedString *Str);

private:
  typename Config::Secondary::template CacheT<Config> Cache;

  mutable HybridMutex Mutex;
  DoublyLinkedList<LargeBlock::Header> InUseBlocks GUARDED_BY(Mutex);
  uptr AllocatedBytes GUARDED_BY(Mutex) = 0;
  uptr FreedBytes GUARDED_BY(Mutex) = 0;
  uptr FragmentedBytes GUARDED_BY(Mutex) = 0;
  uptr LargestSize GUARDED_BY(Mutex) = 0;
  u32 NumberOfAllocs GUARDED_BY(Mutex) = 0;
  u32 NumberOfFrees GUARDED_BY(Mutex) = 0;
  LocalStats Stats GUARDED_BY(Mutex);
};
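
// Illustrative usage sketch only, with `Config` and `Opts` standing in for the
// frontend's configuration and runtime options; the combined allocator drives
// this class roughly as follows:
//
//   MapAllocator<Config> Secondary;
//   Secondary.init(/*S=*/nullptr);
//   uptr BlockEnd;
//   void *Block = Secondary.allocate(Opts, Size, /*AlignmentHint=*/0, &BlockEnd);
//   ...
//   Secondary.deallocate(Opts, Block);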

// As with the Primary, the size passed to this function includes any desired
// alignment, so that the frontend can align the user allocation. The hint
// parameter allows us to unmap spurious memory when dealing with larger
// (greater than a page) alignments on 32-bit platforms.
// Due to the sparsity of address space available on those platforms, requesting
// an allocation from the Secondary with a large alignment would end up wasting
// VA space (even though we are not committing the whole thing), hence the need
// to trim off some of the reserved space.
// For allocations requested with an alignment greater than or equal to a page,
// the committed memory will amount to something close to Size - AlignmentHint
// (pending rounding and headers).
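// For example (illustrative numbers): with 4 KiB pages and a 64 KiB alignment
// request, the reservation below is padded by Alignment - PageSize = 60 KiB so
// that a 64 KiB-aligned CommitBase can always be carved out of it; on 32-bit
// targets the now-unneeded pages in front of the realigned CommitBase and past
// the rounded-up end are then unmapped to give the VA space back.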
template <typename Config>
void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
                                     uptr Alignment, uptr *BlockEndPtr,
                                     FillContentsMode FillContents) {
  if (Options.get(OptionBit::AddLargeAllocationSlack))
    Size += 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
  const uptr PageSize = getPageSizeCached();

  // Note that cached blocks may have aligned address already. Thus we simply
  // pass the required size (`Size` + `getHeadersSize()`) to do cache look up.
  const uptr MinNeededSizeForCache = roundUp(Size + getHeadersSize(), PageSize);

  if (Alignment < PageSize && Cache.canCache(MinNeededSizeForCache)) {
    LargeBlock::Header *H;
    bool Zeroed;
    if (Cache.retrieve(Options, Size, Alignment, getHeadersSize(), &H,
                       &Zeroed)) {
      const uptr BlockEnd = H->CommitBase + H->CommitSize;
      if (BlockEndPtr)
        *BlockEndPtr = BlockEnd;
      uptr HInt = reinterpret_cast<uptr>(H);
      if (allocatorSupportsMemoryTagging<Config>())
        HInt = untagPointer(HInt);
      const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
      void *Ptr = reinterpret_cast<void *>(PtrInt);
      if (FillContents && !Zeroed)
        memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
               BlockEnd - PtrInt);
      {
        ScopedLock L(Mutex);
        InUseBlocks.push_back(H);
        AllocatedBytes += H->CommitSize;
        FragmentedBytes += H->MemMap.getCapacity() - H->CommitSize;
        NumberOfAllocs++;
        Stats.add(StatAllocated, H->CommitSize);
        Stats.add(StatMapped, H->MemMap.getCapacity());
      }
      return Ptr;
    }
  }

  uptr RoundedSize =
      roundUp(roundUp(Size, Alignment) + getHeadersSize(), PageSize);
  if (Alignment > PageSize)
    RoundedSize += Alignment - PageSize;

  ReservedMemoryT ReservedMemory;
  const uptr MapSize = RoundedSize + 2 * PageSize;
  if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr,
                                      MAP_ALLOWNOMEM))) {
    return nullptr;
  }

  // Take the entire ownership of reserved region.
  MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
                                           ReservedMemory.getCapacity());
  uptr MapBase = MemMap.getBase();
  uptr CommitBase = MapBase + PageSize;
  uptr MapEnd = MapBase + MapSize;

  // In the unlikely event of alignments larger than a page, adjust the amount
  // of memory we want to commit, and trim the extra memory.
  if (UNLIKELY(Alignment >= PageSize)) {
    // For alignments greater than or equal to a page, the user pointer (e.g.
    // the pointer that is returned by the C or C++ allocation APIs) ends up on
    // a page boundary, and our headers will live in the preceding page.
    CommitBase = roundUp(MapBase + PageSize + 1, Alignment) - PageSize;
    const uptr NewMapBase = CommitBase - PageSize;
    DCHECK_GE(NewMapBase, MapBase);
    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
    // are less constrained memory wise, and that saves us two syscalls.
    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
      MemMap.unmap(MapBase, NewMapBase - MapBase);
      MapBase = NewMapBase;
    }
    const uptr NewMapEnd =
        CommitBase + PageSize + roundUp(Size, PageSize) + PageSize;
    DCHECK_LE(NewMapEnd, MapEnd);
    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
      MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
      MapEnd = NewMapEnd;
    }
  }
= MapEnd
- PageSize
- CommitBase
;
650 const uptr AllocPos
= roundDown(CommitBase
+ CommitSize
- Size
, Alignment
);
651 if (!mapSecondary
<Config
>(Options
, CommitBase
, CommitSize
, AllocPos
, 0,
653 MemMap
.unmap(MemMap
.getBase(), MemMap
.getCapacity());
656 const uptr HeaderPos
= AllocPos
- getHeadersSize();
657 LargeBlock::Header
*H
= reinterpret_cast<LargeBlock::Header
*>(
658 LargeBlock::addHeaderTag
<Config
>(HeaderPos
));
659 if (useMemoryTagging
<Config
>(Options
))
660 storeTags(LargeBlock::addHeaderTag
<Config
>(CommitBase
),
661 reinterpret_cast<uptr
>(H
+ 1));
662 H
->CommitBase
= CommitBase
;
663 H
->CommitSize
= CommitSize
;
666 *BlockEndPtr
= CommitBase
+ CommitSize
;
669 InUseBlocks
.push_back(H
);
670 AllocatedBytes
+= CommitSize
;
671 FragmentedBytes
+= H
->MemMap
.getCapacity() - CommitSize
;
672 if (LargestSize
< CommitSize
)
673 LargestSize
= CommitSize
;
675 Stats
.add(StatAllocated
, CommitSize
);
676 Stats
.add(StatMapped
, H
->MemMap
.getCapacity());
678 return reinterpret_cast<void *>(HeaderPos
+ LargeBlock::getHeaderSize());

template <typename Config>
void MapAllocator<Config>::deallocate(const Options &Options, void *Ptr)
    EXCLUDES(Mutex) {
  LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr);
  const uptr CommitSize = H->CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.remove(H);
    FreedBytes += CommitSize;
    FragmentedBytes -= H->MemMap.getCapacity() - CommitSize;
    NumberOfFrees++;
    Stats.sub(StatAllocated, CommitSize);
    Stats.sub(StatMapped, H->MemMap.getCapacity());
  }
  Cache.store(Options, H);
}

template <typename Config>
void MapAllocator<Config>::getStats(ScopedString *Str) EXCLUDES(Mutex) {
  ScopedLock L(Mutex);
  Str->append("Stats: MapAllocator: allocated %u times (%zuK), freed %u times "
              "(%zuK), remains %u (%zuK) max %zuM, Fragmented %zuK\n",
              NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
              FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
              (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20,
              FragmentedBytes >> 10);
  Cache.getStats(Str);
}

} // namespace scudo

#endif // SCUDO_SECONDARY_H_