//===-- combined.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_COMBINED_H_
#define SCUDO_COMBINED_H_

#include "allocator_config_wrapper.h"
#include "atomic_helpers.h"
#include "chunk.h"
#include "common.h"
#include "flags.h"
#include "flags_parser.h"
#include "local_cache.h"
#include "mem_map.h"
#include "memtag.h"
#include "options.h"
#include "quarantine.h"
#include "release.h"
#include "report.h"
#include "secondary.h"
#include "stack_depot.h"
#include "string_utils.h"
#include "tsd.h"

#include "scudo/interface.h"

#ifdef GWP_ASAN_HOOKS
#include "gwp_asan/guarded_pool_allocator.h"
#include "gwp_asan/optional/backtrace.h"
#include "gwp_asan/optional/segv_handler.h"
#endif // GWP_ASAN_HOOKS
38 extern "C" inline void EmptyCallback() {}
40 #ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
41 // This function is not part of the NDK so it does not appear in any public
42 // header files. We only declare/use it when targeting the platform.
43 extern "C" size_t android_unsafe_frame_pointer_chase(scudo::uptr
*buf
,

template <class Config, void (*PostInitCallback)(void) = EmptyCallback>
class Allocator {
public:
  using AllocatorConfig = BaseConfig<Config>;
  using PrimaryT =
      typename AllocatorConfig::template PrimaryT<PrimaryConfig<Config>>;
  using SecondaryT =
      typename AllocatorConfig::template SecondaryT<SecondaryConfig<Config>>;
  using CacheT = typename PrimaryT::CacheT;
  typedef Allocator<Config, PostInitCallback> ThisT;
  typedef typename AllocatorConfig::template TSDRegistryT<ThisT> TSDRegistryT;

  void callPostInitCallback() {
    pthread_once(&PostInitNonce, PostInitCallback);
  }
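
  // Usage sketch (added commentary, illustrative only): a platform front end
  // typically instantiates this combined allocator with one of the predefined
  // configs and routes malloc/free through it, e.g.:
  //   using MyAllocator = scudo::Allocator<scudo::DefaultConfig>;
  //   static MyAllocator Instance;  // hypothetical instance name
  //   void *P = Instance.allocate(Size, scudo::Chunk::Origin::Malloc);
  //   Instance.deallocate(P, scudo::Chunk::Origin::Malloc);
  // `DefaultConfig` is assumed to come from allocator_config.h; the real
  // wrappers live in the wrappers_c/wrappers_cpp sources.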

  struct QuarantineCallback {
    explicit QuarantineCallback(ThisT &Instance, CacheT &LocalCache)
        : Allocator(Instance), Cache(LocalCache) {}

    // Chunk recycling function, returns a quarantined chunk to the backend,
    // first making sure it hasn't been tampered with.
    void recycle(void *Ptr) {
      Chunk::UnpackedHeader Header;
      Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
      if (UNLIKELY(Header.State != Chunk::State::Quarantined))
        reportInvalidChunkState(AllocatorAction::Recycling, Ptr);

      Header.State = Chunk::State::Available;
      Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);

      if (allocatorSupportsMemoryTagging<AllocatorConfig>())
        Ptr = untagPointer(Ptr);
      void *BlockBegin = Allocator::getBlockBegin(Ptr, &Header);
      Cache.deallocate(Header.ClassId, BlockBegin);
    }

    // We take a shortcut when allocating a quarantine batch by working with the
    // appropriate class ID instead of using Size. The compiler should optimize
    // the class ID computation and work with the associated cache directly.
    void *allocate(UNUSED uptr Size) {
      const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
          sizeof(QuarantineBatch) + Chunk::getHeaderSize());
      void *Ptr = Cache.allocate(QuarantineClassId);
      // Quarantine batch allocation failure is fatal.
      if (UNLIKELY(!Ptr))
        reportOutOfMemory(SizeClassMap::getSizeByClassId(QuarantineClassId));

      Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) +
                                     Chunk::getHeaderSize());
      Chunk::UnpackedHeader Header = {};
      Header.ClassId = QuarantineClassId & Chunk::ClassIdMask;
      Header.SizeOrUnusedBytes = sizeof(QuarantineBatch);
      Header.State = Chunk::State::Allocated;
      Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);

      // Reset tag to 0 as this chunk may have been previously used for a
      // tagged user allocation.
      if (UNLIKELY(useMemoryTagging<AllocatorConfig>(
              Allocator.Primary.Options.load())))
        storeTags(reinterpret_cast<uptr>(Ptr),
                  reinterpret_cast<uptr>(Ptr) + sizeof(QuarantineBatch));

      return Ptr;
    }

    void deallocate(void *Ptr) {
      const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
          sizeof(QuarantineBatch) + Chunk::getHeaderSize());
      Chunk::UnpackedHeader Header;
      Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);

      if (UNLIKELY(Header.State != Chunk::State::Allocated))
        reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
      DCHECK_EQ(Header.ClassId, QuarantineClassId);
      DCHECK_EQ(Header.Offset, 0);
      DCHECK_EQ(Header.SizeOrUnusedBytes, sizeof(QuarantineBatch));

      Header.State = Chunk::State::Available;
      Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
      Cache.deallocate(QuarantineClassId,
                       reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
                                                Chunk::getHeaderSize()));
    }

  private:
    ThisT &Allocator;
    CacheT &Cache;
  };

  typedef GlobalQuarantine<QuarantineCallback, void> QuarantineT;
  typedef typename QuarantineT::CacheT QuarantineCacheT;

  void init() {
    // Make sure that the page size is initialized if it's not a constant.
    CHECK_NE(getPageSizeCached(), 0U);

    performSanityChecks();

    // Check if hardware CRC32 is supported in the binary and by the platform,
    // if so, opt for the CRC32 hardware version of the checksum.
    if (&computeHardwareCRC32 && hasHardwareCRC32())
      HashAlgorithm = Checksum::HardwareCRC32;

    if (UNLIKELY(!getRandom(&Cookie, sizeof(Cookie))))
      Cookie = static_cast<u32>(getMonotonicTime() ^
                                (reinterpret_cast<uptr>(this) >> 4));

    initFlags();
    reportUnrecognizedFlags();

    // Store some flags locally.
    if (getFlags()->may_return_null)
      Primary.Options.set(OptionBit::MayReturnNull);
    if (getFlags()->zero_contents)
      Primary.Options.setFillContentsMode(ZeroFill);
    else if (getFlags()->pattern_fill_contents)
      Primary.Options.setFillContentsMode(PatternOrZeroFill);
    if (getFlags()->dealloc_type_mismatch)
      Primary.Options.set(OptionBit::DeallocTypeMismatch);
    if (getFlags()->delete_size_mismatch)
      Primary.Options.set(OptionBit::DeleteSizeMismatch);
    if (allocatorSupportsMemoryTagging<AllocatorConfig>() &&
        systemSupportsMemoryTagging())
      Primary.Options.set(OptionBit::UseMemoryTagging);

    QuarantineMaxChunkSize =
        static_cast<u32>(getFlags()->quarantine_max_chunk_size);

    Stats.init();
    // TODO(chiahungduan): Given that we support setting the default value in
    // the PrimaryConfig and CacheConfig, consider deprecating the use of the
    // `release_to_os_interval_ms` flag.
    const s32 ReleaseToOsIntervalMs = getFlags()->release_to_os_interval_ms;
    Primary.init(ReleaseToOsIntervalMs);
    Secondary.init(&Stats, ReleaseToOsIntervalMs);
    Quarantine.init(
        static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
        static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
  }

  void enableRingBuffer() NO_THREAD_SAFETY_ANALYSIS {
    AllocationRingBuffer *RB = getRingBuffer();
    if (RB)
      RB->Depot->enable();
    RingBufferInitLock.unlock();
  }

  void disableRingBuffer() NO_THREAD_SAFETY_ANALYSIS {
    RingBufferInitLock.lock();
    AllocationRingBuffer *RB = getRingBuffer();
    if (RB)
      RB->Depot->disable();
  }

  // Initialize the embedded GWP-ASan instance. Requires the main allocator to
  // be functional, best called from PostInitCallback.
  void initGwpAsan() {
#ifdef GWP_ASAN_HOOKS
    gwp_asan::options::Options Opt;
    Opt.Enabled = getFlags()->GWP_ASAN_Enabled;
    Opt.MaxSimultaneousAllocations =
        getFlags()->GWP_ASAN_MaxSimultaneousAllocations;
    Opt.SampleRate = getFlags()->GWP_ASAN_SampleRate;
    Opt.InstallSignalHandlers = getFlags()->GWP_ASAN_InstallSignalHandlers;
    Opt.Recoverable = getFlags()->GWP_ASAN_Recoverable;
    // Embedded GWP-ASan is locked through the Scudo atfork handler (via
    // Allocator::disable calling GWPASan.disable). Disable GWP-ASan's atfork
    // handler.
    Opt.InstallForkHandlers = false;
    Opt.Backtrace = gwp_asan::backtrace::getBacktraceFunction();
    GuardedAlloc.init(Opt);

    if (Opt.InstallSignalHandlers)
      gwp_asan::segv_handler::installSignalHandlers(
          &GuardedAlloc, Printf,
          gwp_asan::backtrace::getPrintBacktraceFunction(),
          gwp_asan::backtrace::getSegvBacktraceFunction(),
          Opt.Recoverable);

    GuardedAllocSlotSize =
        GuardedAlloc.getAllocatorState()->maximumAllocationSize();
    Stats.add(StatFree, static_cast<uptr>(Opt.MaxSimultaneousAllocations) *
                            GuardedAllocSlotSize);
#endif // GWP_ASAN_HOOKS
  }

#ifdef GWP_ASAN_HOOKS
  const gwp_asan::AllocationMetadata *getGwpAsanAllocationMetadata() {
    return GuardedAlloc.getMetadataRegion();
  }

  const gwp_asan::AllocatorState *getGwpAsanAllocatorState() {
    return GuardedAlloc.getAllocatorState();
  }
#endif // GWP_ASAN_HOOKS

  ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
    TSDRegistry.initThreadMaybe(this, MinimalInit);
  }

  void unmapTestOnly() {
    TSDRegistry.unmapTestOnly(this);
    Primary.unmapTestOnly();
    Secondary.unmapTestOnly();
#ifdef GWP_ASAN_HOOKS
    if (getFlags()->GWP_ASAN_InstallSignalHandlers)
      gwp_asan::segv_handler::uninstallSignalHandlers();
    GuardedAlloc.uninitTestOnly();
#endif // GWP_ASAN_HOOKS
  }

  TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
  QuarantineT *getQuarantine() { return &Quarantine; }

  // The Cache must be provided zero-initialized.
  void initCache(CacheT *Cache) { Cache->init(&Stats, &Primary); }

  // Release the resources used by a TSD, which involves:
  // - draining the local quarantine cache to the global quarantine;
  // - releasing the cached pointers back to the Primary;
  // - unlinking the local stats from the global ones (destroying the cache does
  //   the last two items).
  void commitBack(TSD<ThisT> *TSD) {
    TSD->assertLocked(/*BypassCheck=*/true);
    Quarantine.drain(&TSD->getQuarantineCache(),
                     QuarantineCallback(*this, TSD->getCache()));
    TSD->getCache().destroy(&Stats);
  }

  void drainCache(TSD<ThisT> *TSD) {
    TSD->assertLocked(/*BypassCheck=*/true);
    Quarantine.drainAndRecycle(&TSD->getQuarantineCache(),
                               QuarantineCallback(*this, TSD->getCache()));
    TSD->getCache().drain();
  }
  void drainCaches() { TSDRegistry.drainCaches(this); }

  ALWAYS_INLINE void *getHeaderTaggedPointer(void *Ptr) {
    if (!allocatorSupportsMemoryTagging<AllocatorConfig>())
      return Ptr;
    auto UntaggedPtr = untagPointer(Ptr);
    if (UntaggedPtr != Ptr)
      return UntaggedPtr;
    // Secondary, or pointer allocated while memory tagging is unsupported or
    // disabled. The tag mismatch is okay in the latter case because tags will
    // not be checked.
    return addHeaderTag(Ptr);
  }

  ALWAYS_INLINE uptr addHeaderTag(uptr Ptr) {
    if (!allocatorSupportsMemoryTagging<AllocatorConfig>())
      return Ptr;
    return addFixedTag(Ptr, 2);
  }

  ALWAYS_INLINE void *addHeaderTag(void *Ptr) {
    return reinterpret_cast<void *>(addHeaderTag(reinterpret_cast<uptr>(Ptr)));
  }
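
  // Added commentary: the fixed tag value 2 used above is what other parts of
  // this file call "the header tag". Chunk headers are thus addressed either
  // with a zero tag (tagged Primary chunks) or with this fixed tag (Secondary
  // chunks and untagged Primary chunks), which is what getChunkFromBlock() and
  // iterateOverChunks() rely on when probing blocks.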

  NOINLINE u32 collectStackTrace(UNUSED StackDepot *Depot) {
#ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
    // Discard collectStackTrace() frame and allocator function frame.
    constexpr uptr DiscardFrames = 2;
    uptr Stack[MaxTraceSize + DiscardFrames];
    uptr Size =
        android_unsafe_frame_pointer_chase(Stack, MaxTraceSize + DiscardFrames);
    Size = Min<uptr>(Size, MaxTraceSize + DiscardFrames);
    return Depot->insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size);
#else
    return 0;
#endif // HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
  }

  uptr computeOddEvenMaskForPointerMaybe(const Options &Options, uptr Ptr,
                                         uptr ClassId) {
    if (!Options.get(OptionBit::UseOddEvenTags))
      return 0;

    // If a chunk's tag is odd, we want the tags of the surrounding blocks to be
    // even, and vice versa. Blocks are laid out Size bytes apart, and adding
    // Size to Ptr will flip the least significant set bit of Size in Ptr, so
    // that bit will have the pattern 010101... for consecutive blocks, which we
    // can use to determine which tag mask to use.
    return 0x5555U << ((Ptr >> SizeClassMap::getSizeLSBByClassId(ClassId)) & 1);
  }
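
  // Worked example (added commentary): getSizeLSBByClassId() returns the index
  // of the least significant set bit of the class size. For a 64-byte class
  // (LSB index 6), bit 6 of consecutive block addresses alternates 0,1,0,1,...
  // so the expression above alternates between 0x5555 and 0xAAAA. Passed to
  // setRandomTag() as an exclusion mask, those values rule out the
  // even-numbered or the odd-numbered tags respectively, which is what gives
  // adjacent blocks tags of opposite parity.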

  NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
                          uptr Alignment = MinAlignment,
                          bool ZeroContents = false) NO_THREAD_SAFETY_ANALYSIS {
    initThreadMaybe();

    const Options Options = Primary.Options.load();
    if (UNLIKELY(Alignment > MaxAlignment)) {
      if (Options.get(OptionBit::MayReturnNull))
        return nullptr;
      reportAlignmentTooBig(Alignment, MaxAlignment);
    }
    if (Alignment < MinAlignment)
      Alignment = MinAlignment;

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.shouldSample())) {
      if (void *Ptr = GuardedAlloc.allocate(Size, Alignment)) {
        Stats.add(StatAllocated, GuardedAllocSlotSize);
        Stats.sub(StatFree, GuardedAllocSlotSize);
        return Ptr;
      }
    }
#endif // GWP_ASAN_HOOKS

    const FillContentsMode FillContents = ZeroContents ? ZeroFill
                                          : TSDRegistry.getDisableMemInit()
                                              ? NoFill
                                              : Options.getFillContentsMode();

    // If the requested size happens to be 0 (more common than you might think),
    // allocate MinAlignment bytes on top of the header. Then add the extra
    // bytes required to fulfill the alignment requirements: we allocate enough
    // to be sure that there will be an address in the block that will satisfy
    // the alignment.
    const uptr NeededSize =
        roundUp(Size, MinAlignment) +
        ((Alignment > MinAlignment) ? Alignment : Chunk::getHeaderSize());
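
    // Worked example (added commentary, assuming a 64-bit build with
    // MinAlignment == 16 and an 8-byte packed header): a request of Size == 20
    // with default alignment yields roundUp(20, 16) + 8 == 40 bytes, while the
    // same request with Alignment == 64 yields 32 + 64 == 96 bytes, enough to
    // guarantee that a 64-byte aligned address exists within the block.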

    // Takes care of extravagantly large sizes as well as integer overflows.
    static_assert(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment, "");
    if (UNLIKELY(Size >= MaxAllowedMallocSize)) {
      if (Options.get(OptionBit::MayReturnNull))
        return nullptr;
      reportAllocationSizeTooBig(Size, NeededSize, MaxAllowedMallocSize);
    }
    DCHECK_LE(Size, NeededSize);

    void *Block = nullptr;
    uptr ClassId = 0;
    uptr SecondaryBlockEnd = 0;
    if (LIKELY(PrimaryT::canAllocate(NeededSize))) {
      ClassId = SizeClassMap::getClassIdBySize(NeededSize);
      DCHECK_NE(ClassId, 0U);
      typename TSDRegistryT::ScopedTSD TSD(TSDRegistry);
      Block = TSD->getCache().allocate(ClassId);
      // If the allocation failed, retry in each successively larger class until
      // it fits. If it fails to fit in the largest class, fallback to the
      // Secondary.
      if (UNLIKELY(!Block)) {
        while (ClassId < SizeClassMap::LargestClassId && !Block)
          Block = TSD->getCache().allocate(++ClassId);
        if (!Block)
          ClassId = 0;
      }
    }
    if (UNLIKELY(ClassId == 0)) {
      Block = Secondary.allocate(Options, Size, Alignment, &SecondaryBlockEnd,
                                 FillContents);
    }

    if (UNLIKELY(!Block)) {
      if (Options.get(OptionBit::MayReturnNull))
        return nullptr;
      reportOutOfMemory(NeededSize);
    }

    const uptr UserPtr = roundUp(
        reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize(), Alignment);
    const uptr SizeOrUnusedBytes =
        ClassId ? Size : SecondaryBlockEnd - (UserPtr + Size);

    if (LIKELY(!useMemoryTagging<AllocatorConfig>(Options))) {
      return initChunk(ClassId, Origin, Block, UserPtr, SizeOrUnusedBytes,
                       FillContents);
    }
    return initChunkWithMemoryTagging(ClassId, Origin, Block, UserPtr, Size,
                                      SizeOrUnusedBytes, FillContents);
  }

  NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0,
                           UNUSED uptr Alignment = MinAlignment) {
    // For a deallocation, we only ensure minimal initialization, meaning thread
    // local data will be left uninitialized for now (when using ELF TLS). The
    // fallback cache will be used instead. This is a workaround for a situation
    // where the only heap operation performed in a thread would be a free past
    // the TLS destructors, ending up in initialized thread specific data never
    // being destroyed properly. Any other heap operation will do a full init.
    initThreadMaybe(/*MinimalInit=*/true);

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
      GuardedAlloc.deallocate(Ptr);
      Stats.add(StatFree, GuardedAllocSlotSize);
      Stats.sub(StatAllocated, GuardedAllocSlotSize);
      return;
    }
#endif // GWP_ASAN_HOOKS

    if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)))
      reportMisalignedPointer(AllocatorAction::Deallocating, Ptr);

    void *TaggedPtr = Ptr;
    Ptr = getHeaderTaggedPointer(Ptr);

    Chunk::UnpackedHeader Header;
    Chunk::loadHeader(Cookie, Ptr, &Header);

    if (UNLIKELY(Header.State != Chunk::State::Allocated))
      reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);

    const Options Options = Primary.Options.load();
    if (Options.get(OptionBit::DeallocTypeMismatch)) {
      if (UNLIKELY(Header.OriginOrWasZeroed != Origin)) {
        // With the exception of memalign'd chunks, which can still be free'd.
        if (Header.OriginOrWasZeroed != Chunk::Origin::Memalign ||
            Origin != Chunk::Origin::Malloc)
          reportDeallocTypeMismatch(AllocatorAction::Deallocating, Ptr,
                                    Header.OriginOrWasZeroed, Origin);
      }
    }

    const uptr Size = getSize(Ptr, &Header);
    if (DeleteSize && Options.get(OptionBit::DeleteSizeMismatch)) {
      if (UNLIKELY(DeleteSize != Size))
        reportDeleteSizeMismatch(Ptr, DeleteSize, Size);
    }

    quarantineOrDeallocateChunk(Options, TaggedPtr, &Header, Size);
  }

  void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) {
    initThreadMaybe();

    const Options Options = Primary.Options.load();
    if (UNLIKELY(NewSize >= MaxAllowedMallocSize)) {
      if (Options.get(OptionBit::MayReturnNull))
        return nullptr;
      reportAllocationSizeTooBig(NewSize, 0, MaxAllowedMallocSize);
    }

    // The following cases are handled by the C wrappers.
    DCHECK_NE(OldPtr, nullptr);
    DCHECK_NE(NewSize, 0);

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(OldPtr))) {
      uptr OldSize = GuardedAlloc.getSize(OldPtr);
      void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
      if (NewPtr)
        memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize);
      GuardedAlloc.deallocate(OldPtr);
      Stats.add(StatFree, GuardedAllocSlotSize);
      Stats.sub(StatAllocated, GuardedAllocSlotSize);
      return NewPtr;
    }
#endif // GWP_ASAN_HOOKS

    void *OldTaggedPtr = OldPtr;
    OldPtr = getHeaderTaggedPointer(OldPtr);

    if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(OldPtr), MinAlignment)))
      reportMisalignedPointer(AllocatorAction::Reallocating, OldPtr);

    Chunk::UnpackedHeader Header;
    Chunk::loadHeader(Cookie, OldPtr, &Header);

    if (UNLIKELY(Header.State != Chunk::State::Allocated))
      reportInvalidChunkState(AllocatorAction::Reallocating, OldPtr);

    // Pointer has to be allocated with a malloc-type function. Some
    // applications think that it is OK to realloc a memalign'ed pointer, which
    // will trigger this check. It really isn't.
    if (Options.get(OptionBit::DeallocTypeMismatch)) {
      if (UNLIKELY(Header.OriginOrWasZeroed != Chunk::Origin::Malloc))
        reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
                                  Header.OriginOrWasZeroed,
                                  Chunk::Origin::Malloc);
    }

    void *BlockBegin = getBlockBegin(OldTaggedPtr, &Header);
    uptr BlockEnd;
    uptr OldSize;
    const uptr ClassId = Header.ClassId;
    if (LIKELY(ClassId)) {
      BlockEnd = reinterpret_cast<uptr>(BlockBegin) +
                 SizeClassMap::getSizeByClassId(ClassId);
      OldSize = Header.SizeOrUnusedBytes;
    } else {
      BlockEnd = SecondaryT::getBlockEnd(BlockBegin);
      OldSize = BlockEnd - (reinterpret_cast<uptr>(OldTaggedPtr) +
                            Header.SizeOrUnusedBytes);
    }

    // If the new chunk still fits in the previously allocated block (with a
    // reasonable delta), we just keep the old block, and update the chunk
    // header to reflect the size change.
    if (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize <= BlockEnd) {
      if (NewSize > OldSize || (OldSize - NewSize) < getPageSizeCached()) {
        // If we have reduced the size, set the extra bytes to the fill value
        // so that we are ready to grow it again in the future.
        if (NewSize < OldSize) {
          const FillContentsMode FillContents =
              TSDRegistry.getDisableMemInit() ? NoFill
                                              : Options.getFillContentsMode();
          if (FillContents != NoFill) {
            memset(reinterpret_cast<char *>(OldTaggedPtr) + NewSize,
                   FillContents == ZeroFill ? 0 : PatternFillByte,
                   OldSize - NewSize);
          }
        }

        Header.SizeOrUnusedBytes =
            (ClassId ? NewSize
                     : BlockEnd -
                           (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize)) &
            Chunk::SizeOrUnusedBytesMask;
        Chunk::storeHeader(Cookie, OldPtr, &Header);
        if (UNLIKELY(useMemoryTagging<AllocatorConfig>(Options))) {
          if (ClassId) {
            resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
                              reinterpret_cast<uptr>(OldTaggedPtr) + NewSize,
                              NewSize, untagPointer(BlockEnd));
            storePrimaryAllocationStackMaybe(Options, OldPtr);
          } else {
            storeSecondaryAllocationStackMaybe(Options, OldPtr, NewSize);
          }
        }
        return OldTaggedPtr;
      }
    }

    // Otherwise we allocate a new one, and deallocate the old one. Some
    // allocators will allocate an even larger chunk (by a fixed factor) to
    // allow for potential further in-place realloc. The gains of such a trick
    // are currently unclear.
    void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
    if (LIKELY(NewPtr)) {
      memcpy(NewPtr, OldTaggedPtr, Min(NewSize, OldSize));
      quarantineOrDeallocateChunk(Options, OldTaggedPtr, &Header, OldSize);
    }
    return NewPtr;
  }

  // TODO(kostyak): disable() is currently best-effort. There are some small
  // windows of time when an allocation could still succeed after
  // this function finishes. We will revisit that later.
  void disable() NO_THREAD_SAFETY_ANALYSIS {
    initThreadMaybe();
#ifdef GWP_ASAN_HOOKS
    GuardedAlloc.disable();
#endif
    TSDRegistry.disable();
    Stats.disable();
    Quarantine.disable();
    Primary.disable();
    Secondary.disable();
    disableRingBuffer();
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    enableRingBuffer();
    Secondary.enable();
    Primary.enable();
    Quarantine.enable();
    Stats.enable();
    TSDRegistry.enable();
#ifdef GWP_ASAN_HOOKS
    GuardedAlloc.enable();
#endif
  }

  // The function returns the amount of bytes required to store the statistics,
  // which might be larger than the amount of bytes provided. Note that the
  // statistics buffer is not necessarily constant between calls to this
  // function. This can be called with a null buffer or zero size for buffer
  // sizing purposes.
  uptr getStats(char *Buffer, uptr Size) {
    ScopedString Str;
    const uptr Length = getStats(&Str) + 1;
    if (Length < Size)
      Size = Length;
    if (Buffer && Size) {
      memcpy(Buffer, Str.data(), Size);
      Buffer[Size - 1] = '\0';
    }
    return Length;
  }

  void printFragmentationInfo() {
    ScopedString Str;
    Primary.getFragmentationInfo(&Str);
    // Secondary allocator dumps the fragmentation data in getStats().
    Str.output();
  }

  void releaseToOS(ReleaseToOS ReleaseType) {
    initThreadMaybe();
    if (ReleaseType == ReleaseToOS::ForceAll)
      drainCaches();
    Primary.releaseToOS(ReleaseType);
    Secondary.releaseToOS();
  }

  // Iterate over all chunks and call a callback for all busy chunks located
  // within the provided memory range. Said callback must not use this allocator
  // or a deadlock can ensue. This fits Android's malloc_iterate() needs.
  void iterateOverChunks(uptr Base, uptr Size, iterate_callback Callback,
                         void *Arg) {
    initThreadMaybe();
    if (archSupportsMemoryTagging())
      Base = untagPointer(Base);
    const uptr From = Base;
    const uptr To = Base + Size;
    bool MayHaveTaggedPrimary =
        allocatorSupportsMemoryTagging<AllocatorConfig>() &&
        systemSupportsMemoryTagging();
    auto Lambda = [this, From, To, MayHaveTaggedPrimary, Callback,
                   Arg](uptr Block) {
      if (Block < From || Block >= To)
        return;
      uptr Chunk;
      Chunk::UnpackedHeader Header;
      if (MayHaveTaggedPrimary) {
        // A chunk header can either have a zero tag (tagged primary) or the
        // header tag (secondary, or untagged primary). We don't know which so
        // try both.
        ScopedDisableMemoryTagChecks x;
        if (!getChunkFromBlock(Block, &Chunk, &Header) &&
            !getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
          return;
      } else {
        if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
          return;
      }
      if (Header.State == Chunk::State::Allocated) {
        uptr TaggedChunk = Chunk;
        if (allocatorSupportsMemoryTagging<AllocatorConfig>())
          TaggedChunk = untagPointer(TaggedChunk);
        if (useMemoryTagging<AllocatorConfig>(Primary.Options.load()))
          TaggedChunk = loadTag(Chunk);
        Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header),
                 Arg);
      }
    };
    Primary.iterateOverBlocks(Lambda);
    Secondary.iterateOverBlocks(Lambda);
#ifdef GWP_ASAN_HOOKS
    GuardedAlloc.iterate(reinterpret_cast<void *>(Base), Size, Callback, Arg);
#endif
  }

  bool canReturnNull() {
    initThreadMaybe();
    return Primary.Options.load().get(OptionBit::MayReturnNull);
  }

  bool setOption(Option O, sptr Value) {
    initThreadMaybe();
    if (O == Option::MemtagTuning) {
      // Enabling odd/even tags involves a tradeoff between use-after-free
      // detection and buffer overflow detection. Odd/even tags make it more
      // likely for buffer overflows to be detected by increasing the size of
      // the guaranteed "red zone" around the allocation, but on the other hand
      // use-after-free is less likely to be detected because the tag space for
      // any particular chunk is cut in half. Therefore we use this tuning
      // setting to control whether odd/even tags are enabled.
      if (Value == M_MEMTAG_TUNING_BUFFER_OVERFLOW)
        Primary.Options.set(OptionBit::UseOddEvenTags);
      else if (Value == M_MEMTAG_TUNING_UAF)
        Primary.Options.clear(OptionBit::UseOddEvenTags);
      return true;
    } else {
      // We leave it to the various sub-components to decide whether or not they
      // want to handle the option, but we do not want to short-circuit
      // execution if one of the setOption was to return false.
      const bool PrimaryResult = Primary.setOption(O, Value);
      const bool SecondaryResult = Secondary.setOption(O, Value);
      const bool RegistryResult = TSDRegistry.setOption(O, Value);
      return PrimaryResult && SecondaryResult && RegistryResult;
    }
  }

  // Return the usable size for a given chunk. Technically we lie, as we just
  // report the actual size of a chunk. This is done to counteract code actively
  // writing past the end of a chunk (like sqlite3) when the usable size allows
  // for it, which then forces realloc to copy the usable size of a chunk as
  // opposed to its actual size.
  uptr getUsableSize(const void *Ptr) {
    return getAllocSize(Ptr);
  }

  uptr getAllocSize(const void *Ptr) {
#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
      return GuardedAlloc.getSize(Ptr);
#endif // GWP_ASAN_HOOKS

    Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
    Chunk::UnpackedHeader Header;
    Chunk::loadHeader(Cookie, Ptr, &Header);

    // Getting the alloc size of a chunk only makes sense if it's allocated.
    if (UNLIKELY(Header.State != Chunk::State::Allocated))
      reportInvalidChunkState(AllocatorAction::Sizing, const_cast<void *>(Ptr));

    return getSize(Ptr, &Header);
  }

  void getStats(StatCounters S) {
    initThreadMaybe();
    Stats.get(S);
  }

  // Returns true if the pointer provided was allocated by the current
  // allocator instance, which is compliant with tcmalloc's ownership concept.
  // A corrupted chunk will not be reported as owned, which is WAI.
  bool isOwned(const void *Ptr) {
    initThreadMaybe();
    // If the allocation is not owned, the tags could be wrong.
    ScopedDisableMemoryTagChecks x(
        useMemoryTagging<AllocatorConfig>(Primary.Options.load()));
#ifdef GWP_ASAN_HOOKS
    if (GuardedAlloc.pointerIsMine(Ptr))
      return true;
#endif // GWP_ASAN_HOOKS
    if (!Ptr || !isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment))
      return false;
    Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
    Chunk::UnpackedHeader Header;
    return Chunk::isValid(Cookie, Ptr, &Header) &&
           Header.State == Chunk::State::Allocated;
  }

  bool useMemoryTaggingTestOnly() const {
    return useMemoryTagging<AllocatorConfig>(Primary.Options.load());
  }

  void disableMemoryTagging() {
    // If we haven't been initialized yet, we need to initialize now in order to
    // prevent a future call to initThreadMaybe() from enabling memory tagging
    // based on feature detection. But don't call initThreadMaybe() because it
    // may end up calling the allocator (via pthread_atfork, via the post-init
    // callback), which may cause mappings to be created with memory tagging
    // enabled.
    TSDRegistry.initOnceMaybe(this);
    if (allocatorSupportsMemoryTagging<AllocatorConfig>()) {
      Secondary.disableMemoryTagging();
      Primary.Options.clear(OptionBit::UseMemoryTagging);
    }
  }
) {
822 if (getFlags()->allocation_ring_buffer_size
<= 0) {
823 DCHECK(!Primary
.Options
.load().get(OptionBit::TrackAllocationStacks
));
828 initRingBufferMaybe();
829 Primary
.Options
.set(OptionBit::TrackAllocationStacks
);
831 Primary
.Options
.clear(OptionBit::TrackAllocationStacks
);
834 void setFillContents(FillContentsMode FillContents
) {
836 Primary
.Options
.setFillContentsMode(FillContents
);
839 void setAddLargeAllocationSlack(bool AddSlack
) {
842 Primary
.Options
.set(OptionBit::AddLargeAllocationSlack
);
844 Primary
.Options
.clear(OptionBit::AddLargeAllocationSlack
);

  const char *getStackDepotAddress() {
    initThreadMaybe();
    AllocationRingBuffer *RB = getRingBuffer();
    return RB ? reinterpret_cast<char *>(RB->Depot) : nullptr;
  }

  uptr getStackDepotSize() {
    initThreadMaybe();
    AllocationRingBuffer *RB = getRingBuffer();
    return RB ? RB->StackDepotSize : 0;
  }

  const char *getRegionInfoArrayAddress() const {
    return Primary.getRegionInfoArrayAddress();
  }

  static uptr getRegionInfoArraySize() {
    return PrimaryT::getRegionInfoArraySize();
  }

  const char *getRingBufferAddress() {
    initThreadMaybe();
    return reinterpret_cast<char *>(getRingBuffer());
  }

  uptr getRingBufferSize() {
    initThreadMaybe();
    AllocationRingBuffer *RB = getRingBuffer();
    return RB && RB->RingBufferElements
               ? ringBufferSizeInBytes(RB->RingBufferElements)
               : 0;
  }

  static const uptr MaxTraceSize = 64;

  static void collectTraceMaybe(const StackDepot *Depot,
                                uintptr_t (&Trace)[MaxTraceSize], u32 Hash) {
    uptr RingPos, Size;
    if (!Depot->find(Hash, &RingPos, &Size))
      return;
    for (unsigned I = 0; I != Size && I != MaxTraceSize; ++I)
      Trace[I] = static_cast<uintptr_t>(Depot->at(RingPos + I));
  }

  static void getErrorInfo(struct scudo_error_info *ErrorInfo,
                           uintptr_t FaultAddr, const char *DepotPtr,
                           size_t DepotSize, const char *RegionInfoPtr,
                           const char *RingBufferPtr, size_t RingBufferSize,
                           const char *Memory, const char *MemoryTags,
                           uintptr_t MemoryAddr, size_t MemorySize) {
    // N.B. we need to support corrupted data in any of the buffers here. We get
    // this information from an external process (the crashing process) that
    // should not be able to crash the crash dumper (crash_dump on Android).
    // See also the get_error_info_fuzzer.
    if (!allocatorSupportsMemoryTagging<AllocatorConfig>() ||
        MemoryAddr + MemorySize < MemoryAddr)
      return;

    const StackDepot *Depot = nullptr;
    if (DepotPtr) {
      // check for corrupted StackDepot. First we need to check whether we can
      // read the metadata, then whether the metadata matches the size.
      if (DepotSize < sizeof(*Depot))
        return;
      Depot = reinterpret_cast<const StackDepot *>(DepotPtr);
      if (!Depot->isValid(DepotSize))
        return;
    }

    size_t NextErrorReport = 0;

    // Check for OOB in the current block and the two surrounding blocks. Beyond
    // that, UAF is more likely.
    if (extractTag(FaultAddr) != 0)
      getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
                         RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
                         MemorySize, 0, 2);

    // Check the ring buffer. For primary allocations this will only find UAF;
    // for secondary allocations we can find either UAF or OOB.
    getRingBufferErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
                           RingBufferPtr, RingBufferSize);

    // Check for OOB in the 28 blocks surrounding the 3 we checked earlier.
    // Beyond that we are likely to hit false positives.
    if (extractTag(FaultAddr) != 0)
      getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
                         RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
                         MemorySize, 2, 16);
  }

private:
  typedef typename PrimaryT::SizeClassMap SizeClassMap;

  static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;
  static const uptr MaxAlignmentLog = 24U; // 16 MB seems reasonable.
  static const uptr MinAlignment = 1UL << MinAlignmentLog;
  static const uptr MaxAlignment = 1UL << MaxAlignmentLog;
  static const uptr MaxAllowedMallocSize =
      FIRST_32_SECOND_64(1UL << 31, 1ULL << 40);

  static_assert(MinAlignment >= sizeof(Chunk::PackedHeader),
                "Minimal alignment must at least cover a chunk header.");
  static_assert(!allocatorSupportsMemoryTagging<AllocatorConfig>() ||
                    MinAlignment >= archMemoryTagGranuleSize(),
                "");

  static const u32 BlockMarker = 0x44554353U;
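
  // Added commentary: with the constants above, the largest size accepted by
  // allocate() is 2 GiB on 32-bit targets and 1 TiB on 64-bit targets
  // (FIRST_32_SECOND_64 picks the first value on 32-bit builds), and requested
  // alignments are capped at 16 MB. Anything beyond either limit is reported
  // as an error or, with MayReturnNull set, turned into a null return.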

  // These are indexes into an "array" of 32-bit values that store information
  // inline with a chunk that is relevant to diagnosing memory tag faults, where
  // 0 corresponds to the address of the user memory. This means that only
  // negative indexes may be used. The smallest index that may be used is -2,
  // which corresponds to 8 bytes before the user memory, because the chunk
  // header size is 8 bytes and in allocators that support memory tagging the
  // minimum alignment is at least the tag granule size (16 on aarch64).
  static const sptr MemTagAllocationTraceIndex = -2;
  static const sptr MemTagAllocationTidIndex = -1;

  u32 Cookie = 0;
  u32 QuarantineMaxChunkSize = 0;

  GlobalStats Stats;
  PrimaryT Primary;
  SecondaryT Secondary;
  QuarantineT Quarantine;
  TSDRegistryT TSDRegistry;
  pthread_once_t PostInitNonce = PTHREAD_ONCE_INIT;

#ifdef GWP_ASAN_HOOKS
  gwp_asan::GuardedPoolAllocator GuardedAlloc;
  uptr GuardedAllocSlotSize = 0;
#endif // GWP_ASAN_HOOKS

  struct AllocationRingBuffer {
    struct Entry {
      atomic_uptr Ptr;
      atomic_uptr AllocationSize;
      atomic_u32 AllocationTrace;
      atomic_u32 AllocationTid;
      atomic_u32 DeallocationTrace;
      atomic_u32 DeallocationTid;
    };
    StackDepot *Depot = nullptr;
    uptr StackDepotSize = 0;
    MemMapT RawRingBufferMap;
    MemMapT RawStackDepotMap;
    u32 RingBufferElements = 0;
    atomic_uptr Pos;
    // An array of Size (at least one) elements of type Entry immediately
    // follows this struct.
  };
  static_assert(sizeof(AllocationRingBuffer) %
                        alignof(typename AllocationRingBuffer::Entry) ==
                    0,
                "invalid alignment");

  // Lock to initialize the RingBuffer
  HybridMutex RingBufferInitLock;

  // Pointer to memory mapped area starting with AllocationRingBuffer struct,
  // and immediately followed by Size elements of type Entry.
  atomic_uptr RingBufferAddress = {};

  AllocationRingBuffer *getRingBuffer() {
    return reinterpret_cast<AllocationRingBuffer *>(
        atomic_load(&RingBufferAddress, memory_order_acquire));
  }

  // The following might get optimized out by the compiler.
  NOINLINE void performSanityChecks() {
    // Verify that the header offset field can hold the maximum offset. In the
    // case of the Secondary allocator, it takes care of alignment and the
    // offset will always be small. In the case of the Primary, the worst case
    // scenario happens in the last size class, when the backend allocation
    // would already be aligned on the requested alignment, which would happen
    // to be the maximum alignment that would fit in that size class. As a
    // result, the maximum offset will be at most the maximum alignment for the
    // last size class minus the header size, in multiples of MinAlignment.
    Chunk::UnpackedHeader Header = {};
    const uptr MaxPrimaryAlignment = 1UL << getMostSignificantSetBitIndex(
                                         SizeClassMap::MaxSize - MinAlignment);
    const uptr MaxOffset =
        (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
    Header.Offset = MaxOffset & Chunk::OffsetMask;
    if (UNLIKELY(Header.Offset != MaxOffset))
      reportSanityCheckError("offset");

    // Verify that we can fit the maximum size or amount of unused bytes in the
    // header. Given that the Secondary fits the allocation to a page, the worst
    // case scenario happens in the Primary. It will depend on the second to
    // last and last class sizes, as well as the dynamic base for the Primary.
    // The following is an over-approximation that works for our needs.
    const uptr MaxSizeOrUnusedBytes = SizeClassMap::MaxSize - 1;
    Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
    if (UNLIKELY(Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes))
      reportSanityCheckError("size (or unused bytes)");

    const uptr LargestClassId = SizeClassMap::LargestClassId;
    Header.ClassId = LargestClassId;
    if (UNLIKELY(Header.ClassId != LargestClassId))
      reportSanityCheckError("class ID");
  }

  static inline void *getBlockBegin(const void *Ptr,
                                    Chunk::UnpackedHeader *Header) {
    return reinterpret_cast<void *>(
        reinterpret_cast<uptr>(Ptr) - Chunk::getHeaderSize() -
        (static_cast<uptr>(Header->Offset) << MinAlignmentLog));
  }

  // Return the size of a chunk as requested during its allocation.
  inline uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
    const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
    if (LIKELY(Header->ClassId))
      return SizeOrUnusedBytes;
    if (allocatorSupportsMemoryTagging<AllocatorConfig>())
      Ptr = untagPointer(const_cast<void *>(Ptr));
    return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
           reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
  }
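
  // Added commentary: for Primary chunks the header stores the requested size
  // directly, so getSize() is a plain field read. For Secondary chunks the
  // header stores the number of unused bytes at the end of the block instead;
  // e.g. (illustrative numbers) a block ending at 0x2000 with 0x30 unused
  // bytes and user data starting at 0x1000 corresponds to a requested size of
  // 0x2000 - 0x1000 - 0x30 = 0xFD0 bytes.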

  ALWAYS_INLINE void *initChunk(const uptr ClassId, const Chunk::Origin Origin,
                                void *Block, const uptr UserPtr,
                                const uptr SizeOrUnusedBytes,
                                const FillContentsMode FillContents) {
    // Compute the default pointer before adding the header tag
    const uptr DefaultAlignedPtr =
        reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();

    Block = addHeaderTag(Block);
    // Only do content fill when it's from primary allocator because secondary
    // allocator has filled the content.
    if (ClassId != 0 && UNLIKELY(FillContents != NoFill)) {
      // This condition is not necessarily unlikely, but since memset is
      // costly, we might as well mark it as such.
      memset(Block, FillContents == ZeroFill ? 0 : PatternFillByte,
             PrimaryT::getSizeByClassId(ClassId));
    }

    Chunk::UnpackedHeader Header = {};

    if (UNLIKELY(DefaultAlignedPtr != UserPtr)) {
      const uptr Offset = UserPtr - DefaultAlignedPtr;
      DCHECK_GE(Offset, 2 * sizeof(u32));
      // The BlockMarker has no security purpose, but is specifically meant for
      // the chunk iteration function that can be used in debugging situations.
      // It is the only situation where we have to locate the start of a chunk
      // based on its block address.
      reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
      reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
      Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
    }

    Header.ClassId = ClassId & Chunk::ClassIdMask;
    Header.State = Chunk::State::Allocated;
    Header.OriginOrWasZeroed = Origin & Chunk::OriginMask;
    Header.SizeOrUnusedBytes = SizeOrUnusedBytes & Chunk::SizeOrUnusedBytesMask;
    Chunk::storeHeader(Cookie, reinterpret_cast<void *>(addHeaderTag(UserPtr)),
                       &Header);

    return reinterpret_cast<void *>(UserPtr);
  }

  NOINLINE void *
  initChunkWithMemoryTagging(const uptr ClassId, const Chunk::Origin Origin,
                             void *Block, const uptr UserPtr, const uptr Size,
                             const uptr SizeOrUnusedBytes,
                             const FillContentsMode FillContents) {
    const Options Options = Primary.Options.load();
    DCHECK(useMemoryTagging<AllocatorConfig>(Options));

    // Compute the default pointer before adding the header tag
    const uptr DefaultAlignedPtr =
        reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();

    void *Ptr = reinterpret_cast<void *>(UserPtr);
    void *TaggedPtr = Ptr;

    if (LIKELY(ClassId)) {
      // Init the primary chunk.
      //
      // We only need to zero or tag the contents for Primary backed
      // allocations. We only set tags for primary allocations in order to avoid
      // faulting potentially large numbers of pages for large secondary
      // allocations. We assume that guard pages are enough to protect these
      // allocations.
      //
      // FIXME: When the kernel provides a way to set the background tag of a
      // mapping, we should be able to tag secondary allocations as well.
      //
      // When memory tagging is enabled, zeroing the contents is done as part of
      // setting the tag.

      Chunk::UnpackedHeader Header;
      const uptr BlockSize = PrimaryT::getSizeByClassId(ClassId);
      const uptr BlockUptr = reinterpret_cast<uptr>(Block);
      const uptr BlockEnd = BlockUptr + BlockSize;
      // If possible, try to reuse the UAF tag that was set by deallocate().
      // For simplicity, only reuse tags if we have the same start address as
      // the previous allocation. This handles the majority of cases since
      // most allocations will not be more aligned than the minimum alignment.
      //
      // We need to handle situations involving reclaimed chunks, and retag
      // the reclaimed portions if necessary. In the case where the chunk is
      // fully reclaimed, the chunk's header will be zero, which will trigger
      // the code path for new mappings and invalid chunks that prepares the
      // chunk from scratch. There are three possibilities for partial
      // reclaiming:
      //
      // (1) Header was reclaimed, data was partially reclaimed.
      // (2) Header was not reclaimed, all data was reclaimed (e.g. because
      //     data started on a page boundary).
      // (3) Header was not reclaimed, data was partially reclaimed.
      //
      // Case (1) will be handled in the same way as for full reclaiming,
      // since the header will be zero.
      //
      // We can detect case (2) by loading the tag from the start
      // of the chunk. If it is zero, it means that either all data was
      // reclaimed (since we never use zero as the chunk tag), or that the
      // previous allocation was of size zero. Either way, we need to prepare
      // a new chunk from scratch.
      //
      // We can detect case (3) by moving to the next page (if covered by the
      // chunk) and loading the tag of its first granule. If it is zero, it
      // means that all following pages may need to be retagged. On the other
      // hand, if it is nonzero, we can assume that all following pages are
      // still tagged, according to the logic that if any of the pages
      // following the next page were reclaimed, the next page would have been
      // reclaimed as well.
      uptr TaggedUserPtr;
      uptr PrevUserPtr;
      if (getChunkFromBlock(BlockUptr, &PrevUserPtr, &Header) &&
          PrevUserPtr == UserPtr &&
          (TaggedUserPtr = loadTag(UserPtr)) != UserPtr) {
        uptr PrevEnd = TaggedUserPtr + Header.SizeOrUnusedBytes;
        const uptr NextPage = roundUp(TaggedUserPtr, getPageSizeCached());
        if (NextPage < PrevEnd && loadTag(NextPage) != NextPage)
          PrevEnd = NextPage;
        TaggedPtr = reinterpret_cast<void *>(TaggedUserPtr);
        resizeTaggedChunk(PrevEnd, TaggedUserPtr + Size, Size, BlockEnd);
        if (UNLIKELY(FillContents != NoFill && !Header.OriginOrWasZeroed)) {
          // If an allocation needs to be zeroed (i.e. calloc) we can normally
          // avoid zeroing the memory now since we can rely on memory having
          // been zeroed on free, as this is normally done while setting the
          // UAF tag. But if tagging was disabled per-thread when the memory
          // was freed, it would not have been retagged and thus zeroed, and
          // therefore it needs to be zeroed now.
          memset(TaggedPtr, 0,
                 Min(Size, roundUp(PrevEnd - TaggedUserPtr,
                                   archMemoryTagGranuleSize())));
        } else if (Size) {
          // Clear any stack metadata that may have previously been stored in
          // the chunk data.
          memset(TaggedPtr, 0, archMemoryTagGranuleSize());
        }
      } else {
        const uptr OddEvenMask =
            computeOddEvenMaskForPointerMaybe(Options, BlockUptr, ClassId);
        TaggedPtr = prepareTaggedChunk(Ptr, Size, OddEvenMask, BlockEnd);
      }
      storePrimaryAllocationStackMaybe(Options, Ptr);
    } else {
      // Init the secondary chunk.

      Block = addHeaderTag(Block);
      Ptr = addHeaderTag(Ptr);
      storeTags(reinterpret_cast<uptr>(Block), reinterpret_cast<uptr>(Ptr));
      storeSecondaryAllocationStackMaybe(Options, Ptr, Size);
    }

    Chunk::UnpackedHeader Header = {};

    if (UNLIKELY(DefaultAlignedPtr != UserPtr)) {
      const uptr Offset = UserPtr - DefaultAlignedPtr;
      DCHECK_GE(Offset, 2 * sizeof(u32));
      // The BlockMarker has no security purpose, but is specifically meant for
      // the chunk iteration function that can be used in debugging situations.
      // It is the only situation where we have to locate the start of a chunk
      // based on its block address.
      reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
      reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
      Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
    }

    Header.ClassId = ClassId & Chunk::ClassIdMask;
    Header.State = Chunk::State::Allocated;
    Header.OriginOrWasZeroed = Origin & Chunk::OriginMask;
    Header.SizeOrUnusedBytes = SizeOrUnusedBytes & Chunk::SizeOrUnusedBytesMask;
    Chunk::storeHeader(Cookie, Ptr, &Header);

    return TaggedPtr;
  }

  void quarantineOrDeallocateChunk(const Options &Options, void *TaggedPtr,
                                   Chunk::UnpackedHeader *Header,
                                   uptr Size) NO_THREAD_SAFETY_ANALYSIS {
    void *Ptr = getHeaderTaggedPointer(TaggedPtr);
    // If the quarantine is disabled, the actual size of a chunk is 0 or larger
    // than the maximum allowed, we return a chunk directly to the backend.
    // This purposefully underflows for Size == 0.
    const bool BypassQuarantine = !Quarantine.getCacheSize() ||
                                  ((Size - 1) >= QuarantineMaxChunkSize) ||
                                  !Header->ClassId;
    if (BypassQuarantine)
      Header->State = Chunk::State::Available;
    else
      Header->State = Chunk::State::Quarantined;

    if (LIKELY(!useMemoryTagging<AllocatorConfig>(Options)))
      Header->OriginOrWasZeroed = 0U;
    else {
      Header->OriginOrWasZeroed =
          Header->ClassId && !TSDRegistry.getDisableMemInit();
    }

    Chunk::storeHeader(Cookie, Ptr, Header);

    if (BypassQuarantine) {
      void *BlockBegin;
      if (LIKELY(!useMemoryTagging<AllocatorConfig>(Options))) {
        // Must do this after storeHeader because loadHeader uses a tagged ptr.
        if (allocatorSupportsMemoryTagging<AllocatorConfig>())
          Ptr = untagPointer(Ptr);
        BlockBegin = getBlockBegin(Ptr, Header);
      } else {
        BlockBegin = retagBlock(Options, TaggedPtr, Ptr, Header, Size, true);
      }

      const uptr ClassId = Header->ClassId;
      if (LIKELY(ClassId)) {
        bool CacheDrained;
        {
          typename TSDRegistryT::ScopedTSD TSD(TSDRegistry);
          CacheDrained = TSD->getCache().deallocate(ClassId, BlockBegin);
        }
        // When we have drained some blocks back to the Primary from TSD, that
        // implies that we may have the chance to release some pages as well.
        // Note that in order not to block other threads accessing the TSD,
        // release the TSD first then try the page release.
        if (CacheDrained)
          Primary.tryReleaseToOS(ClassId, ReleaseToOS::Normal);
      } else {
        Secondary.deallocate(Options, BlockBegin);
      }
    } else {
      if (UNLIKELY(useMemoryTagging<AllocatorConfig>(Options)))
        retagBlock(Options, TaggedPtr, Ptr, Header, Size, false);
      typename TSDRegistryT::ScopedTSD TSD(TSDRegistry);
      Quarantine.put(&TSD->getQuarantineCache(),
                     QuarantineCallback(*this, TSD->getCache()), Ptr, Size);
    }
  }

  NOINLINE void *retagBlock(const Options &Options, void *TaggedPtr, void *&Ptr,
                            Chunk::UnpackedHeader *Header, const uptr Size,
                            bool BypassQuarantine) {
    DCHECK(useMemoryTagging<AllocatorConfig>(Options));

    const u8 PrevTag = extractTag(reinterpret_cast<uptr>(TaggedPtr));
    storeDeallocationStackMaybe(Options, Ptr, PrevTag, Size);
    if (Header->ClassId && !TSDRegistry.getDisableMemInit()) {
      uptr TaggedBegin, TaggedEnd;
      const uptr OddEvenMask = computeOddEvenMaskForPointerMaybe(
          Options, reinterpret_cast<uptr>(getBlockBegin(Ptr, Header)),
          Header->ClassId);
      // Exclude the previous tag so that immediate use after free is
      // detected 100% of the time.
      setRandomTag(Ptr, Size, OddEvenMask | (1UL << PrevTag), &TaggedBegin,
                   &TaggedEnd);
    }

    Ptr = untagPointer(Ptr);
    void *BlockBegin = getBlockBegin(Ptr, Header);
    if (BypassQuarantine && !Header->ClassId) {
      storeTags(reinterpret_cast<uptr>(BlockBegin),
                reinterpret_cast<uptr>(Ptr));
    }

    return BlockBegin;
  }

  bool getChunkFromBlock(uptr Block, uptr *Chunk,
                         Chunk::UnpackedHeader *Header) {
    *Chunk =
        Block + getChunkOffsetFromBlock(reinterpret_cast<const char *>(Block));
    return Chunk::isValid(Cookie, reinterpret_cast<void *>(*Chunk), Header);
  }

  static uptr getChunkOffsetFromBlock(const char *Block) {
    u32 Offset = 0;
    if (reinterpret_cast<const u32 *>(Block)[0] == BlockMarker)
      Offset = reinterpret_cast<const u32 *>(Block)[1];
    return Offset + Chunk::getHeaderSize();
  }
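
  // Added commentary: when a chunk does not start right after the block start
  // (aligned allocations), the block begins with the two u32 values written in
  // initChunk()/initChunkWithMemoryTagging():
  //   [BlockMarker][Offset][ ...padding... ][PackedHeader][user data]
  // getChunkOffsetFromBlock() recognizes the marker and adds the stored
  // Offset; otherwise it assumes the header immediately follows the block
  // start.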

  // Set the tag of the granule past the end of the allocation to 0, to catch
  // linear overflows even if a previous larger allocation used the same block
  // and tag. Only do this if the granule past the end is in our block, because
  // this would otherwise lead to a SEGV if the allocation covers the entire
  // block and our block is at the end of a mapping. The tag of the next block's
  // header granule will be set to 0, so it will serve the purpose of catching
  // linear overflows in this case.
  //
  // For allocations of size 0 we do not end up storing the address tag to the
  // memory tag space, which getInlineErrorInfo() normally relies on to match
  // address tags against chunks. To allow matching in this case we store the
  // address tag in the first byte of the chunk.
  void storeEndMarker(uptr End, uptr Size, uptr BlockEnd) {
    DCHECK_EQ(BlockEnd, untagPointer(BlockEnd));
    uptr UntaggedEnd = untagPointer(End);
    if (UntaggedEnd != BlockEnd) {
      storeTag(UntaggedEnd);
      if (Size == 0)
        *reinterpret_cast<u8 *>(UntaggedEnd) = extractTag(End);
    }
  }

  void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
                           uptr BlockEnd) {
    // Prepare the granule before the chunk to store the chunk header by setting
    // its tag to 0. Normally its tag will already be 0, but in the case where a
    // chunk holding a low alignment allocation is reused for a higher alignment
    // allocation, the chunk may already have a non-zero tag from the previous
    // allocation.
    storeTag(reinterpret_cast<uptr>(Ptr) - archMemoryTagGranuleSize());

    uptr TaggedBegin, TaggedEnd;
    setRandomTag(Ptr, Size, ExcludeMask, &TaggedBegin, &TaggedEnd);

    storeEndMarker(TaggedEnd, Size, BlockEnd);
    return reinterpret_cast<void *>(TaggedBegin);
  }

  void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr NewSize,
                         uptr BlockEnd) {
    uptr RoundOldPtr = roundUp(OldPtr, archMemoryTagGranuleSize());
    uptr RoundNewPtr;
    if (RoundOldPtr >= NewPtr) {
      // If the allocation is shrinking we just need to set the tag past the end
      // of the allocation to 0. See explanation in storeEndMarker() above.
      RoundNewPtr = roundUp(NewPtr, archMemoryTagGranuleSize());
    } else {
      // Set the memory tag of the region
      // [RoundOldPtr, roundUp(NewPtr, archMemoryTagGranuleSize()))
      // to the pointer tag stored in OldPtr.
      RoundNewPtr = storeTags(RoundOldPtr, NewPtr);
    }
    storeEndMarker(RoundNewPtr, NewSize, BlockEnd);
  }

  void storePrimaryAllocationStackMaybe(const Options &Options, void *Ptr) {
    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
      return;
    AllocationRingBuffer *RB = getRingBuffer();
    if (!RB)
      return;
    auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
    Ptr32[MemTagAllocationTraceIndex] = collectStackTrace(RB->Depot);
    Ptr32[MemTagAllocationTidIndex] = getThreadID();
  }

  void storeRingBufferEntry(AllocationRingBuffer *RB, void *Ptr,
                            u32 AllocationTrace, u32 AllocationTid,
                            uptr AllocationSize, u32 DeallocationTrace,
                            u32 DeallocationTid) {
    uptr Pos = atomic_fetch_add(&RB->Pos, 1, memory_order_relaxed);
    typename AllocationRingBuffer::Entry *Entry =
        getRingBufferEntry(RB, Pos % RB->RingBufferElements);

    // First invalidate our entry so that we don't attempt to interpret a
    // partially written state in getSecondaryErrorInfo(). The fences below
    // ensure that the compiler does not move the stores to Ptr in between the
    // stores to the other fields.
    atomic_store_relaxed(&Entry->Ptr, 0);

    __atomic_signal_fence(__ATOMIC_SEQ_CST);
    atomic_store_relaxed(&Entry->AllocationTrace, AllocationTrace);
    atomic_store_relaxed(&Entry->AllocationTid, AllocationTid);
    atomic_store_relaxed(&Entry->AllocationSize, AllocationSize);
    atomic_store_relaxed(&Entry->DeallocationTrace, DeallocationTrace);
    atomic_store_relaxed(&Entry->DeallocationTid, DeallocationTid);
    __atomic_signal_fence(__ATOMIC_SEQ_CST);

    atomic_store_relaxed(&Entry->Ptr, reinterpret_cast<uptr>(Ptr));
  }

  void storeSecondaryAllocationStackMaybe(const Options &Options, void *Ptr,
                                          uptr Size) {
    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
      return;
    AllocationRingBuffer *RB = getRingBuffer();
    if (!RB)
      return;
    u32 Trace = collectStackTrace(RB->Depot);
    u32 Tid = getThreadID();

    auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
    Ptr32[MemTagAllocationTraceIndex] = Trace;
    Ptr32[MemTagAllocationTidIndex] = Tid;

    storeRingBufferEntry(RB, untagPointer(Ptr), Trace, Tid, Size, 0, 0);
  }

  void storeDeallocationStackMaybe(const Options &Options, void *Ptr,
                                   u8 PrevTag, uptr Size) {
    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
      return;
    AllocationRingBuffer *RB = getRingBuffer();
    if (!RB)
      return;
    auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
    u32 AllocationTrace = Ptr32[MemTagAllocationTraceIndex];
    u32 AllocationTid = Ptr32[MemTagAllocationTidIndex];

    u32 DeallocationTrace = collectStackTrace(RB->Depot);
    u32 DeallocationTid = getThreadID();

    storeRingBufferEntry(RB, addFixedTag(untagPointer(Ptr), PrevTag),
                         AllocationTrace, AllocationTid, Size,
                         DeallocationTrace, DeallocationTid);
  }

  static const size_t NumErrorReports =
      sizeof(((scudo_error_info *)nullptr)->reports) /
      sizeof(((scudo_error_info *)nullptr)->reports[0]);

  static void getInlineErrorInfo(struct scudo_error_info *ErrorInfo,
                                 size_t &NextErrorReport, uintptr_t FaultAddr,
                                 const StackDepot *Depot,
                                 const char *RegionInfoPtr, const char *Memory,
                                 const char *MemoryTags, uintptr_t MemoryAddr,
                                 size_t MemorySize, size_t MinDistance,
                                 size_t MaxDistance) {
    uptr UntaggedFaultAddr = untagPointer(FaultAddr);
    u8 FaultAddrTag = extractTag(FaultAddr);
    BlockInfo Info =
        PrimaryT::findNearestBlock(RegionInfoPtr, UntaggedFaultAddr);

    auto GetGranule = [&](uptr Addr, const char **Data, uint8_t *Tag) -> bool {
      if (Addr < MemoryAddr || Addr + archMemoryTagGranuleSize() < Addr ||
          Addr + archMemoryTagGranuleSize() > MemoryAddr + MemorySize)
        return false;
      *Data = &Memory[Addr - MemoryAddr];
      *Tag = static_cast<u8>(
          MemoryTags[(Addr - MemoryAddr) / archMemoryTagGranuleSize()]);
      return true;
    };

    auto ReadBlock = [&](uptr Addr, uptr *ChunkAddr,
                         Chunk::UnpackedHeader *Header, const u32 **Data,
                         u8 *Tag) {
      const char *BlockBegin;
      u8 BlockBeginTag;
      if (!GetGranule(Addr, &BlockBegin, &BlockBeginTag))
        return false;
      uptr ChunkOffset = getChunkOffsetFromBlock(BlockBegin);
      *ChunkAddr = Addr + ChunkOffset;

      const char *ChunkBegin;
      if (!GetGranule(*ChunkAddr, &ChunkBegin, Tag))
        return false;
      *Header = *reinterpret_cast<const Chunk::UnpackedHeader *>(
          ChunkBegin - Chunk::getHeaderSize());
      *Data = reinterpret_cast<const u32 *>(ChunkBegin);

      // Allocations of size 0 will have stashed the tag in the first byte of
      // the chunk, see storeEndMarker().
      if (Header->SizeOrUnusedBytes == 0)
        *Tag = static_cast<u8>(*ChunkBegin);

      return true;
    };

    if (NextErrorReport == NumErrorReports)
      return;

    auto CheckOOB = [&](uptr BlockAddr) {
      if (BlockAddr < Info.RegionBegin || BlockAddr >= Info.RegionEnd)
        return false;

      uptr ChunkAddr;
      Chunk::UnpackedHeader Header;
      const u32 *Data;
      uint8_t Tag;
      if (!ReadBlock(BlockAddr, &ChunkAddr, &Header, &Data, &Tag) ||
          Header.State != Chunk::State::Allocated || Tag != FaultAddrTag)
        return false;

      auto *R = &ErrorInfo->reports[NextErrorReport++];
      R->error_type =
          UntaggedFaultAddr < ChunkAddr ? BUFFER_UNDERFLOW : BUFFER_OVERFLOW;
      R->allocation_address = ChunkAddr;
      R->allocation_size = Header.SizeOrUnusedBytes;
      if (Depot) {
        collectTraceMaybe(Depot, R->allocation_trace,
                          Data[MemTagAllocationTraceIndex]);
      }
      R->allocation_tid = Data[MemTagAllocationTidIndex];
      return NextErrorReport == NumErrorReports;
    };

    if (MinDistance == 0 && CheckOOB(Info.BlockBegin))
      return;

    for (size_t I = Max<size_t>(MinDistance, 1); I != MaxDistance; ++I)
      if (CheckOOB(Info.BlockBegin + I * Info.BlockSize) ||
          CheckOOB(Info.BlockBegin - I * Info.BlockSize))
        return;
  }

  static void getRingBufferErrorInfo(struct scudo_error_info *ErrorInfo,
                                     size_t &NextErrorReport,
                                     uintptr_t FaultAddr,
                                     const StackDepot *Depot,
                                     const char *RingBufferPtr,
                                     size_t RingBufferSize) {
    auto *RingBuffer =
        reinterpret_cast<const AllocationRingBuffer *>(RingBufferPtr);
    size_t RingBufferElements = ringBufferElementsFromBytes(RingBufferSize);
    if (!RingBuffer || RingBufferElements == 0 || !Depot)
      return;
    uptr Pos = atomic_load_relaxed(&RingBuffer->Pos);

    for (uptr I = Pos - 1; I != Pos - 1 - RingBufferElements &&
                           NextErrorReport != NumErrorReports;
         --I) {
      auto *Entry = getRingBufferEntry(RingBuffer, I % RingBufferElements);
      uptr EntryPtr = atomic_load_relaxed(&Entry->Ptr);
      if (!EntryPtr)
        continue;

      uptr UntaggedEntryPtr = untagPointer(EntryPtr);
      uptr EntrySize = atomic_load_relaxed(&Entry->AllocationSize);
      u32 AllocationTrace = atomic_load_relaxed(&Entry->AllocationTrace);
      u32 AllocationTid = atomic_load_relaxed(&Entry->AllocationTid);
      u32 DeallocationTrace = atomic_load_relaxed(&Entry->DeallocationTrace);
      u32 DeallocationTid = atomic_load_relaxed(&Entry->DeallocationTid);

      if (DeallocationTid) {
        // For UAF we only consider in-bounds fault addresses because
        // out-of-bounds UAF is rare and attempting to detect it is very likely
        // to result in false positives.
        if (FaultAddr < EntryPtr || FaultAddr >= EntryPtr + EntrySize)
          continue;
      } else {
        // Ring buffer OOB is only possible with secondary allocations. In this
        // case we are guaranteed a guard region of at least a page on either
        // side of the allocation (guard page on the right, guard page + tagged
        // region on the left), so ignore any faults outside of that range.
        if (FaultAddr < EntryPtr - getPageSizeCached() ||
            FaultAddr >= EntryPtr + EntrySize + getPageSizeCached())
          continue;
      }

      // For UAF the ring buffer will contain two entries, one for the
      // allocation and another for the deallocation. Don't report buffer
      // overflow/underflow using the allocation entry if we have already
      // collected a report from the deallocation entry.
      bool Collided = false;
      for (uptr J = 0; J != NextErrorReport; ++J) {
        if (ErrorInfo->reports[J].allocation_address == UntaggedEntryPtr) {
          Collided = true;
          break;
        }
      }
      if (Collided)
        continue;

      auto *R = &ErrorInfo->reports[NextErrorReport++];
      if (DeallocationTid)
        R->error_type = USE_AFTER_FREE;
      else if (FaultAddr < EntryPtr)
        R->error_type = BUFFER_UNDERFLOW;
      else
        R->error_type = BUFFER_OVERFLOW;

      R->allocation_address = UntaggedEntryPtr;
      R->allocation_size = EntrySize;
      collectTraceMaybe(Depot, R->allocation_trace, AllocationTrace);
      R->allocation_tid = AllocationTid;
      collectTraceMaybe(Depot, R->deallocation_trace, DeallocationTrace);
      R->deallocation_tid = DeallocationTid;
    }
  }

  uptr getStats(ScopedString *Str) {
    Primary.getStats(Str);
    Secondary.getStats(Str);
    Quarantine.getStats(Str);
    TSDRegistry.getStats(Str);
    return Str->length();
  }

  static typename AllocationRingBuffer::Entry *
  getRingBufferEntry(AllocationRingBuffer *RB, uptr N) {
    char *RBEntryStart =
        &reinterpret_cast<char *>(RB)[sizeof(AllocationRingBuffer)];
    return &reinterpret_cast<typename AllocationRingBuffer::Entry *>(
        RBEntryStart)[N];
  }
  static const typename AllocationRingBuffer::Entry *
  getRingBufferEntry(const AllocationRingBuffer *RB, uptr N) {
    const char *RBEntryStart =
        &reinterpret_cast<const char *>(RB)[sizeof(AllocationRingBuffer)];
    return &reinterpret_cast<const typename AllocationRingBuffer::Entry *>(
        RBEntryStart)[N];
  }

  void initRingBufferMaybe() {
    ScopedLock L(RingBufferInitLock);
    if (getRingBuffer() != nullptr)
      return;

    int ring_buffer_size = getFlags()->allocation_ring_buffer_size;
    if (ring_buffer_size <= 0)
      return;

    u32 AllocationRingBufferSize = static_cast<u32>(ring_buffer_size);

    // We store alloc and free stacks for each entry.
    constexpr u32 kStacksPerRingBufferEntry = 2;
    constexpr u32 kMaxU32Pow2 = ~(UINT32_MAX >> 1);
    static_assert(isPowerOfTwo(kMaxU32Pow2));
    // On Android we always have 3 frames at the bottom: __start_main,
    // __libc_init, main, and 3 at the top: malloc, scudo_malloc and
    // Allocator::allocate. This leaves 10 frames for the user app. The next
    // smallest power of two (8) would only leave 2, which is clearly too
    // little.
    constexpr u32 kFramesPerStack = 16;
    static_assert(isPowerOfTwo(kFramesPerStack));

    if (AllocationRingBufferSize > kMaxU32Pow2 / kStacksPerRingBufferEntry)
      return;
    u32 TabSize = static_cast<u32>(roundUpPowerOfTwo(kStacksPerRingBufferEntry *
                                                     AllocationRingBufferSize));
    if (TabSize > UINT32_MAX / kFramesPerStack)
      return;
    u32 RingSize = static_cast<u32>(TabSize * kFramesPerStack);

    uptr StackDepotSize = sizeof(StackDepot) + sizeof(atomic_u64) * RingSize +
                          sizeof(atomic_u32) * TabSize;
    MemMapT DepotMap;
    DepotMap.map(
        /*Addr=*/0U, roundUp(StackDepotSize, getPageSizeCached()),
        "scudo:stack_depot");
    auto *Depot = reinterpret_cast<StackDepot *>(DepotMap.getBase());
    Depot->init(RingSize, TabSize);

    MemMapT MemMap;
    MemMap.map(
        /*Addr=*/0U,
        roundUp(ringBufferSizeInBytes(AllocationRingBufferSize),
                getPageSizeCached()),
        "scudo:ring_buffer");
    auto *RB = reinterpret_cast<AllocationRingBuffer *>(MemMap.getBase());
    RB->RawRingBufferMap = MemMap;
    RB->RingBufferElements = AllocationRingBufferSize;
    RB->Depot = Depot;
    RB->StackDepotSize = StackDepotSize;
    RB->RawStackDepotMap = DepotMap;

    atomic_store(&RingBufferAddress, reinterpret_cast<uptr>(RB),
                 memory_order_release);
  }

  void unmapRingBuffer() {
    AllocationRingBuffer *RB = getRingBuffer();
    if (RB == nullptr)
      return;
    // N.B. because RawStackDepotMap is part of RawRingBufferMap, the order
    // is very important.
    RB->RawStackDepotMap.unmap();
    // Note that the `RB->RawRingBufferMap` is stored on the pages managed by
    // itself. Take over the ownership before calling unmap() so that any
    // operation along with unmap() won't touch inaccessible pages.
    MemMapT RawRingBufferMap = RB->RawRingBufferMap;
    RawRingBufferMap.unmap();
    atomic_store(&RingBufferAddress, 0, memory_order_release);
  }

  static constexpr size_t ringBufferSizeInBytes(u32 RingBufferElements) {
    return sizeof(AllocationRingBuffer) +
           RingBufferElements * sizeof(typename AllocationRingBuffer::Entry);
  }

  static constexpr size_t ringBufferElementsFromBytes(size_t Bytes) {
    if (Bytes < sizeof(AllocationRingBuffer)) {
      return 0;
    }
    return (Bytes - sizeof(AllocationRingBuffer)) /
           sizeof(typename AllocationRingBuffer::Entry);
  }
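
  // Worked example (added commentary): each Entry holds two uptr and four u32
  // atomics, i.e. 32 bytes on a typical 64-bit target, so a ring buffer of
  // 1000 elements occupies sizeof(AllocationRingBuffer) + 32000 bytes, and
  // ringBufferElementsFromBytes() simply inverts that computation (returning 0
  // for any buffer smaller than the header struct).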
};

} // namespace scudo

#endif // SCUDO_COMBINED_H_