//===-- asan_allocator.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, 2-nd version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"

#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
namespace __asan {

// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocation larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}
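// For example, rz_log 0 encodes a 16-byte redzone, rz_log 3 encodes 128 bytes,
// and rz_log 7 encodes the 2048-byte maximum (16 << rz_log).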
static AsanAllocator &get_allocator();

static void AtomicContextStore(volatile atomic_uint64_t *atomic_context,
                               u32 tid, u32 stack) {
  u64 context = tid;
  context <<= 32;
  context += stack;
  atomic_store(atomic_context, context, memory_order_relaxed);
}

static void AtomicContextLoad(const volatile atomic_uint64_t *atomic_context,
                              u32 &tid, u32 &stack) {
  u64 context = atomic_load(atomic_context, memory_order_relaxed);
  stack = context;
  context >>= 32;
  tid = context;
}
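// The (tid, stack) pair is packed into one u64 (tid in the upper 32 bits,
// stack id in the lower 32 bits), so a single relaxed 64-bit load/store is
// enough and readers never observe a torn tid/stack combination.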
// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If the left redzone is greater than the ChunkHeader size we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
//   M -- magic value kAllocBegMagic
//   B -- address of ChunkHeader pointing to the first 'H'

class ChunkHeader {
 public:
  atomic_uint8_t chunk_state;
  u8 alloc_type : 2;
  u8 lsan_tag : 2;

  // align < 8 -> 0
  // else -> log2(min(align, 512)) - 2
  u8 user_requested_alignment_log : 3;

 private:
  u16 user_requested_size_hi;
  u32 user_requested_size_lo;
  atomic_uint64_t alloc_context_id;

 public:
  uptr UsedSize() const {
    static_assert(sizeof(user_requested_size_lo) == 4,
                  "Expression below requires this");
    return FIRST_32_SECOND_64(0, ((uptr)user_requested_size_hi << 32)) +
           user_requested_size_lo;
  }

  void SetUsedSize(uptr size) {
    user_requested_size_lo = size;
    static_assert(sizeof(user_requested_size_lo) == 4,
                  "Expression below requires this");
    user_requested_size_hi = FIRST_32_SECOND_64(0, size >> 32);
    CHECK_EQ(UsedSize(), size);
  }
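  // Note: on 64-bit targets the user size is split across the 16-bit "hi" and
  // 32-bit "lo" fields, so sizes up to 2^48 - 1 are representable; on 32-bit
  // targets only "lo" is used.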
  void SetAllocContext(u32 tid, u32 stack) {
    AtomicContextStore(&alloc_context_id, tid, stack);
  }

  void GetAllocContext(u32 &tid, u32 &stack) const {
    AtomicContextLoad(&alloc_context_id, tid, stack);
  }
};

class ChunkBase : public ChunkHeader {
  atomic_uint64_t free_context_id;

 public:
  void SetFreeContext(u32 tid, u32 stack) {
    AtomicContextStore(&free_context_id, tid, stack);
  }

  void GetFreeContext(u32 &tid, u32 &stack) const {
    AtomicContextLoad(&free_context_id, tid, stack);
  }
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);

enum {
  // Either just allocated by underlying allocator, but AsanChunk is not yet
  // ready, or almost returned to underlying allocator and AsanChunk is already
  // meaningless.
  CHUNK_INVALID = 0,
  // The chunk is allocated and not yet freed.
  CHUNK_ALLOCATED = 2,
  // The chunk was freed and put into quarantine zone.
  CHUNK_QUARANTINE = 3,
};

class AsanChunk : public ChunkBase {
 public:
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  bool AddrIsInside(uptr addr) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize());
  }
};
class LargeChunkHeader {
  static constexpr uptr kAllocBegMagic =
      FIRST_32_SECOND_64(0xCC6E96B9, 0xCC6E96B9CC6E96B9ULL);
  atomic_uintptr_t magic;
  AsanChunk *chunk_header;

 public:
  AsanChunk *Get() const {
    return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic
               ? chunk_header
               : nullptr;
  }

  void Set(AsanChunk *p) {
    if (p) {
      chunk_header = p;
      atomic_store(&magic, kAllocBegMagic, memory_order_release);
      return;
    }

    uptr old = kAllocBegMagic;
    if (!atomic_compare_exchange_strong(&magic, &old, 0,
                                        memory_order_release)) {
      CHECK_EQ(old, kAllocBegMagic);
    }
  }
};
static void FillChunk(AsanChunk *m) {
  // FIXME: Use ReleaseMemoryPagesToOS.
  Flags &fl = *flags();

  if (fl.max_free_fill_size > 0) {
    // We have to skip the chunk header, it contains free_context_id.
    uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
    if (m->UsedSize() >= kChunkHeader2Size) {  // Skip Header2 in user area.
      uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
      size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
      REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
    }
  }
}
struct QuarantineCallback {
  QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
      : cache_(cache),
        stack_(stack) {
  }

  void PreQuarantine(AsanChunk *m) const {
    FillChunk(m);
    // Poison the region.
    PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
                 kAsanHeapFreeMagic);
  }

  void Recycle(AsanChunk *m) const {
    void *p = get_allocator().GetBlockBegin(m);

    // The secondary will immediately unpoison and unmap the memory, so this
    // branch is unnecessary.
    if (get_allocator().FromPrimary(p)) {
      if (p != m) {
        // Clear the magic value, as allocator internals may overwrite the
        // contents of deallocated chunk, confusing GetAsanChunk lookup.
        reinterpret_cast<LargeChunkHeader *>(p)->Set(nullptr);
      }

      u8 old_chunk_state = CHUNK_QUARANTINE;
      if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
                                          CHUNK_INVALID,
                                          memory_order_acquire)) {
        CHECK_EQ(old_chunk_state, CHUNK_QUARANTINE);
      }

      PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
                   kAsanHeapLeftRedzoneMagic);
    }

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    get_allocator().Deallocate(cache_, p);
  }

  void RecyclePassThrough(AsanChunk *m) const {
    // Recycle for the secondary will immediately unpoison and unmap the
    // memory, so quarantine preparation is unnecessary.
    if (get_allocator().FromPrimary(m)) {
      // The primary allocation may need pattern fill if enabled.
      FillChunk(m);
    }
    Recycle(m);
  }

  void *Allocate(uptr size) const {
    void *res = get_allocator().Allocate(cache_, size, 1);
    // TODO(alekseys): Consider making quarantine OOM-friendly.
    if (UNLIKELY(!res))
      ReportOutOfMemory(size, stack_);
    return res;
  }

  void Deallocate(void *p) const { get_allocator().Deallocate(cache_, p); }

 private:
  AllocatorCache* const cache_;
  BufferedStackTrace* const stack_;
};

typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
  PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}

void AsanMapUnmapCallback::OnMapSecondary(uptr p, uptr size, uptr user_begin,
                                          uptr user_size) const {
  uptr user_end = RoundDownTo(user_begin + user_size, ASAN_SHADOW_GRANULARITY);
  user_begin = RoundUpTo(user_begin, ASAN_SHADOW_GRANULARITY);
  // The secondary mapping will be returned to the user immediately, so there
  // is no value in poisoning it with non-zero just before Allocate() unpoisons
  // it. Just poison the head/tail that is invisible to Allocate().
  PoisonShadow(p, user_begin - p, kAsanHeapLeftRedzoneMagic);
  PoisonShadow(user_end, size - (user_end - p), kAsanHeapLeftRedzoneMagic);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}

void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  PoisonShadow(p, size, 0);
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededASanShadowMemory(p, size);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}
// We can not use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  return &ms->allocator_cache;
}

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}
void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
  quarantine_size_mb = f->quarantine_size_mb;
  thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
  min_redzone = f->redzone;
  max_redzone = f->max_redzone;
  may_return_null = cf->allocator_may_return_null;
  alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
  release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
}

void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
  f->quarantine_size_mb = quarantine_size_mb;
  f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
  f->redzone = min_redzone;
  f->max_redzone = max_redzone;
  cf->allocator_may_return_null = may_return_null;
  f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
  cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
}
struct Allocator {
  static const uptr kMaxAllowedMallocSize =
      FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);

  AsanAllocator allocator;
  AsanQuarantine quarantine;
  StaticSpinMutex fallback_mutex;
  AllocatorCache fallback_allocator_cache;
  QuarantineCache fallback_quarantine_cache;

  uptr max_user_defined_malloc_size;

  // ------------------- Options --------------------------
  atomic_uint16_t min_redzone;
  atomic_uint16_t max_redzone;
  atomic_uint8_t alloc_dealloc_mismatch;

  // ------------------- Initialization ------------------------
  explicit Allocator(LinkerInitialized)
      : quarantine(LINKER_INITIALIZED),
        fallback_quarantine_cache(LINKER_INITIALIZED) {}

  void CheckOptions(const AllocatorOptions &options) const {
    CHECK_GE(options.min_redzone, 16);
    CHECK_GE(options.max_redzone, options.min_redzone);
    CHECK_LE(options.max_redzone, 2048);
    CHECK(IsPowerOfTwo(options.min_redzone));
    CHECK(IsPowerOfTwo(options.max_redzone));
  }
  void SharedInitCode(const AllocatorOptions &options) {
    CheckOptions(options);
    quarantine.Init((uptr)options.quarantine_size_mb << 20,
                    (uptr)options.thread_local_quarantine_size_kb << 10);
    atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
                 memory_order_release);
    atomic_store(&min_redzone, options.min_redzone, memory_order_release);
    atomic_store(&max_redzone, options.max_redzone, memory_order_release);
  }

  void InitLinkerInitialized(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.InitLinkerInitialized(options.release_to_os_interval_ms);
    SharedInitCode(options);
    max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                       ? common_flags()->max_allocation_size_mb
                                             << 20
                                       : kMaxAllowedMallocSize;
  }
  void RePoisonChunk(uptr chunk) {
    // This could be a user-facing chunk (with redzones), or some internal
    // housekeeping chunk, like TransferBatch. Start by assuming the former.
    AsanChunk *ac = GetAsanChunk((void *)chunk);
    uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)chunk);
    if (ac && atomic_load(&ac->chunk_state, memory_order_acquire) ==
                  CHUNK_ALLOCATED) {
      uptr beg = ac->Beg();
      uptr end = ac->Beg() + ac->UsedSize();
      uptr chunk_end = chunk + allocated_size;
      if (chunk < beg && beg < end && end <= chunk_end) {
        // Looks like a valid AsanChunk in use, poison redzones only.
        PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
        uptr end_aligned_down = RoundDownTo(end, ASAN_SHADOW_GRANULARITY);
        FastPoisonShadowPartialRightRedzone(
            end_aligned_down, end - end_aligned_down,
            chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
        return;
      }
    }

    // This is either not an AsanChunk or freed or quarantined AsanChunk.
    // In either case, poison everything.
    PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
  }
  void ReInitialize(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
    SharedInitCode(options);

    // Poison all existing allocation's redzones.
    if (CanPoisonMemory()) {
      allocator.ForceLock();
      allocator.ForEachChunk(
          [](uptr chunk, void *alloc) {
            ((Allocator *)alloc)->RePoisonChunk(chunk);
          },
          this);
      allocator.ForceUnlock();
    }
  }
  void GetOptions(AllocatorOptions *options) const {
    options->quarantine_size_mb = quarantine.GetMaxSize() >> 20;
    options->thread_local_quarantine_size_kb =
        quarantine.GetMaxCacheSize() >> 10;
    options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
    options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
    options->may_return_null = AllocatorMayReturnNull();
    options->alloc_dealloc_mismatch =
        atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
    options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
  }
  // -------------------- Helper methods. -------------------------
  uptr ComputeRZLog(uptr user_requested_size) {
    u32 rz_log = user_requested_size <= 64 - 16            ? 0
                 : user_requested_size <= 128 - 32         ? 1
                 : user_requested_size <= 512 - 64         ? 2
                 : user_requested_size <= 4096 - 128       ? 3
                 : user_requested_size <= (1 << 14) - 256  ? 4
                 : user_requested_size <= (1 << 15) - 512  ? 5
                 : user_requested_size <= (1 << 16) - 1024 ? 6
                                                           : 7;
    u32 hdr_log = RZSize2Log(RoundUpToPowerOfTwo(sizeof(ChunkHeader)));
    u32 min_log = RZSize2Log(atomic_load(&min_redzone, memory_order_acquire));
    u32 max_log = RZSize2Log(atomic_load(&max_redzone, memory_order_acquire));
    return Min(Max(rz_log, Max(min_log, hdr_log)), Max(max_log, hdr_log));
  }
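  // For example, with the default redzone flags a 100-byte request falls into
  // the "<= 512 - 64" bucket and gets rz_log 2, i.e. a 64-byte redzone (before
  // clamping against min_redzone/max_redzone and the header size).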
  static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
    if (user_requested_alignment < 8)
      return 0;
    if (user_requested_alignment > 512)
      user_requested_alignment = 512;
    return Log2(user_requested_alignment) - 2;
  }

  static uptr ComputeUserAlignment(uptr user_requested_alignment_log) {
    if (user_requested_alignment_log == 0)
      return 0;
    return 1LL << (user_requested_alignment_log + 2);
  }
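  // The 3-bit encoding round-trips alignments of 8..512: e.g. alignment 16
  // encodes as Log2(16) - 2 == 2 and decodes back to 1 << (2 + 2) == 16. A
  // stored 0 means no special alignment (< 8) was requested.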
  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
                         AsanChunk *right_chunk) {
    if (!left_chunk)
      return right_chunk;
    if (!right_chunk)
      return left_chunk;
    // Prefer an allocated chunk over freed chunk and freed chunk
    // over available chunk.
    u8 left_state = atomic_load(&left_chunk->chunk_state, memory_order_relaxed);
    u8 right_state =
        atomic_load(&right_chunk->chunk_state, memory_order_relaxed);
    if (left_state != right_state) {
      if (left_state == CHUNK_ALLOCATED)
        return left_chunk;
      if (right_state == CHUNK_ALLOCATED)
        return right_chunk;
      if (left_state == CHUNK_QUARANTINE)
        return left_chunk;
      if (right_state == CHUNK_QUARANTINE)
        return right_chunk;
    }
    // Same chunk_state: choose based on offset.
    sptr l_offset = 0, r_offset = 0;
    CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
    CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }
  bool UpdateAllocationStack(uptr addr, BufferedStackTrace *stack) {
    AsanChunk *m = GetAsanChunkByAddr(addr);
    if (!m) return false;
    if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
      return false;
    if (m->Beg() != addr) return false;
    AsanThread *t = GetCurrentThread();
    m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));
    return true;
  }
  // -------------------- Allocation/Deallocation routines ---------------
  void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
                 AllocType alloc_type, bool can_fill) {
    if (UNLIKELY(!AsanInited()))
      AsanInitFromRtl();
    if (UNLIKELY(IsRssLimitExceeded())) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportRssLimitExceeded(stack);
    }
    Flags &fl = *flags();
    CHECK(stack);
    const uptr min_alignment = ASAN_SHADOW_GRANULARITY;
    const uptr user_requested_alignment_log =
        ComputeUserRequestedAlignmentLog(alignment);
    if (alignment < min_alignment)
      alignment = min_alignment;
    if (size == 0) {
      // We'd be happy to avoid allocating memory for zero-size requests, but
      // some programs/tests depend on this behavior and assume that malloc
      // would not return NULL even for zero-size allocations. Moreover, it
      // looks like operator new should never return NULL, and results of
      // consecutive "new" calls must be different even if the allocated size
      // is zero.
      size = 1;
    }
    CHECK(IsPowerOfTwo(alignment));
    uptr rz_log = ComputeRZLog(size);
    uptr rz_size = RZLog2Size(rz_log);
    uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
    uptr needed_size = rounded_size + rz_size;
    if (alignment > min_alignment)
      needed_size += alignment;
    bool from_primary = PrimaryAllocator::CanAllocate(needed_size, alignment);
    // If we are allocating from the secondary allocator, there will be no
    // automatic right redzone, so add the right redzone manually.
    if (!from_primary)
      needed_size += rz_size;
    CHECK(IsAligned(needed_size, min_alignment));
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
        size > max_user_defined_malloc_size) {
      if (AllocatorMayReturnNull()) {
        Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
               size);
        return nullptr;
      }
      uptr malloc_limit =
          Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
      ReportAllocationSizeTooBig(size, needed_size, malloc_limit, stack);
    }

    AsanThread *t = GetCurrentThread();
    void *allocated;
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocated = allocator.Allocate(cache, needed_size, 8);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocated = allocator.Allocate(cache, needed_size, 8);
    }
    if (UNLIKELY(!allocated)) {
      SetAllocatorOutOfMemory();
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportOutOfMemory(size, stack);
    }

    uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    uptr alloc_end = alloc_beg + needed_size;
    uptr user_beg = alloc_beg + rz_size;
    if (!IsAligned(user_beg, alignment))
      user_beg = RoundUpTo(user_beg, alignment);
    uptr user_end = user_beg + size;
    CHECK_LE(user_end, alloc_end);
    uptr chunk_beg = user_beg - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    m->alloc_type = alloc_type;
    CHECK(size);
    m->SetUsedSize(size);
    m->user_requested_alignment_log = user_requested_alignment_log;

    m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));

    if (!from_primary || *(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0) {
      // The allocator provides an unpoisoned chunk. This is possible for the
      // secondary allocator, or if CanPoisonMemory() was false for some time,
      // for example, due to flags()->start_disabled. Anyway, poison left and
      // right of the block before using it for anything else.
      uptr tail_beg = RoundUpTo(user_end, ASAN_SHADOW_GRANULARITY);
      uptr tail_end = alloc_beg + allocator.GetActuallyAllocatedSize(allocated);
      PoisonShadow(alloc_beg, user_beg - alloc_beg, kAsanHeapLeftRedzoneMagic);
      PoisonShadow(tail_beg, tail_end - tail_beg, kAsanHeapLeftRedzoneMagic);
    }

    uptr size_rounded_down_to_granularity =
        RoundDownTo(size, ASAN_SHADOW_GRANULARITY);
    // Unpoison the bulk of the memory region.
    if (size_rounded_down_to_granularity)
      PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
    // Deal with the end of the region if size is not aligned to granularity.
    if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
      u8 *shadow =
          (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
      *shadow = fl.poison_partial ? (size & (ASAN_SHADOW_GRANULARITY - 1)) : 0;
    }

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mallocs++;
    thread_stats.malloced += size;
    thread_stats.malloced_redzones += needed_size - size;
    if (needed_size > SizeClassMap::kMaxSize)
      thread_stats.malloc_large++;
    else
      thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;

    void *res = reinterpret_cast<void *>(user_beg);
    if (can_fill && fl.max_malloc_fill_size) {
      uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
      REAL(memset)(res, fl.malloc_fill_byte, fill_size);
    }
#if CAN_SANITIZE_LEAKS
    m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                 : __lsan::kDirectlyLeaked;
#endif
    // Must be the last mutation of metadata in this function.
    atomic_store(&m->chunk_state, CHUNK_ALLOCATED, memory_order_release);
    if (alloc_beg != chunk_beg) {
      CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
      reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
    }
    RunMallocHooks(res, size);
    return res;
  }
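  // Rough layout sketch for a plain malloc(100) with default settings
  // (8-byte shadow granularity, default redzone flags): ComputeRZLog gives a
  // 64-byte redzone, so needed_size = RoundUpTo(100, 8) + 64 = 168; the
  // ChunkHeader occupies the last 16 bytes of the left redzone, user memory
  // starts 64 bytes into the block, and the right redzone comes from the
  // size-class rounding in the primary allocator (or the extra rz_size added
  // above for the secondary).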
  // Set quarantine flag if chunk is allocated, issue ASan error report on
  // available and quarantined chunks. Return true on success, false otherwise.
  bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
                                              BufferedStackTrace *stack) {
    u8 old_chunk_state = CHUNK_ALLOCATED;
    // Flip the chunk_state atomically to avoid race on double-free.
    if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
                                        CHUNK_QUARANTINE,
                                        memory_order_acquire)) {
      ReportInvalidFree(ptr, old_chunk_state, stack);
      // It's not safe to push a chunk in quarantine on invalid free.
      return false;
    }
    CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
    // It was a user data.
    m->SetFreeContext(kInvalidTid, 0);
    return true;
  }
  // Expects the chunk to already be marked as quarantined by using
  // AtomicallySetQuarantineFlagIfAllocated.
  void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
    CHECK_EQ(atomic_load(&m->chunk_state, memory_order_relaxed),
             CHUNK_QUARANTINE);
    AsanThread *t = GetCurrentThread();
    m->SetFreeContext(t ? t->tid() : 0, StackDepotPut(*stack));

    // Push into quarantine.
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      AllocatorCache *ac = GetAllocatorCache(ms);
      quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m,
                     m->UsedSize());
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *ac = &fallback_allocator_cache;
      quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack),
                     m, m->UsedSize());
    }
  }
  void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
                  BufferedStackTrace *stack, AllocType alloc_type) {
    uptr p = reinterpret_cast<uptr>(ptr);
    if (p == 0) return;

    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
    // malloc. Don't report an invalid free in this case.
    if (SANITIZER_WINDOWS &&
        !get_allocator().PointerIsMine(ptr)) {
      if (!IsSystemHeapAddress(p))
        ReportFreeNotMalloced(p, stack);
      return;
    }

    RunFreeHooks(ptr);

    // Must mark the chunk as quarantined before any changes to its metadata.
    // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
    if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;

    if (m->alloc_type != alloc_type) {
      if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
        ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
                                (AllocType)alloc_type);
      }
    }
    if (flags()->new_delete_type_mismatch &&
        (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) &&
        ((delete_size && delete_size != m->UsedSize()) ||
         ComputeUserRequestedAlignmentLog(delete_alignment) !=
             m->user_requested_alignment_log)) {
      ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack);
    }

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.frees++;
    thread_stats.freed += m->UsedSize();

    QuarantineChunk(m, ptr, stack);
  }
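  // Note that Deallocate() itself never returns memory to the underlying
  // allocator: it only flips the chunk to CHUNK_QUARANTINE, records the free
  // context, and pushes the chunk into the quarantine. The poisoning is done
  // by QuarantineCallback::PreQuarantine() when the chunk is pushed, and the
  // real deallocation by Recycle() once the chunk leaves the quarantine.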
  void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
    CHECK(old_ptr && new_size);
    uptr p = reinterpret_cast<uptr>(old_ptr);
    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.reallocs++;
    thread_stats.realloced += new_size;

    void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
    if (new_ptr) {
      u8 chunk_state = atomic_load(&m->chunk_state, memory_order_acquire);
      if (chunk_state != CHUNK_ALLOCATED)
        ReportInvalidFree(old_ptr, chunk_state, stack);
      CHECK_NE(REAL(memcpy), nullptr);
      uptr memcpy_size = Min(new_size, m->UsedSize());
      // If realloc() races with free(), we may start copying freed memory.
      // However, we will report racy double-free later anyway.
      REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
      Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
    }
    return new_ptr;
  }
  void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportCallocOverflow(nmemb, size, stack);
    }
    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
    // If the memory comes from the secondary allocator no need to clear it
    // as it comes directly from mmap.
    if (ptr && allocator.FromPrimary(ptr))
      REAL(memset)(ptr, 0, nmemb * size);
    return ptr;
  }
  void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
    if (chunk_state == CHUNK_QUARANTINE)
      ReportDoubleFree((uptr)ptr, stack);
    else
      ReportFreeNotMalloced((uptr)ptr, stack);
  }

  void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) {
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack));
    allocator.SwallowCache(ac);
  }
  // -------------------------- Chunk lookup ----------------------

  // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
  // Returns nullptr if AsanChunk is not yet initialized just after
  // get_allocator().Allocate(), or is being destroyed just before
  // get_allocator().Deallocate().
  AsanChunk *GetAsanChunk(void *alloc_beg) {
    if (!alloc_beg)
      return nullptr;
    AsanChunk *p = reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Get();
    if (!p) {
      if (!allocator.FromPrimary(alloc_beg))
        return nullptr;
      p = reinterpret_cast<AsanChunk *>(alloc_beg);
    }
    u8 state = atomic_load(&p->chunk_state, memory_order_relaxed);
    // It does not guarantee that Chunk is initialized, but it's
    // definitely not for any other value.
    if (state == CHUNK_ALLOCATED || state == CHUNK_QUARANTINE)
      return p;
    return nullptr;
  }
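  // Lookup mirrors the two placements used by Allocate(): when
  // alloc_beg != chunk_beg, Allocate() publishes the header address through
  // LargeChunkHeader and its magic value; without that magic, the chunk is
  // only recognized if it sits right at the beginning of a primary block.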
  AsanChunk *GetAsanChunkByAddr(uptr p) {
    void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  // Allocator must be locked when this function is called.
  AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
    void *alloc_beg =
        allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }
  uptr AllocationSize(uptr p) {
    AsanChunk *m = GetAsanChunkByAddr(p);
    if (!m) return 0;
    if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
      return 0;
    if (m->Beg() != p) return 0;
    return m->UsedSize();
  }

  uptr AllocationSizeFast(uptr p) {
    return reinterpret_cast<AsanChunk *>(p - kChunkHeaderSize)->UsedSize();
  }
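  // Unlike AllocationSize(), the fast variant skips the chunk-state and
  // begin-address checks, so the caller must already know that p is the start
  // of a live allocation (see the DCHECKs in
  // __sanitizer_get_allocated_size_fast below).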
  AsanChunkView FindHeapChunkByAddress(uptr addr) {
    AsanChunk *m1 = GetAsanChunkByAddr(addr);
    sptr offset = 0;
    if (!m1 || AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
      // The address is in the chunk's left redzone, so maybe it is actually
      // a right buffer overflow from the other chunk before.
      // Search a bit before to see if there is another chunk.
      AsanChunk *m2 = nullptr;
      for (uptr l = 1; l < GetPageSizeCached(); l++) {
        m2 = GetAsanChunkByAddr(addr - l);
        if (m2 == m1) continue;  // Still the same chunk.
        break;
      }
      if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
        m1 = ChooseChunk(addr, m2, m1);
    }
    return AsanChunkView(m1);
  }
  void Purge(BufferedStackTrace *stack) {
    AsanThread *t = GetCurrentThread();
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      quarantine.DrainAndRecycle(GetQuarantineCache(ms),
                                 QuarantineCallback(GetAllocatorCache(ms),
                                                    stack));
    }
    {
      SpinMutexLock l(&fallback_mutex);
      quarantine.DrainAndRecycle(&fallback_quarantine_cache,
                                 QuarantineCallback(&fallback_allocator_cache,
                                                    stack));
    }

    allocator.ForceReleaseToOS();
  }

  void PrintStats() {
    allocator.PrintStats();
    quarantine.PrintStats();
  }
  void ForceLock() SANITIZER_ACQUIRE(fallback_mutex) {
    allocator.ForceLock();
    fallback_mutex.Lock();
  }

  void ForceUnlock() SANITIZER_RELEASE(fallback_mutex) {
    fallback_mutex.Unlock();
    allocator.ForceUnlock();
  }
};

static Allocator instance(LINKER_INITIALIZED);

static AsanAllocator &get_allocator() {
  return instance.allocator;
}
bool AsanChunkView::IsValid() const {
  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) !=
                       CHUNK_INVALID;
}
bool AsanChunkView::IsAllocated() const {
  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
                       CHUNK_ALLOCATED;
}
bool AsanChunkView::IsQuarantined() const {
  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
                       CHUNK_QUARANTINE;
}
uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
u32 AsanChunkView::UserRequestedAlignment() const {
  return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
}
uptr AsanChunkView::AllocTid() const {
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetAllocContext(tid, stack);
  return tid;
}

uptr AsanChunkView::FreeTid() const {
  if (!IsQuarantined())
    return kInvalidTid;
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetFreeContext(tid, stack);
  return tid;
}

AllocType AsanChunkView::GetAllocType() const {
  return (AllocType)chunk_->alloc_type;
}

u32 AsanChunkView::GetAllocStackId() const {
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetAllocContext(tid, stack);
  return stack;
}

u32 AsanChunkView::GetFreeStackId() const {
  if (!IsQuarantined())
    return 0;
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetFreeContext(tid, stack);
  return stack;
}
void InitializeAllocator(const AllocatorOptions &options) {
  instance.InitLinkerInitialized(options);
}

void ReInitializeAllocator(const AllocatorOptions &options) {
  instance.ReInitialize(options);
}

void GetAllocatorOptions(AllocatorOptions *options) {
  instance.GetOptions(options);
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  return instance.FindHeapChunkByAddress(addr);
}
AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
  return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
}

void AsanThreadLocalMallocStorage::CommitBack() {
  GET_STACK_TRACE_MALLOC;
  instance.CommitBack(this, &stack);
}

void PrintInternalAllocatorStats() {
  instance.PrintStats();
}
void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, 0, 0, stack, alloc_type);
}

void asan_delete(void *ptr, uptr size, uptr alignment,
                 BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, size, alignment, stack, alloc_type);
}

void *asan_malloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
}

void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
}
void *asan_reallocarray(void *p, uptr nmemb, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return asan_realloc(p, nmemb * size, stack);
}
void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  if (!p)
    return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
  if (size == 0) {
    if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
      instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
      return nullptr;
    }
    // Allocate a size of 1 if we shouldn't free() on Realloc to 0
    size = 1;
  }
  return SetErrnoOnNull(instance.Reallocate(p, size, stack));
}
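// Thus realloc(p, 0) either frees p and returns null or degrades to a 1-byte
// allocation, depending on
// flags()->allocator_frees_and_returns_null_on_realloc_zero.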
void *asan_valloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(
      instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
}

void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(
      instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
}
void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                    AllocType alloc_type) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, alloc_type, true));
}

void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, FROM_MALLOC, true));
}
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}
uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
  if (!ptr) return 0;
  uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    GET_STACK_TRACE_FATAL(pc, bp);
    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
  }
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
}
void asan_mz_force_lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  instance.ForceLock();
}

void asan_mz_force_unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  instance.ForceUnlock();
}

}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockAllocator() {
  __asan::get_allocator().ForceLock();
}

void UnlockAllocator() {
  __asan::get_allocator().ForceUnlock();
}
void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::get_allocator();
  *end = *begin + sizeof(__asan::get_allocator());
}
uptr PointsIntoChunk(void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
  if (!m || atomic_load(&m->chunk_state, memory_order_acquire) !=
                __asan::CHUNK_ALLOCATED)
    return 0;
  uptr chunk = m->Beg();
  if (m->AddrIsInside(addr))
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(), addr))
    return chunk;
  return 0;
}
uptr GetUserBegin(uptr chunk) {
  // FIXME: All usecases provide chunk address, GetAsanChunkByAddrFastLocked is
  // not needed.
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
  return m ? m->Beg() : 0;
}

uptr GetUserAddr(uptr chunk) {
  return chunk;
}
LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = chunk ? reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize)
                    : nullptr;
}

bool LsanMetadata::allocated() const {
  if (!metadata_)
    return false;
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return atomic_load(&m->chunk_state, memory_order_relaxed) ==
         __asan::CHUNK_ALLOCATED;
}
ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize();
}

u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  u32 tid = 0;
  u32 stack = 0;
  m->GetAllocContext(tid, stack);
  return stack;
}
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::get_allocator().ForEachChunk(callback, arg);
}
IgnoreObjectResult IgnoreObject(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
  if (!m ||
      (atomic_load(&m->chunk_state, memory_order_acquire) !=
       __asan::CHUNK_ALLOCATED) ||
      !m->AddrIsInside(addr)) {
    return kIgnoreObjectInvalid;
  }
  if (m->lsan_tag == kIgnored)
    return kIgnoreObjectAlreadyIgnored;
  m->lsan_tag = __lsan::kIgnored;
  return kIgnoreObjectSuccess;
}

}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;
static const void *AllocationBegin(const void *p) {
  AsanChunk *m = __asan::instance.GetAsanChunkByAddr((uptr)p);
  if (!m)
    return nullptr;
  if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
    return nullptr;
  if (m->UsedSize() == 0)
    return nullptr;
  return (const void *)(m->Beg());
}

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}
int __sanitizer_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return instance.AllocationSize(ptr) > 0;
}
uptr __sanitizer_get_allocated_size(const void *p) {
  if (p == 0) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = instance.AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}

uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = instance.AllocationSizeFast(reinterpret_cast<uptr>(p));
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}
const void *__sanitizer_get_allocated_begin(const void *p) {
  return AllocationBegin(p);
}

void __sanitizer_purge_allocator() {
  GET_STACK_TRACE_MALLOC;
  instance.Purge(&stack);
}

int __asan_update_allocation_context(void* addr) {
  GET_STACK_TRACE_MALLOC;
  return instance.UpdateAllocationStack((uptr)addr, &stack);
}