//===-- asan_allocator.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, 2-nd version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"

#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

namespace __asan {
// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocations, larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}
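// Illustrative mapping of the 3-bit redzone log to byte sizes (derived from
// RZLog2Size above): rz_log 0 -> 16, 1 -> 32, 2 -> 64, 3 -> 128, ... 7 -> 2048.
// For example, RZSize2Log(64) == 2 and RZLog2Size(2) == 64.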
static AsanAllocator &get_allocator();
static void AtomicContextStore(volatile atomic_uint64_t *atomic_context,
                               u32 tid, u32 stack) {
  u64 context = tid;
  context <<= 32;
  context += stack;
  atomic_store(atomic_context, context, memory_order_relaxed);
}

static void AtomicContextLoad(const volatile atomic_uint64_t *atomic_context,
                              u32 &tid, u32 &stack) {
  u64 context = atomic_load(atomic_context, memory_order_relaxed);
  stack = context;
  context >>= 32;
  tid = context;
}
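// For illustration (following the packing above): tid occupies the high 32
// bits and the stack-depot id the low 32 bits, so tid=5, stack=0x1234 is
// stored as the single u64 0x0000000500001234 and unpacked back to the same
// pair by AtomicContextLoad.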
// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If the left redzone is greater than the ChunkHeader size we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
//   M -- magic value kAllocBegMagic
//   B -- address of ChunkHeader pointing to the first 'H'
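// Illustrative example (assuming the 16-byte minimum redzone): for a small
// malloc the left redzone is exactly 16 bytes, so the ChunkHeader itself fills
// the whole left redzone and no kAllocBegMagic prefix is needed. For a larger
// or highly aligned allocation the left redzone is bigger, and the first two
// uptr words of the block then hold the magic value and the ChunkHeader
// address, as drawn above.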
class ChunkHeader {
 public:
  atomic_uint8_t chunk_state;
  u8 alloc_type : 2;
  u8 lsan_tag : 2;

  // align < 8 -> 0
  // else -> log2(min(align, 512)) - 2
  u8 user_requested_alignment_log : 3;

 private:
  u16 user_requested_size_hi;
  u32 user_requested_size_lo;
  atomic_uint64_t alloc_context_id;

 public:
  uptr UsedSize() const {
    static_assert(sizeof(user_requested_size_lo) == 4,
                  "Expression below requires this");
    return FIRST_32_SECOND_64(0, ((uptr)user_requested_size_hi << 32)) +
           user_requested_size_lo;
  }

  void SetUsedSize(uptr size) {
    user_requested_size_lo = size;
    static_assert(sizeof(user_requested_size_lo) == 4,
                  "Expression below requires this");
    user_requested_size_hi = FIRST_32_SECOND_64(0, size >> 32);
    CHECK_EQ(UsedSize(), size);
  }

  void SetAllocContext(u32 tid, u32 stack) {
    AtomicContextStore(&alloc_context_id, tid, stack);
  }

  void GetAllocContext(u32 &tid, u32 &stack) const {
    AtomicContextLoad(&alloc_context_id, tid, stack);
  }
};
class ChunkBase : public ChunkHeader {
  atomic_uint64_t free_context_id;

 public:
  void SetFreeContext(u32 tid, u32 stack) {
    AtomicContextStore(&free_context_id, tid, stack);
  }

  void GetFreeContext(u32 &tid, u32 &stack) const {
    AtomicContextLoad(&free_context_id, tid, stack);
  }
};
static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);
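// For illustration (on a typical 64-bit target; only the checks above are
// guaranteed): ChunkHeader packs into exactly 16 bytes and ChunkBase adds one
// 8-byte field, so kChunkHeader2Size is 8. Those extra bytes overlap the start
// of the user region, which is why FillChunk below skips them when scribbling.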
enum {
  // Either just allocated by underlying allocator, but AsanChunk is not yet
  // ready, or almost returned to underlying allocator and AsanChunk is already
  // meaningless.
  CHUNK_INVALID = 0,
  // The chunk is allocated and not yet freed.
  CHUNK_ALLOCATED = 2,
  // The chunk was freed and put into quarantine zone.
  CHUNK_QUARANTINE = 3,
};
class AsanChunk : public ChunkBase {
 public:
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  bool AddrIsInside(uptr addr) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize());
  }
};
class LargeChunkHeader {
  static constexpr uptr kAllocBegMagic =
      FIRST_32_SECOND_64(0xCC6E96B9, 0xCC6E96B9CC6E96B9ULL);
  atomic_uintptr_t magic;
  AsanChunk *chunk_header;

 public:
  AsanChunk *Get() const {
    return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic
               ? chunk_header
               : nullptr;
  }

  void Set(AsanChunk *p) {
    if (p) {
      chunk_header = p;
      atomic_store(&magic, kAllocBegMagic, memory_order_release);
      return;
    }

    uptr old = kAllocBegMagic;
    if (!atomic_compare_exchange_strong(&magic, &old, 0,
                                        memory_order_release)) {
      CHECK_EQ(old, kAllocBegMagic);
    }
  }
};
static void FillChunk(AsanChunk *m) {
  // FIXME: Use ReleaseMemoryPagesToOS.
  Flags &fl = *flags();

  if (fl.max_free_fill_size > 0) {
    // We have to skip the chunk header, it contains free_context_id.
    uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
    if (m->UsedSize() >= kChunkHeader2Size) {  // Skip Header2 in user area.
      uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
      size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
      REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
    }
  }
}
struct QuarantineCallback {
  QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
      : cache_(cache),
        stack_(stack) {
  }

  void PreQuarantine(AsanChunk *m) const {
    FillChunk(m);
    // Poison the region.
    PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
                 kAsanHeapFreeMagic);
  }

  void Recycle(AsanChunk *m) const {
    void *p = get_allocator().GetBlockBegin(m);

    // The secondary will immediately unpoison and unmap the memory, so this
    // branch is unnecessary.
    if (get_allocator().FromPrimary(p)) {
      if (p != m) {
        // Clear the magic value, as allocator internals may overwrite the
        // contents of deallocated chunk, confusing GetAsanChunk lookup.
        reinterpret_cast<LargeChunkHeader *>(p)->Set(nullptr);
      }

      u8 old_chunk_state = CHUNK_QUARANTINE;
      if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
                                          CHUNK_INVALID,
                                          memory_order_acquire)) {
        CHECK_EQ(old_chunk_state, CHUNK_QUARANTINE);
      }

      PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
                   kAsanHeapLeftRedzoneMagic);
    }

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    get_allocator().Deallocate(cache_, p);
  }
  void RecyclePassThrough(AsanChunk *m) const {
    // Recycle for the secondary will immediately unpoison and unmap the
    // memory, so quarantine preparation is unnecessary.
    if (get_allocator().FromPrimary(m)) {
      // The primary allocation may need pattern fill if enabled.
      FillChunk(m);
    }
    Recycle(m);
  }

  void *Allocate(uptr size) const {
    void *res = get_allocator().Allocate(cache_, size, 1);
    // TODO(alekseys): Consider making quarantine OOM-friendly.
    if (UNLIKELY(!res))
      ReportOutOfMemory(size, stack_);
    return res;
  }

  void Deallocate(void *p) const { get_allocator().Deallocate(cache_, p); }
 private:
  AllocatorCache *const cache_;
  BufferedStackTrace *const stack_;
};

typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
  PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}
void AsanMapUnmapCallback::OnMapSecondary(uptr p, uptr size, uptr user_begin,
                                          uptr user_size) const {
  uptr user_end = RoundDownTo(user_begin + user_size, ASAN_SHADOW_GRANULARITY);
  user_begin = RoundUpTo(user_begin, ASAN_SHADOW_GRANULARITY);
  // The secondary mapping will be immediately returned to the user, so there
  // is no value in poisoning it with non-zero just before Allocate() unpoisons
  // it. Just poison the head/tail that is invisible to Allocate().
  PoisonShadow(p, user_begin - p, kAsanHeapLeftRedzoneMagic);
  PoisonShadow(user_end, size - (user_end - p), kAsanHeapLeftRedzoneMagic);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}
void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  PoisonShadow(p, size, 0);
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededASanShadowMemory(p, size);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}
// We can not use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  return &ms->allocator_cache;
}

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}
void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
  quarantine_size_mb = f->quarantine_size_mb;
  thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
  min_redzone = f->redzone;
  max_redzone = f->max_redzone;
  may_return_null = cf->allocator_may_return_null;
  alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
  release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
}

void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
  f->quarantine_size_mb = quarantine_size_mb;
  f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
  f->redzone = min_redzone;
  f->max_redzone = max_redzone;
  cf->allocator_may_return_null = may_return_null;
  f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
  cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
}
struct Allocator {
  static const uptr kMaxAllowedMallocSize =
      FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);

  AsanAllocator allocator;
  AsanQuarantine quarantine;
  StaticSpinMutex fallback_mutex;
  AllocatorCache fallback_allocator_cache;
  QuarantineCache fallback_quarantine_cache;

  uptr max_user_defined_malloc_size;

  // ------------------- Options --------------------------
  atomic_uint16_t min_redzone;
  atomic_uint16_t max_redzone;
  atomic_uint8_t alloc_dealloc_mismatch;
  // ------------------- Initialization ------------------------
  explicit Allocator(LinkerInitialized)
      : quarantine(LINKER_INITIALIZED),
        fallback_quarantine_cache(LINKER_INITIALIZED) {}

  void CheckOptions(const AllocatorOptions &options) const {
    CHECK_GE(options.min_redzone, 16);
    CHECK_GE(options.max_redzone, options.min_redzone);
    CHECK_LE(options.max_redzone, 2048);
    CHECK(IsPowerOfTwo(options.min_redzone));
    CHECK(IsPowerOfTwo(options.max_redzone));
  }
  void SharedInitCode(const AllocatorOptions &options) {
    CheckOptions(options);
    quarantine.Init((uptr)options.quarantine_size_mb << 20,
                    (uptr)options.thread_local_quarantine_size_kb << 10);
    atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
                 memory_order_release);
    atomic_store(&min_redzone, options.min_redzone, memory_order_release);
    atomic_store(&max_redzone, options.max_redzone, memory_order_release);
  }
  void InitLinkerInitialized(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.InitLinkerInitialized(options.release_to_os_interval_ms);
    SharedInitCode(options);
    max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                       ? common_flags()->max_allocation_size_mb
                                             << 20
                                       : kMaxAllowedMallocSize;
  }
  void RePoisonChunk(uptr chunk) {
    // This could be a user-facing chunk (with redzones), or some internal
    // housekeeping chunk, like TransferBatch. Start by assuming the former.
    AsanChunk *ac = GetAsanChunk((void *)chunk);
    uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)chunk);
    if (ac && atomic_load(&ac->chunk_state, memory_order_acquire) ==
                  CHUNK_ALLOCATED) {
      uptr beg = ac->Beg();
      uptr end = ac->Beg() + ac->UsedSize();
      uptr chunk_end = chunk + allocated_size;
      if (chunk < beg && beg < end && end <= chunk_end) {
        // Looks like a valid AsanChunk in use, poison redzones only.
        PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
        uptr end_aligned_down = RoundDownTo(end, ASAN_SHADOW_GRANULARITY);
        FastPoisonShadowPartialRightRedzone(
            end_aligned_down, end - end_aligned_down,
            chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
        return;
      }
    }

    // This is either not an AsanChunk or freed or quarantined AsanChunk.
    // In either case, poison everything.
    PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
  }
  void ReInitialize(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
    SharedInitCode(options);

    // Poison all existing allocations' redzones.
    if (CanPoisonMemory()) {
      allocator.ForceLock();
      allocator.ForEachChunk(
          [](uptr chunk, void *alloc) {
            ((Allocator *)alloc)->RePoisonChunk(chunk);
          },
          this);
      allocator.ForceUnlock();
    }
  }
  void GetOptions(AllocatorOptions *options) const {
    options->quarantine_size_mb = quarantine.GetMaxSize() >> 20;
    options->thread_local_quarantine_size_kb =
        quarantine.GetMaxCacheSize() >> 10;
    options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
    options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
    options->may_return_null = AllocatorMayReturnNull();
    options->alloc_dealloc_mismatch =
        atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
    options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
  }
  // -------------------- Helper methods. -------------------------
  uptr ComputeRZLog(uptr user_requested_size) {
    u32 rz_log = user_requested_size <= 64 - 16            ? 0
                 : user_requested_size <= 128 - 32         ? 1
                 : user_requested_size <= 512 - 64         ? 2
                 : user_requested_size <= 4096 - 128       ? 3
                 : user_requested_size <= (1 << 14) - 256  ? 4
                 : user_requested_size <= (1 << 15) - 512  ? 5
                 : user_requested_size <= (1 << 16) - 1024 ? 6
                                                           : 7;
    u32 hdr_log = RZSize2Log(RoundUpToPowerOfTwo(sizeof(ChunkHeader)));
    u32 min_log = RZSize2Log(atomic_load(&min_redzone, memory_order_acquire));
    u32 max_log = RZSize2Log(atomic_load(&max_redzone, memory_order_acquire));
    return Min(Max(rz_log, Max(min_log, hdr_log)), Max(max_log, hdr_log));
  }
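  // Worked example, assuming the default flags (redzone=16, max_redzone=2048):
  // for user_requested_size == 100 the ladder above picks rz_log == 2
  // (100 > 96 but 100 <= 448); min_log == 0 and hdr_log == 0, so the final
  // redzone is RZLog2Size(2) == 64 bytes.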
  static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
    if (user_requested_alignment < 8)
      return 0;
    if (user_requested_alignment > 512)
      user_requested_alignment = 512;
    return Log2(user_requested_alignment) - 2;
  }

  static uptr ComputeUserAlignment(uptr user_requested_alignment_log) {
    if (user_requested_alignment_log == 0)
      return 0;
    return 1LL << (user_requested_alignment_log + 2);
  }
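  // Round-trip example for the 3-bit encoding above: a requested alignment of
  // 32 is stored as Log2(32) - 2 == 3 and decoded as 1 << (3 + 2) == 32;
  // alignments below 8 are stored as 0 and decoded as 0 ("no special
  // alignment"). The largest encodable alignment is 512 (encoded as 7).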
  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
                         AsanChunk *right_chunk) {
    if (!left_chunk)
      return right_chunk;
    if (!right_chunk)
      return left_chunk;
    // Prefer an allocated chunk over a freed chunk, and a freed chunk
    // over an available chunk.
    u8 left_state = atomic_load(&left_chunk->chunk_state, memory_order_relaxed);
    u8 right_state =
        atomic_load(&right_chunk->chunk_state, memory_order_relaxed);
    if (left_state != right_state) {
      if (left_state == CHUNK_ALLOCATED)
        return left_chunk;
      if (right_state == CHUNK_ALLOCATED)
        return right_chunk;
      if (left_state == CHUNK_QUARANTINE)
        return left_chunk;
      if (right_state == CHUNK_QUARANTINE)
        return right_chunk;
    }
    // Same chunk_state: choose based on offset.
    sptr l_offset = 0, r_offset = 0;
    CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
    CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }
  bool UpdateAllocationStack(uptr addr, BufferedStackTrace *stack) {
    AsanChunk *m = GetAsanChunkByAddr(addr);
    if (!m) return false;
    if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
      return false;
    if (m->Beg() != addr) return false;
    AsanThread *t = GetCurrentThread();
    m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));
    return true;
  }
  // -------------------- Allocation/Deallocation routines ---------------
  void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
                 AllocType alloc_type, bool can_fill) {
    if (UNLIKELY(!AsanInited()))
      AsanInitFromRtl();
    if (UNLIKELY(IsRssLimitExceeded())) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportRssLimitExceeded(stack);
    }
    Flags &fl = *flags();
    CHECK(stack);
    const uptr min_alignment = ASAN_SHADOW_GRANULARITY;
    const uptr user_requested_alignment_log =
        ComputeUserRequestedAlignmentLog(alignment);
    if (alignment < min_alignment)
      alignment = min_alignment;
    // We'd be happy to avoid allocating memory for zero-size requests, but
    // some programs/tests depend on this behavior and assume that malloc
    // would not return NULL even for zero-size allocations. Moreover, it
    // looks like operator new should never return NULL, and results of
    // consecutive "new" calls must be different even if the allocated size
    // is zero.
    if (size == 0)
      size = 1;
    CHECK(IsPowerOfTwo(alignment));
    uptr rz_log = ComputeRZLog(size);
    uptr rz_size = RZLog2Size(rz_log);
    uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
    uptr needed_size = rounded_size + rz_size;
    if (alignment > min_alignment)
      needed_size += alignment;
    bool from_primary = PrimaryAllocator::CanAllocate(needed_size, alignment);
    // If we are allocating from the secondary allocator, there will be no
    // automatic right redzone, so add the right redzone manually.
    if (!from_primary)
      needed_size += rz_size;
    CHECK(IsAligned(needed_size, min_alignment));
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
        size > max_user_defined_malloc_size) {
      if (AllocatorMayReturnNull()) {
        Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
               size);
        return nullptr;
      }
      uptr malloc_limit =
          Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
      ReportAllocationSizeTooBig(size, needed_size, malloc_limit, stack);
    }
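    // Worked example, assuming default flags and an 8-byte shadow granularity:
    // malloc(100) arrives here with alignment 8, so min_alignment == 8 and
    // rz_log == 2 gives rz_size == 64; rounded_size == RoundUpTo(100, 8) ==
    // 104 and needed_size == 104 + 64 == 168 bytes are requested from the
    // primary allocator (no extra padding, since the requested alignment is
    // not greater than min_alignment).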
    AsanThread *t = GetCurrentThread();
    void *allocated;
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocated = allocator.Allocate(cache, needed_size, 8);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocated = allocator.Allocate(cache, needed_size, 8);
    }
    if (UNLIKELY(!allocated)) {
      SetAllocatorOutOfMemory();
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportOutOfMemory(size, stack);
    }
    uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    uptr alloc_end = alloc_beg + needed_size;
    uptr user_beg = alloc_beg + rz_size;
    if (!IsAligned(user_beg, alignment))
      user_beg = RoundUpTo(user_beg, alignment);
    uptr user_end = user_beg + size;
    CHECK_LE(user_end, alloc_end);
    uptr chunk_beg = user_beg - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    m->alloc_type = alloc_type;
    m->SetUsedSize(size);
    m->user_requested_alignment_log = user_requested_alignment_log;

    m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));
    if (!from_primary || *(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0) {
      // The allocator provides an unpoisoned chunk. This is possible for the
      // secondary allocator, or if CanPoisonMemory() was false for some time,
      // for example, due to flags()->start_disabled. Anyway, poison left and
      // right of the block before using it for anything else.
      uptr tail_beg = RoundUpTo(user_end, ASAN_SHADOW_GRANULARITY);
      uptr tail_end = alloc_beg + allocator.GetActuallyAllocatedSize(allocated);
      PoisonShadow(alloc_beg, user_beg - alloc_beg, kAsanHeapLeftRedzoneMagic);
      PoisonShadow(tail_beg, tail_end - tail_beg, kAsanHeapLeftRedzoneMagic);
    }
    uptr size_rounded_down_to_granularity =
        RoundDownTo(size, ASAN_SHADOW_GRANULARITY);
    // Unpoison the bulk of the memory region.
    if (size_rounded_down_to_granularity)
      PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
    // Deal with the end of the region if size is not aligned to granularity.
    if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
      u8 *shadow =
          (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
      *shadow = fl.poison_partial ? (size & (ASAN_SHADOW_GRANULARITY - 1)) : 0;
    }
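    // For illustration: with size == 100 and an 8-byte shadow granularity,
    // 96 bytes are unpoisoned via whole shadow bytes and the last shadow byte
    // is set to 4 (100 & 7), so only the first 4 bytes of that final granule
    // are addressable (assuming poison_partial, which is on by default).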
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mallocs++;
    thread_stats.malloced += size;
    thread_stats.malloced_redzones += needed_size - size;
    if (needed_size > SizeClassMap::kMaxSize)
      thread_stats.malloc_large++;
    else
      thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;
    void *res = reinterpret_cast<void *>(user_beg);
    if (can_fill && fl.max_malloc_fill_size) {
      uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
      REAL(memset)(res, fl.malloc_fill_byte, fill_size);
    }
#if CAN_SANITIZE_LEAKS
    m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                 : __lsan::kDirectlyLeaked;
#endif
    // Must be the last mutation of metadata in this function.
    atomic_store(&m->chunk_state, CHUNK_ALLOCATED, memory_order_release);
    if (alloc_beg != chunk_beg) {
      CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
      reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
    }
    RunMallocHooks(res, size);
    return res;
  }
  // Set quarantine flag if chunk is allocated, issue ASan error report on
  // available and quarantined chunks. Return true on success, false otherwise.
  bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
                                              BufferedStackTrace *stack) {
    u8 old_chunk_state = CHUNK_ALLOCATED;
    // Flip the chunk_state atomically to avoid race on double-free.
    if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
                                        CHUNK_QUARANTINE,
                                        memory_order_acquire)) {
      ReportInvalidFree(ptr, old_chunk_state, stack);
      // It's not safe to push a chunk in quarantine on invalid free.
      return false;
    }
    CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
    // It was user data.
    m->SetFreeContext(kInvalidTid, 0);
    return true;
  }
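  // For illustration: if two threads race to free the same pointer, only one
  // compare-exchange above succeeds in moving CHUNK_ALLOCATED ->
  // CHUNK_QUARANTINE; the losing thread observes CHUNK_QUARANTINE in
  // old_chunk_state and reports a double-free instead of corrupting metadata.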
  // Expects the chunk to already be marked as quarantined by using
  // AtomicallySetQuarantineFlagIfAllocated.
  void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
    CHECK_EQ(atomic_load(&m->chunk_state, memory_order_relaxed),
             CHUNK_QUARANTINE);
    AsanThread *t = GetCurrentThread();
    m->SetFreeContext(t ? t->tid() : 0, StackDepotPut(*stack));

    // Push into quarantine.
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      AllocatorCache *ac = GetAllocatorCache(ms);
      quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m,
                     m->UsedSize());
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *ac = &fallback_allocator_cache;
      quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack),
                     m, m->UsedSize());
    }
  }
  void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
                  BufferedStackTrace *stack, AllocType alloc_type) {
    uptr p = reinterpret_cast<uptr>(ptr);
    if (p == 0)
      return;

    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
    // malloc. Don't report an invalid free in this case.
    if (SANITIZER_WINDOWS &&
        !get_allocator().PointerIsMine(ptr)) {
      if (!IsSystemHeapAddress(p))
        ReportFreeNotMalloced(p, stack);
      return;
    }

    if (RunFreeHooks(ptr)) {
      // Someone used __sanitizer_ignore_free_hook() and decided that they
      // didn't want the memory to be freed right now.
      // When they call free() on this pointer again at a later time, we should
      // ignore the alloc-type mismatch and allow them to deallocate the pointer
      // through free(), rather than the initial alloc type.
      m->alloc_type = FROM_MALLOC;
      return;
    }

    // Must mark the chunk as quarantined before any changes to its metadata.
    // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
    if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;

    if (m->alloc_type != alloc_type) {
      if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
        ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
                                (AllocType)alloc_type);
      }
    } else {
      if (flags()->new_delete_type_mismatch &&
          (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) &&
          ((delete_size && delete_size != m->UsedSize()) ||
           ComputeUserRequestedAlignmentLog(delete_alignment) !=
               m->user_requested_alignment_log)) {
        ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack);
      }
    }

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.frees++;
    thread_stats.freed += m->UsedSize();

    QuarantineChunk(m, ptr, stack);
  }
  void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
    CHECK(old_ptr && new_size);
    uptr p = reinterpret_cast<uptr>(old_ptr);
    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.reallocs++;
    thread_stats.realloced += new_size;

    void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
    if (new_ptr) {
      u8 chunk_state = atomic_load(&m->chunk_state, memory_order_acquire);
      if (chunk_state != CHUNK_ALLOCATED)
        ReportInvalidFree(old_ptr, chunk_state, stack);
      CHECK_NE(REAL(memcpy), nullptr);
      uptr memcpy_size = Min(new_size, m->UsedSize());
      // If realloc() races with free(), we may start copying freed memory.
      // However, we will report racy double-free later anyway.
      REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
      Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
    }
    return new_ptr;
  }
  void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportCallocOverflow(nmemb, size, stack);
    }
    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
    // If the memory comes from the secondary allocator no need to clear it
    // as it comes directly from mmap.
    if (ptr && allocator.FromPrimary(ptr))
      REAL(memset)(ptr, 0, nmemb * size);
    return ptr;
  }
  void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
    if (chunk_state == CHUNK_QUARANTINE)
      ReportDoubleFree((uptr)ptr, stack);
    else
      ReportFreeNotMalloced((uptr)ptr, stack);
  }
  void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) {
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack));
    allocator.SwallowCache(ac);
  }
  // -------------------------- Chunk lookup ----------------------

  // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
  // Returns nullptr if AsanChunk is not yet initialized just after
  // get_allocator().Allocate(), or is being destroyed just before
  // get_allocator().Deallocate().
  AsanChunk *GetAsanChunk(void *alloc_beg) {
    if (!alloc_beg)
      return nullptr;
    AsanChunk *p = reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Get();
    if (!p) {
      if (!allocator.FromPrimary(alloc_beg))
        return nullptr;
      p = reinterpret_cast<AsanChunk *>(alloc_beg);
    }
    u8 state = atomic_load(&p->chunk_state, memory_order_relaxed);
    // This does not guarantee that the chunk is initialized, but it is
    // definitely not a valid chunk for any other value.
    if (state == CHUNK_ALLOCATED || state == CHUNK_QUARANTINE)
      return p;
    return nullptr;
  }
  AsanChunk *GetAsanChunkByAddr(uptr p) {
    void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  // Allocator must be locked when this function is called.
  AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
    void *alloc_beg =
        allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }
  uptr AllocationSize(uptr p) {
    AsanChunk *m = GetAsanChunkByAddr(p);
    if (!m) return 0;
    if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
      return 0;
    if (m->Beg() != p) return 0;
    return m->UsedSize();
  }

  uptr AllocationSizeFast(uptr p) {
    return reinterpret_cast<AsanChunk *>(p - kChunkHeaderSize)->UsedSize();
  }
  AsanChunkView FindHeapChunkByAddress(uptr addr) {
    AsanChunk *m1 = GetAsanChunkByAddr(addr);
    sptr offset = 0;
    if (!m1 || AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
      // The address is in the chunk's left redzone, so maybe it is actually
      // a right buffer overflow from the other chunk before.
      // Search a bit before to see if there is another chunk.
      AsanChunk *m2 = nullptr;
      for (uptr l = 1; l < GetPageSizeCached(); l++) {
        m2 = GetAsanChunkByAddr(addr - l);
        if (m2 == m1) continue;  // Still the same chunk.
        break;
      }
      if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
        m1 = ChooseChunk(addr, m2, m1);
    }
    return AsanChunkView(m1);
  }
  void Purge(BufferedStackTrace *stack) {
    AsanThread *t = GetCurrentThread();
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      quarantine.DrainAndRecycle(GetQuarantineCache(ms),
                                 QuarantineCallback(GetAllocatorCache(ms),
                                                    stack));
    }
    {
      SpinMutexLock l(&fallback_mutex);
      quarantine.DrainAndRecycle(&fallback_quarantine_cache,
                                 QuarantineCallback(&fallback_allocator_cache,
                                                    stack));
    }

    allocator.ForceReleaseToOS();
  }

  void PrintStats() {
    allocator.PrintStats();
    quarantine.PrintStats();
  }

  void ForceLock() SANITIZER_ACQUIRE(fallback_mutex) {
    allocator.ForceLock();
    fallback_mutex.Lock();
  }

  void ForceUnlock() SANITIZER_RELEASE(fallback_mutex) {
    fallback_mutex.Unlock();
    allocator.ForceUnlock();
  }
};
static Allocator instance(LINKER_INITIALIZED);

static AsanAllocator &get_allocator() {
  return instance.allocator;
}
bool AsanChunkView::IsValid() const {
  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) !=
                       CHUNK_INVALID;
}
bool AsanChunkView::IsAllocated() const {
  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
                       CHUNK_ALLOCATED;
}
bool AsanChunkView::IsQuarantined() const {
  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
                       CHUNK_QUARANTINE;
}
uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
u32 AsanChunkView::UserRequestedAlignment() const {
  return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
}
uptr AsanChunkView::AllocTid() const {
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetAllocContext(tid, stack);
  return tid;
}

uptr AsanChunkView::FreeTid() const {
  if (!IsQuarantined())
    return kInvalidTid;
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetFreeContext(tid, stack);
  return tid;
}

AllocType AsanChunkView::GetAllocType() const {
  return (AllocType)chunk_->alloc_type;
}

u32 AsanChunkView::GetAllocStackId() const {
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetAllocContext(tid, stack);
  return stack;
}

u32 AsanChunkView::GetFreeStackId() const {
  if (!IsQuarantined())
    return 0;
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetFreeContext(tid, stack);
  return stack;
}
void InitializeAllocator(const AllocatorOptions &options) {
  instance.InitLinkerInitialized(options);
}

void ReInitializeAllocator(const AllocatorOptions &options) {
  instance.ReInitialize(options);
}

void GetAllocatorOptions(AllocatorOptions *options) {
  instance.GetOptions(options);
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  return instance.FindHeapChunkByAddress(addr);
}
AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
  return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
}

void AsanThreadLocalMallocStorage::CommitBack() {
  GET_STACK_TRACE_MALLOC;
  instance.CommitBack(this, &stack);
}

void PrintInternalAllocatorStats() {
  instance.PrintStats();
}
void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, 0, 0, stack, alloc_type);
}

void asan_delete(void *ptr, uptr size, uptr alignment,
                 BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, size, alignment, stack, alloc_type);
}

void *asan_malloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
}

void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
}
void *asan_reallocarray(void *p, uptr nmemb, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return asan_realloc(p, nmemb * size, stack);
}
void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  if (!p)
    return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
  if (size == 0) {
    if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
      instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
      return nullptr;
    }
    // Allocate a size of 1 if we shouldn't free() on Realloc to 0.
    size = 1;
  }
  return SetErrnoOnNull(instance.Reallocate(p, size, stack));
}
void *asan_valloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(
      instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
}

void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(
      instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
}
void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                    AllocType alloc_type) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, alloc_type, true));
}

void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, FROM_MALLOC, true));
}
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}
uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
  if (!ptr) return 0;
  uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    GET_STACK_TRACE_FATAL(pc, bp);
    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
  }
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  instance.ForceLock();
}

void asan_mz_force_unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  instance.ForceUnlock();
}
}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockAllocator() {
  __asan::get_allocator().ForceLock();
}

void UnlockAllocator() {
  __asan::get_allocator().ForceUnlock();
}
void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::get_allocator();
  *end = *begin + sizeof(__asan::get_allocator());
}
uptr PointsIntoChunk(void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
  if (!m || atomic_load(&m->chunk_state, memory_order_acquire) !=
                __asan::CHUNK_ALLOCATED)
    return 0;
  uptr chunk = m->Beg();
  if (m->AddrIsInside(addr))
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(), addr))
    return chunk;
  return 0;
}
uptr GetUserBegin(uptr chunk) {
  // FIXME: All usecases provide chunk address, GetAsanChunkByAddrFastLocked is
  // not needed.
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
  return m ? m->Beg() : 0;
}

uptr GetUserAddr(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = chunk ? reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize)
                    : nullptr;
}
bool LsanMetadata::allocated() const {
  if (!metadata_)
    return false;
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return atomic_load(&m->chunk_state, memory_order_relaxed) ==
         __asan::CHUNK_ALLOCATED;
}

ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize();
}

u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  u32 tid = 0;
  u32 stack = 0;
  m->GetAllocContext(tid, stack);
  return stack;
}
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::get_allocator().ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObject(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
  if (!m ||
      (atomic_load(&m->chunk_state, memory_order_acquire) !=
       __asan::CHUNK_ALLOCATED) ||
      !m->AddrIsInside(addr)) {
    return kIgnoreObjectInvalid;
  }
  if (m->lsan_tag == kIgnored)
    return kIgnoreObjectAlreadyIgnored;
  m->lsan_tag = __lsan::kIgnored;
  return kIgnoreObjectSuccess;
}
}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;
static const void *AllocationBegin(const void *p) {
  AsanChunk *m = __asan::instance.GetAsanChunkByAddr((uptr)p);
  if (!m)
    return nullptr;
  if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
    return nullptr;
  if (m->UsedSize() == 0)
    return nullptr;
  return (const void *)(m->Beg());
}

// The ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc. here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}
int __sanitizer_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return instance.AllocationSize(ptr) > 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  if (p == 0) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = instance.AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}
uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = instance.AllocationSizeFast(reinterpret_cast<uptr>(p));
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}

const void *__sanitizer_get_allocated_begin(const void *p) {
  return AllocationBegin(p);
}

void __sanitizer_purge_allocator() {
  GET_STACK_TRACE_MALLOC;
  instance.Purge(&stack);
}

int __asan_update_allocation_context(void *addr) {
  GET_STACK_TRACE_MALLOC;
  return instance.UpdateAllocationStack((uptr)addr, &stack);
}