//===-- asan_allocator.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, 2-nd version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"

#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

namespace __asan {

// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocation larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}
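
// Note: rz_log is the 3-bit encoding used by ComputeRZLog below; e.g.
// rz_log == 2 corresponds to a 64-byte redzone (16 << 2), and RZSize2Log(64)
// recovers 2.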

static AsanAllocator &get_allocator();

static void AtomicContextStore(volatile atomic_uint64_t *atomic_context,
                               u32 tid, u32 stack) {
  u64 context = tid;
  context <<= 32;
  context += stack;
  atomic_store(atomic_context, context, memory_order_relaxed);
}

static void AtomicContextLoad(const volatile atomic_uint64_t *atomic_context,
                              u32 &tid, u32 &stack) {
  u64 context = atomic_load(atomic_context, memory_order_relaxed);
  stack = context;
  context >>= 32;
  tid = context;
}
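
// The two helpers above keep the (tid, stack id) pair in a single 64-bit
// atomic: the thread id lives in the upper 32 bits and the stack depot id in
// the lower 32 bits, so readers never observe a torn pair.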

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If the left redzone is greater than the ChunkHeader size we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
//   M -- magic value kAllocBegMagic
//   B -- address of ChunkHeader pointing to the first 'H'

class ChunkHeader {
 public:
  atomic_uint8_t chunk_state;
  u8 alloc_type : 2;
  u8 lsan_tag : 2;

  // align < 8 -> 0
  // else -> log2(min(align, 512)) - 2
  u8 user_requested_alignment_log : 3;

 private:
  u16 user_requested_size_hi;
  u32 user_requested_size_lo;
  atomic_uint64_t alloc_context_id;

 public:
  uptr UsedSize() const {
    uptr R = user_requested_size_lo;
    if (sizeof(uptr) > sizeof(user_requested_size_lo))
      R += (uptr)user_requested_size_hi << (8 * sizeof(user_requested_size_lo));
    return R;
  }

  void SetUsedSize(uptr size) {
    user_requested_size_lo = size;
    if (sizeof(uptr) > sizeof(user_requested_size_lo)) {
      size >>= (8 * sizeof(user_requested_size_lo));
      user_requested_size_hi = size;
      CHECK_EQ(user_requested_size_hi, size);
    }
  }

  void SetAllocContext(u32 tid, u32 stack) {
    AtomicContextStore(&alloc_context_id, tid, stack);
  }

  void GetAllocContext(u32 &tid, u32 &stack) const {
    AtomicContextLoad(&alloc_context_id, tid, stack);
  }
};
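
// The user-requested size is split across user_requested_size_lo (32 bits)
// and user_requested_size_hi (16 bits), so UsedSize()/SetUsedSize() can
// round-trip sizes up to 2^48 bytes while keeping the header at 16 bytes.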

class ChunkBase : public ChunkHeader {
  atomic_uint64_t free_context_id;

 public:
  void SetFreeContext(u32 tid, u32 stack) {
    AtomicContextStore(&free_context_id, tid, stack);
  }

  void GetFreeContext(u32 &tid, u32 &stack) const {
    AtomicContextLoad(&free_context_id, tid, stack);
  }
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);

enum {
  // Either just allocated by underlying allocator, but AsanChunk is not yet
  // ready, or almost returned to underlying allocator and AsanChunk is already
  // meaningless.
  CHUNK_INVALID = 0,
  // The chunk is allocated and not yet freed.
  CHUNK_ALLOCATED = 2,
  // The chunk was freed and put into quarantine zone.
  CHUNK_QUARANTINE = 3,
};

class AsanChunk : public ChunkBase {
 public:
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  bool AddrIsInside(uptr addr) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize());
  }
};

class LargeChunkHeader {
  static constexpr uptr kAllocBegMagic =
      FIRST_32_SECOND_64(0xCC6E96B9, 0xCC6E96B9CC6E96B9ULL);
  atomic_uintptr_t magic;
  AsanChunk *chunk_header;

 public:
  AsanChunk *Get() const {
    return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic
               ? chunk_header
               : nullptr;
  }

  void Set(AsanChunk *p) {
    if (p) {
      chunk_header = p;
      atomic_store(&magic, kAllocBegMagic, memory_order_release);
      return;
    }

    uptr old = kAllocBegMagic;
    if (!atomic_compare_exchange_strong(&magic, &old, 0,
                                        memory_order_release)) {
      CHECK_EQ(old, kAllocBegMagic);
    }
  }
};

struct QuarantineCallback {
  QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
      : cache_(cache),
        stack_(stack) {
  }

  void Recycle(AsanChunk *m) {
    void *p = get_allocator().GetBlockBegin(m);
    if (p != m) {
      // Clear the magic value, as allocator internals may overwrite the
      // contents of deallocated chunk, confusing GetAsanChunk lookup.
      reinterpret_cast<LargeChunkHeader *>(p)->Set(nullptr);
    }

    u8 old_chunk_state = CHUNK_QUARANTINE;
    if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
                                        CHUNK_INVALID, memory_order_acquire)) {
      CHECK_EQ(old_chunk_state, CHUNK_QUARANTINE);
    }

    PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    get_allocator().Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    void *res = get_allocator().Allocate(cache_, size, 1);
    // TODO(alekseys): Consider making quarantine OOM-friendly.
    if (UNLIKELY(!res))
      ReportOutOfMemory(size, stack_);
    return res;
  }

  void Deallocate(void *p) {
    get_allocator().Deallocate(cache_, p);
  }

 private:
  AllocatorCache *const cache_;
  BufferedStackTrace *const stack_;
};

typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;

void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
  PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}

void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  PoisonShadow(p, size, 0);
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededASanShadowMemory(p, size);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}

// We can not use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  return &ms->allocator_cache;
}

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
  quarantine_size_mb = f->quarantine_size_mb;
  thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
  min_redzone = f->redzone;
  max_redzone = f->max_redzone;
  may_return_null = cf->allocator_may_return_null;
  alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
  release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
}

void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
  f->quarantine_size_mb = quarantine_size_mb;
  f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
  f->redzone = min_redzone;
  f->max_redzone = max_redzone;
  cf->allocator_may_return_null = may_return_null;
  f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
  cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
}

struct Allocator {
  static const uptr kMaxAllowedMallocSize =
      FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);

  AsanAllocator allocator;
  AsanQuarantine quarantine;
  StaticSpinMutex fallback_mutex;
  AllocatorCache fallback_allocator_cache;
  QuarantineCache fallback_quarantine_cache;

  uptr max_user_defined_malloc_size;
  atomic_uint8_t rss_limit_exceeded;

  // ------------------- Options --------------------------
  atomic_uint16_t min_redzone;
  atomic_uint16_t max_redzone;
  atomic_uint8_t alloc_dealloc_mismatch;

  // ------------------- Initialization ------------------------
  explicit Allocator(LinkerInitialized)
      : quarantine(LINKER_INITIALIZED),
        fallback_quarantine_cache(LINKER_INITIALIZED) {}

  void CheckOptions(const AllocatorOptions &options) const {
    CHECK_GE(options.min_redzone, 16);
    CHECK_GE(options.max_redzone, options.min_redzone);
    CHECK_LE(options.max_redzone, 2048);
    CHECK(IsPowerOfTwo(options.min_redzone));
    CHECK(IsPowerOfTwo(options.max_redzone));
  }

  void SharedInitCode(const AllocatorOptions &options) {
    CheckOptions(options);
    quarantine.Init((uptr)options.quarantine_size_mb << 20,
                    (uptr)options.thread_local_quarantine_size_kb << 10);
    atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
                 memory_order_release);
    atomic_store(&min_redzone, options.min_redzone, memory_order_release);
    atomic_store(&max_redzone, options.max_redzone, memory_order_release);
  }

  void InitLinkerInitialized(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.InitLinkerInitialized(options.release_to_os_interval_ms);
    SharedInitCode(options);
    max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                       ? common_flags()->max_allocation_size_mb
                                             << 20
                                       : kMaxAllowedMallocSize;
  }

  bool RssLimitExceeded() {
    return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
  }

  void SetRssLimitExceeded(bool limit_exceeded) {
    atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
  }

  void RePoisonChunk(uptr chunk) {
    // This could be a user-facing chunk (with redzones), or some internal
    // housekeeping chunk, like TransferBatch. Start by assuming the former.
    AsanChunk *ac = GetAsanChunk((void *)chunk);
    uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)chunk);
    if (ac && atomic_load(&ac->chunk_state, memory_order_acquire) ==
                  CHUNK_ALLOCATED) {
      uptr beg = ac->Beg();
      uptr end = ac->Beg() + ac->UsedSize();
      uptr chunk_end = chunk + allocated_size;
      if (chunk < beg && beg < end && end <= chunk_end) {
        // Looks like a valid AsanChunk in use, poison redzones only.
        PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
        uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
        FastPoisonShadowPartialRightRedzone(
            end_aligned_down, end - end_aligned_down,
            chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
        return;
      }
    }

    // This is either not an AsanChunk or freed or quarantined AsanChunk.
    // In either case, poison everything.
    PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
  }

  void ReInitialize(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
    SharedInitCode(options);

    // Poison all existing allocation's redzones.
    if (CanPoisonMemory()) {
      allocator.ForceLock();
      allocator.ForEachChunk(
          [](uptr chunk, void *alloc) {
            ((Allocator *)alloc)->RePoisonChunk(chunk);
          },
          this);
      allocator.ForceUnlock();
    }
  }

  void GetOptions(AllocatorOptions *options) const {
    options->quarantine_size_mb = quarantine.GetSize() >> 20;
    options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
    options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
    options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
    options->may_return_null = AllocatorMayReturnNull();
    options->alloc_dealloc_mismatch =
        atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
    options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
  }

  // -------------------- Helper methods. -------------------------
  uptr ComputeRZLog(uptr user_requested_size) {
    u32 rz_log = user_requested_size <= 64 - 16            ? 0
                 : user_requested_size <= 128 - 32         ? 1
                 : user_requested_size <= 512 - 64         ? 2
                 : user_requested_size <= 4096 - 128       ? 3
                 : user_requested_size <= (1 << 14) - 256  ? 4
                 : user_requested_size <= (1 << 15) - 512  ? 5
                 : user_requested_size <= (1 << 16) - 1024 ? 6
                                                           : 7;
    u32 hdr_log = RZSize2Log(RoundUpToPowerOfTwo(sizeof(ChunkHeader)));
    u32 min_log = RZSize2Log(atomic_load(&min_redzone, memory_order_acquire));
    u32 max_log = RZSize2Log(atomic_load(&max_redzone, memory_order_acquire));
    return Min(Max(rz_log, Max(min_log, hdr_log)), Max(max_log, hdr_log));
  }
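
  // For example, a 100-byte request satisfies user_requested_size <= 512 - 64,
  // so rz_log is 2 and the redzone is RZLog2Size(2) == 64 bytes, subsequently
  // clamped by the min_redzone/max_redzone options and the header size.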

  static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
    if (user_requested_alignment < 8)
      return 0;
    if (user_requested_alignment > 512)
      user_requested_alignment = 512;
    return Log2(user_requested_alignment) - 2;
  }

  static uptr ComputeUserAlignment(uptr user_requested_alignment_log) {
    if (user_requested_alignment_log == 0)
      return 0;
    return 1LL << (user_requested_alignment_log + 2);
  }
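
  // This 3-bit encoding round-trips: a requested alignment of 32 is stored as
  // Log2(32) - 2 == 3, and ComputeUserAlignment(3) == 1 << 5 == 32. Alignments
  // below 8 are stored as 0 and anything above 512 is clamped to 512.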

  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
                         AsanChunk *right_chunk) {
    if (!left_chunk)
      return right_chunk;
    if (!right_chunk)
      return left_chunk;
    // Prefer an allocated chunk over freed chunk and freed chunk
    // over available chunk.
    u8 left_state = atomic_load(&left_chunk->chunk_state, memory_order_relaxed);
    u8 right_state =
        atomic_load(&right_chunk->chunk_state, memory_order_relaxed);
    if (left_state != right_state) {
      if (left_state == CHUNK_ALLOCATED)
        return left_chunk;
      if (right_state == CHUNK_ALLOCATED)
        return right_chunk;
      if (left_state == CHUNK_QUARANTINE)
        return left_chunk;
      if (right_state == CHUNK_QUARANTINE)
        return right_chunk;
    }
    // Same chunk_state: choose based on offset.
    sptr l_offset = 0, r_offset = 0;
    CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
    CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }
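
  // Preference order when an address falls between two chunks: an allocated
  // chunk wins over a quarantined one, a quarantined one over an invalid one,
  // and ties are broken by whichever chunk boundary is closer to the address.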

  bool UpdateAllocationStack(uptr addr, BufferedStackTrace *stack) {
    AsanChunk *m = GetAsanChunkByAddr(addr);
    if (!m) return false;
    if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
      return false;
    if (m->Beg() != addr) return false;
    AsanThread *t = GetCurrentThread();
    m->SetAllocContext(t ? t->tid() : 0, StackDepotPut(*stack));
    return true;
  }

  // -------------------- Allocation/Deallocation routines ---------------
  void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
                 AllocType alloc_type, bool can_fill) {
    if (UNLIKELY(!asan_inited))
      AsanInitFromRtl();
    if (RssLimitExceeded()) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportRssLimitExceeded(stack);
    }
    Flags &fl = *flags();
    CHECK(stack);
    const uptr min_alignment = SHADOW_GRANULARITY;
    const uptr user_requested_alignment_log =
        ComputeUserRequestedAlignmentLog(alignment);
    if (alignment < min_alignment)
      alignment = min_alignment;
    if (size == 0) {
      // We'd be happy to avoid allocating memory for zero-size requests, but
      // some programs/tests depend on this behavior and assume that malloc
      // would not return NULL even for zero-size allocations. Moreover, it
      // looks like operator new should never return NULL, and results of
      // consecutive "new" calls must be different even if the allocated size
      // is zero.
      size = 1;
    }
    CHECK(IsPowerOfTwo(alignment));
    uptr rz_log = ComputeRZLog(size);
    uptr rz_size = RZLog2Size(rz_log);
    uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
    uptr needed_size = rounded_size + rz_size;
    if (alignment > min_alignment)
      needed_size += alignment;
    // If we are allocating from the secondary allocator, there will be no
    // automatic right redzone, so add the right redzone manually.
    if (!PrimaryAllocator::CanAllocate(needed_size, alignment))
      needed_size += rz_size;
    CHECK(IsAligned(needed_size, min_alignment));
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
        size > max_user_defined_malloc_size) {
      if (AllocatorMayReturnNull()) {
        Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
               size);
        return nullptr;
      }
      uptr malloc_limit =
          Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
      ReportAllocationSizeTooBig(size, needed_size, malloc_limit, stack);
    }

    AsanThread *t = GetCurrentThread();
    void *allocated;
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocated = allocator.Allocate(cache, needed_size, 8);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocated = allocator.Allocate(cache, needed_size, 8);
    }
    if (UNLIKELY(!allocated)) {
      SetAllocatorOutOfMemory();
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportOutOfMemory(size, stack);
    }

    if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
      // Heap poisoning is enabled, but the allocator provides an unpoisoned
      // chunk. This is possible if CanPoisonMemory() was false for some
      // time, for example, due to flags()->start_disabled.
      // Anyway, poison the block before using it for anything else.
      uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
      PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
    }

    uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    uptr alloc_end = alloc_beg + needed_size;
    uptr user_beg = alloc_beg + rz_size;
    if (!IsAligned(user_beg, alignment))
      user_beg = RoundUpTo(user_beg, alignment);
    uptr user_end = user_beg + size;
    CHECK_LE(user_end, alloc_end);
    uptr chunk_beg = user_beg - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    m->alloc_type = alloc_type;
    CHECK(size);
    m->SetUsedSize(size);
    m->user_requested_alignment_log = user_requested_alignment_log;

    m->SetAllocContext(t ? t->tid() : 0, StackDepotPut(*stack));

    uptr size_rounded_down_to_granularity =
        RoundDownTo(size, SHADOW_GRANULARITY);
    // Unpoison the bulk of the memory region.
    if (size_rounded_down_to_granularity)
      PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
    // Deal with the end of the region if size is not aligned to granularity.
    if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
      u8 *shadow =
          (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
      *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
    }

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mallocs++;
    thread_stats.malloced += size;
    thread_stats.malloced_redzones += needed_size - size;
    if (needed_size > SizeClassMap::kMaxSize)
      thread_stats.malloc_large++;
    else
      thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;

    void *res = reinterpret_cast<void *>(user_beg);
    if (can_fill && fl.max_malloc_fill_size) {
      uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
      REAL(memset)(res, fl.malloc_fill_byte, fill_size);
    }
#if CAN_SANITIZE_LEAKS
    m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                 : __lsan::kDirectlyLeaked;
#endif
    // Must be the last mutation of metadata in this function.
    atomic_store(&m->chunk_state, CHUNK_ALLOCATED, memory_order_release);
    if (alloc_beg != chunk_beg) {
      CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
      reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
    }
    ASAN_MALLOC_HOOK(res, size);
    return res;
  }
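
  // Layout recap: the pointer handed back to the user is alloc_beg + redzone,
  // rounded up to the requested alignment; the 16-byte ChunkHeader sits
  // immediately before it, and when the header does not start at alloc_beg a
  // LargeChunkHeader at alloc_beg records the chunk's address for lookup.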

  // Set quarantine flag if chunk is allocated, issue ASan error report on
  // available and quarantined chunks. Return true on success, false otherwise.
  bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
                                              BufferedStackTrace *stack) {
    u8 old_chunk_state = CHUNK_ALLOCATED;
    // Flip the chunk_state atomically to avoid race on double-free.
    if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
                                        CHUNK_QUARANTINE,
                                        memory_order_acquire)) {
      ReportInvalidFree(ptr, old_chunk_state, stack);
      // It's not safe to push a chunk in quarantine on invalid free.
      return false;
    }
    CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
    // It was a user data.
    m->SetFreeContext(kInvalidTid, 0);
    return true;
  }

  // Expects the chunk to already be marked as quarantined by using
  // AtomicallySetQuarantineFlagIfAllocated.
  void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
    CHECK_EQ(atomic_load(&m->chunk_state, memory_order_relaxed),
             CHUNK_QUARANTINE);

    AsanThread *t = GetCurrentThread();
    m->SetFreeContext(t ? t->tid() : 0, StackDepotPut(*stack));

    Flags &fl = *flags();
    if (fl.max_free_fill_size > 0) {
      // We have to skip the chunk header, it contains free_context_id.
      uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
      if (m->UsedSize() >= kChunkHeader2Size) {  // Skip Header2 in user area.
        uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
        size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
        REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
      }
    }

    // Poison the region.
    PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapFreeMagic);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.frees++;
    thread_stats.freed += m->UsedSize();

    // Push into quarantine.
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      AllocatorCache *ac = GetAllocatorCache(ms);
      quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m,
                     m->UsedSize());
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *ac = &fallback_allocator_cache;
      quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack),
                     m, m->UsedSize());
    }
  }

  void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
                  BufferedStackTrace *stack, AllocType alloc_type) {
    uptr p = reinterpret_cast<uptr>(ptr);
    if (p == 0) return;

    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
    // malloc. Don't report an invalid free in this case.
    if (SANITIZER_WINDOWS &&
        !get_allocator().PointerIsMine(ptr)) {
      if (!IsSystemHeapAddress(p))
        ReportFreeNotMalloced(p, stack);
      return;
    }

    ASAN_FREE_HOOK(ptr);

    // Must mark the chunk as quarantined before any changes to its metadata.
    // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
    if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;

    if (m->alloc_type != alloc_type) {
      if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
        ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
                                (AllocType)alloc_type);
      }
    } else {
      if (flags()->new_delete_type_mismatch &&
          (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) &&
          ((delete_size && delete_size != m->UsedSize()) ||
           ComputeUserRequestedAlignmentLog(delete_alignment) !=
               m->user_requested_alignment_log)) {
        ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack);
      }
    }

    QuarantineChunk(m, ptr, stack);
  }
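
  // Free path summary: the chunk is atomically flipped to CHUNK_QUARANTINE
  // first (so a racing double-free is reported rather than recycled), then
  // alloc/dealloc and new/delete mismatches are checked, and only then is the
  // chunk scribbled, poisoned, and pushed into quarantine.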

  void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
    CHECK(old_ptr && new_size);
    uptr p = reinterpret_cast<uptr>(old_ptr);
    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.reallocs++;
    thread_stats.realloced += new_size;

    void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
    if (new_ptr) {
      u8 chunk_state = atomic_load(&m->chunk_state, memory_order_acquire);
      if (chunk_state != CHUNK_ALLOCATED)
        ReportInvalidFree(old_ptr, chunk_state, stack);
      CHECK_NE(REAL(memcpy), nullptr);
      uptr memcpy_size = Min(new_size, m->UsedSize());
      // If realloc() races with free(), we may start copying freed memory.
      // However, we will report racy double-free later anyway.
      REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
      Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
    }
    return new_ptr;
  }

  void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportCallocOverflow(nmemb, size, stack);
    }
    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
    // If the memory comes from the secondary allocator no need to clear it
    // as it comes directly from mmap.
    if (ptr && allocator.FromPrimary(ptr))
      REAL(memset)(ptr, 0, nmemb * size);
    return ptr;
  }

  void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
    if (chunk_state == CHUNK_QUARANTINE)
      ReportDoubleFree((uptr)ptr, stack);
    else
      ReportFreeNotMalloced((uptr)ptr, stack);
  }

  void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) {
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack));
    allocator.SwallowCache(ac);
  }

  // -------------------------- Chunk lookup ----------------------

  // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
  // Returns nullptr if AsanChunk is not yet initialized just after
  // get_allocator().Allocate(), or is being destroyed just before
  // get_allocator().Deallocate().
  AsanChunk *GetAsanChunk(void *alloc_beg) {
    if (!alloc_beg)
      return nullptr;
    AsanChunk *p = reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Get();
    if (!p) {
      if (!allocator.FromPrimary(alloc_beg))
        return nullptr;
      p = reinterpret_cast<AsanChunk *>(alloc_beg);
    }
    u8 state = atomic_load(&p->chunk_state, memory_order_relaxed);
    // It does not guarantee that Chunk is initialized, but it's
    // definitely not for any other value.
    if (state == CHUNK_ALLOCATED || state == CHUNK_QUARANTINE)
      return p;
    return nullptr;
  }

  AsanChunk *GetAsanChunkByAddr(uptr p) {
    void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  // Allocator must be locked when this function is called.
  AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
    void *alloc_beg =
        allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }
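
  // Lookup strategy: for large (secondary) blocks the chunk address is read
  // from the LargeChunkHeader magic at the block start; for primary blocks the
  // AsanChunk is assumed to sit at the block start itself, and chunk_state is
  // used to filter out blocks that are not (or no longer) live AsanChunks.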

  uptr AllocationSize(uptr p) {
    AsanChunk *m = GetAsanChunkByAddr(p);
    if (!m) return 0;
    if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
      return 0;
    if (m->Beg() != p) return 0;
    return m->UsedSize();
  }

  AsanChunkView FindHeapChunkByAddress(uptr addr) {
    AsanChunk *m1 = GetAsanChunkByAddr(addr);
    sptr offset = 0;
    if (!m1 || AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
      // The address is in the chunk's left redzone, so maybe it is actually
      // a right buffer overflow from the other chunk to the left.
      // Search a bit to the left to see if there is another chunk.
      AsanChunk *m2 = nullptr;
      for (uptr l = 1; l < GetPageSizeCached(); l++) {
        m2 = GetAsanChunkByAddr(addr - l);
        if (m2 == m1) continue;  // Still the same chunk.
        break;
      }
      if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
        m1 = ChooseChunk(addr, m2, m1);
    }
    return AsanChunkView(m1);
  }

  void Purge(BufferedStackTrace *stack) {
    AsanThread *t = GetCurrentThread();
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      quarantine.DrainAndRecycle(GetQuarantineCache(ms),
                                 QuarantineCallback(GetAllocatorCache(ms),
                                                    stack));
    }
    {
      SpinMutexLock l(&fallback_mutex);
      quarantine.DrainAndRecycle(&fallback_quarantine_cache,
                                 QuarantineCallback(&fallback_allocator_cache,
                                                    stack));
    }

    allocator.ForceReleaseToOS();
  }

  void PrintStats() {
    allocator.PrintStats();
    quarantine.PrintStats();
  }

  void ForceLock() {
    allocator.ForceLock();
    fallback_mutex.Lock();
  }

  void ForceUnlock() {
    fallback_mutex.Unlock();
    allocator.ForceUnlock();
  }
};

static Allocator instance(LINKER_INITIALIZED);

static AsanAllocator &get_allocator() {
  return instance.allocator;
}

bool AsanChunkView::IsValid() const {
  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) !=
                       CHUNK_INVALID;
}
bool AsanChunkView::IsAllocated() const {
  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
                       CHUNK_ALLOCATED;
}
bool AsanChunkView::IsQuarantined() const {
  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
                       CHUNK_QUARANTINE;
}
uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
u32 AsanChunkView::UserRequestedAlignment() const {
  return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
}

uptr AsanChunkView::AllocTid() const {
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetAllocContext(tid, stack);
  return tid;
}

uptr AsanChunkView::FreeTid() const {
  if (!IsQuarantined())
    return kInvalidTid;
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetFreeContext(tid, stack);
  return tid;
}

AllocType AsanChunkView::GetAllocType() const {
  return (AllocType)chunk_->alloc_type;
}

static StackTrace GetStackTraceFromId(u32 id) {
  CHECK(id);
  StackTrace res = StackDepotGet(id);
  CHECK(res.trace);
  return res;
}

u32 AsanChunkView::GetAllocStackId() const {
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetAllocContext(tid, stack);
  return stack;
}

u32 AsanChunkView::GetFreeStackId() const {
  if (!IsQuarantined())
    return 0;
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetFreeContext(tid, stack);
  return stack;
}

StackTrace AsanChunkView::GetAllocStack() const {
  return GetStackTraceFromId(GetAllocStackId());
}

StackTrace AsanChunkView::GetFreeStack() const {
  return GetStackTraceFromId(GetFreeStackId());
}

void InitializeAllocator(const AllocatorOptions &options) {
  instance.InitLinkerInitialized(options);
}

void ReInitializeAllocator(const AllocatorOptions &options) {
  instance.ReInitialize(options);
}

void GetAllocatorOptions(AllocatorOptions *options) {
  instance.GetOptions(options);
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  return instance.FindHeapChunkByAddress(addr);
}

AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
  return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
}

void AsanThreadLocalMallocStorage::CommitBack() {
  GET_STACK_TRACE_MALLOC;
  instance.CommitBack(this, &stack);
}

void PrintInternalAllocatorStats() {
  instance.PrintStats();
}

void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, 0, 0, stack, alloc_type);
}

void asan_delete(void *ptr, uptr size, uptr alignment,
                 BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, size, alignment, stack, alloc_type);
}

void *asan_malloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
}

void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
}
, uptr nmemb
, uptr size
,
988 BufferedStackTrace
*stack
) {
989 if (UNLIKELY(CheckForCallocOverflow(size
, nmemb
))) {
990 errno
= errno_ENOMEM
;
991 if (AllocatorMayReturnNull())
993 ReportReallocArrayOverflow(nmemb
, size
, stack
);
995 return asan_realloc(p
, nmemb
* size
, stack
);
998 void *asan_realloc(void *p
, uptr size
, BufferedStackTrace
*stack
) {
1000 return SetErrnoOnNull(instance
.Allocate(size
, 8, stack
, FROM_MALLOC
, true));
1002 if (flags()->allocator_frees_and_returns_null_on_realloc_zero
) {
1003 instance
.Deallocate(p
, 0, 0, stack
, FROM_MALLOC
);
1006 // Allocate a size of 1 if we shouldn't free() on Realloc to 0
1009 return SetErrnoOnNull(instance
.Reallocate(p
, size
, stack
));

void *asan_valloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(
      instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
}

void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(
      instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
}

void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                    AllocType alloc_type) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, alloc_type, true));
}

void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, FROM_MALLOC, true));
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
  if (!ptr) return 0;
  uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    GET_STACK_TRACE_FATAL(pc, bp);
    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
  }
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() {
  instance.ForceLock();
}

void asan_mz_force_unlock() {
  instance.ForceUnlock();
}

void AsanSoftRssLimitExceededCallback(bool limit_exceeded) {
  instance.SetRssLimitExceeded(limit_exceeded);
}

}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockAllocator() {
  __asan::get_allocator().ForceLock();
}

void UnlockAllocator() {
  __asan::get_allocator().ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::get_allocator();
  *end = *begin + sizeof(__asan::get_allocator());
}

uptr PointsIntoChunk(void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
  if (!m || atomic_load(&m->chunk_state, memory_order_acquire) !=
                __asan::CHUNK_ALLOCATED)
    return 0;
  uptr chunk = m->Beg();
  if (m->AddrIsInside(addr))
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(), addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
  return m ? m->Beg() : 0;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = chunk ? reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize)
                    : nullptr;
}

bool LsanMetadata::allocated() const {
  if (!metadata_)
    return false;
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return atomic_load(&m->chunk_state, memory_order_relaxed) ==
         __asan::CHUNK_ALLOCATED;
}

ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize();
}

u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  u32 tid = 0;
  u32 stack = 0;
  m->GetAllocContext(tid, stack);
  return stack;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::get_allocator().ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
  if (!m ||
      (atomic_load(&m->chunk_state, memory_order_acquire) !=
       __asan::CHUNK_ALLOCATED) ||
      !m->AddrIsInside(addr)) {
    return kIgnoreObjectInvalid;
  }
  if (m->lsan_tag == kIgnored)
    return kIgnoreObjectAlreadyIgnored;
  m->lsan_tag = __lsan::kIgnored;
  return kIgnoreObjectSuccess;
}
}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return instance.AllocationSize(ptr) > 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  if (!p) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = instance.AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}

void __sanitizer_purge_allocator() {
  GET_STACK_TRACE_MALLOC;
  instance.Purge(&stack);
}

int __asan_update_allocation_context(void* addr) {
  GET_STACK_TRACE_MALLOC;
  return instance.UpdateAllocationStack((uptr)addr, &stack);
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
                             void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) {
  (void)ptr;
}
#endif