//===-- hwasan_allocator.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// HWAddressSanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_checks.h"
#include "hwasan_mapping.h"
#include "hwasan_malloc_bisect.h"
#include "hwasan_thread.h"
#include "hwasan_report.h"
#include "lsan/lsan_common.h"

namespace __hwasan {

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
static atomic_uint8_t hwasan_allocator_tagging_enabled;

static constexpr tag_t kFallbackAllocTag = 0xBB & kTagMask;
static constexpr tag_t kFallbackFreeTag = 0xBC;

enum {
  // Either just allocated by the underlying allocator, but the chunk metadata
  // is not yet ready, or almost returned to the underlying allocator and the
  // chunk metadata is already meaningless.
  CHUNK_INVALID = 0,
  // The chunk is allocated and not yet freed.
  CHUNK_ALLOCATED = 1,
};

// Random bytes copied into the unused tail of the last granule on allocation
// and verified on deallocation. Initialized in HwasanAllocatorInit, and never
// changed afterwards.
static ALIGNED(16) u8 tail_magic[kShadowAlignment - 1];
static uptr max_malloc_size;

bool HwasanChunkView::IsAllocated() const {
  return metadata_ && metadata_->IsAllocated();
}

uptr HwasanChunkView::Beg() const {
  return block_;
}
uptr HwasanChunkView::End() const {
  return Beg() + UsedSize();
}
uptr HwasanChunkView::UsedSize() const {
  return metadata_->GetRequestedSize();
}
u32 HwasanChunkView::GetAllocStackId() const {
  return metadata_->GetAllocStackId();
}

uptr HwasanChunkView::ActualSize() const {
  return allocator.GetActuallyAllocatedSize(reinterpret_cast<void *>(block_));
}

bool HwasanChunkView::FromSmallHeap() const {
  return allocator.FromPrimary(reinterpret_cast<void *>(block_));
}

bool HwasanChunkView::AddrIsInside(uptr addr) const {
  return (addr >= Beg()) && (addr < Beg() + UsedSize());
}

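// Metadata for a live chunk: SetAllocated records the requested size (split
// into two 32-bit halves) and a 64-bit value combining the allocating
// thread's unique id with the StackDepot id of the allocation stack, then
// publishes the chunk as CHUNK_ALLOCATED with a release store. SetUnallocated
// resets all of these fields.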
inline void Metadata::SetAllocated(u32 stack, u64 size) {
  Thread *t = GetCurrentThread();
  u64 context = t ? t->unique_id() : kMainTid;
  context <<= 32;
  context += stack;
  requested_size_low = size & ((1ul << 32) - 1);
  requested_size_high = size >> 32;
  atomic_store(&alloc_context_id, context, memory_order_relaxed);
  atomic_store(&chunk_state, CHUNK_ALLOCATED, memory_order_release);
}

inline void Metadata::SetUnallocated() {
  atomic_store(&chunk_state, CHUNK_INVALID, memory_order_release);
  requested_size_low = 0;
  requested_size_high = 0;
  atomic_store(&alloc_context_id, 0, memory_order_relaxed);
}

inline bool Metadata::IsAllocated() const {
  return atomic_load(&chunk_state, memory_order_relaxed) == CHUNK_ALLOCATED;
}

inline u64 Metadata::GetRequestedSize() const {
  return (static_cast<u64>(requested_size_high) << 32) + requested_size_low;
}

inline u32 Metadata::GetAllocStackId() const {
  return atomic_load(&alloc_context_id, memory_order_relaxed);
}

void GetAllocatorStats(AllocatorStatCounters s) {
  allocator.GetStats(s);
}

inline void Metadata::SetLsanTag(__lsan::ChunkTag tag) {
  lsan_tag = tag;
}

inline __lsan::ChunkTag Metadata::GetLsanTag() const {
  return static_cast<__lsan::ChunkTag>(lsan_tag);
}

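// Aliasing mode: the primary allocator is placed at a fixed offset from the
// dynamic shadow base, and the CHECK_EQs below verify that both ends of that
// alias region fall into the same taggable region as the shadow base (i.e.
// the bits above kTaggableRegionCheckShift match).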
uptr GetAliasRegionStart() {
#if defined(HWASAN_ALIASING_MODE)
  constexpr uptr kAliasRegionOffset = 1ULL << (kTaggableRegionCheckShift - 1);
  uptr AliasRegionStart =
      __hwasan_shadow_memory_dynamic_address + kAliasRegionOffset;

  CHECK_EQ(AliasRegionStart >> kTaggableRegionCheckShift,
           __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
  CHECK_EQ(
      (AliasRegionStart + kAliasRegionOffset - 1) >> kTaggableRegionCheckShift,
      __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
  return AliasRegionStart;
#else
  return 0;
#endif
}

void HwasanAllocatorInit() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
                       !flags()->disable_allocator_tagging);
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms,
                 GetAliasRegionStart());
  for (uptr i = 0; i < sizeof(tail_magic); i++)
    tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
  if (common_flags()->max_allocation_size_mb) {
    max_malloc_size = common_flags()->max_allocation_size_mb << 20;
    max_malloc_size = Min(max_malloc_size, kMaxAllowedMallocSize);
  } else {
    max_malloc_size = kMaxAllowedMallocSize;
  }
}

void HwasanAllocatorLock() { allocator.ForceLock(); }

void HwasanAllocatorUnlock() { allocator.ForceUnlock(); }

void AllocatorSwallowThreadLocalCache(AllocatorCache *cache) {
  allocator.SwallowCache(cache);
}

static uptr TaggedSize(uptr size) {
  if (!size)
    size = 1;
  uptr new_size = RoundUpTo(size, kShadowAlignment);
  CHECK_GE(new_size, size);
  return new_size;
}

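// Allocation path: the requested size is rounded up to a whole number of
// kShadowAlignment granules, the unused bytes of the last granule are filled
// with tail_magic (checked again on free), and, when tagging is enabled, the
// full granules are tagged with a random tag while a trailing short granule
// stores the used byte count in shadow and the real tag in its last byte.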
static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
                            bool zeroise) {
  // Keep this consistent with LSAN and ASAN behavior.
  if (UNLIKELY(orig_size == 0))
    orig_size = 1;
  if (UNLIKELY(orig_size > max_malloc_size)) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
             orig_size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(orig_size, max_malloc_size, stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportRssLimitExceeded(stack);
  }

  alignment = Max(alignment, kShadowAlignment);
  uptr size = TaggedSize(orig_size);
  Thread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    allocated = allocator.Allocate(t->allocator_cache(), size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  if (zeroise) {
    internal_memset(allocated, 0, size);
  } else if (flags()->max_malloc_fill_size > 0) {
    uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
    internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
  }
  if (size != orig_size) {
    u8 *tail = reinterpret_cast<u8 *>(allocated) + orig_size;
    uptr tail_length = size - orig_size;
    internal_memcpy(tail, tail_magic, tail_length - 1);
    // Short granule is excluded from magic tail, so we explicitly untag.
    tail[tail_length - 1] = 0;
  }

  void *user_ptr = allocated;
  // Tagging can only be skipped when both tag_in_malloc and tag_in_free are
  // false. When tag_in_malloc = false and tag_in_free = true malloc needs to
  // retag to 0.
  if (InTaggableRegion(reinterpret_cast<uptr>(user_ptr)) &&
      (flags()->tag_in_malloc || flags()->tag_in_free) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
    if (flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
      tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
      uptr tag_size = orig_size ? orig_size : 1;
      uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
      user_ptr =
          (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
      if (full_granule_size != tag_size) {
        u8 *short_granule =
            reinterpret_cast<u8 *>(allocated) + full_granule_size;
        TagMemoryAligned((uptr)short_granule, kShadowAlignment,
                         tag_size % kShadowAlignment);
        short_granule[kShadowAlignment - 1] = tag;
      }
    } else {
      user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
    }
  }

  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
#if CAN_SANITIZE_LEAKS
  meta->SetLsanTag(__lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                  : __lsan::kDirectlyLeaked);
#endif
  meta->SetAllocated(StackDepotPut(*stack), orig_size);
  RunMallocHooks(user_ptr, size);
  return user_ptr;
}

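// Returns true if the pointer tag matches the memory tag stored in shadow for
// the address (including the short granule encoding). Pointers outside the
// taggable region always match.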
static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
  CHECK(tagged_ptr);
  uptr tagged_uptr = reinterpret_cast<uptr>(tagged_ptr);
  if (!InTaggableRegion(tagged_uptr))
    return true;
  tag_t mem_tag = *reinterpret_cast<tag_t *>(
      MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr))));
  return PossiblyShortTagMatches(mem_tag, tagged_uptr, 1);
}

static bool CheckInvalidFree(StackTrace *stack, void *untagged_ptr,
                             void *tagged_ptr) {
  // This function can return true if halt_on_error is false.
  if (!MemIsApp(reinterpret_cast<uptr>(untagged_ptr)) ||
      !PointerAndMemoryTagsMatch(tagged_ptr)) {
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
    return true;
  }
  return false;
}

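// Deallocation path: validate the free (app memory, matching tags), verify
// the tail magic written at allocation time, optionally fill the chunk with
// free_fill_byte, and retag it with a fresh full 8-bit tag so that later
// accesses through stale pointers are reported as use-after-free.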
static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
  CHECK(tagged_ptr);
  RunFreeHooks(tagged_ptr);

  bool in_taggable_region =
      InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr));
  void *untagged_ptr = in_taggable_region ? UntagPtr(tagged_ptr) : tagged_ptr;

  if (CheckInvalidFree(stack, untagged_ptr, tagged_ptr))
    return;

  void *aligned_ptr = reinterpret_cast<void *>(
      RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
  tag_t pointer_tag = GetTagFromPointer(reinterpret_cast<uptr>(tagged_ptr));
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
  if (!meta) {
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
    return;
  }
  uptr orig_size = meta->GetRequestedSize();
  u32 free_context_id = StackDepotPut(*stack);
  u32 alloc_context_id = meta->GetAllocStackId();

  // Check tail magic.
  uptr tagged_size = TaggedSize(orig_size);
  if (flags()->free_checks_tail_magic && orig_size &&
      tagged_size != orig_size) {
    uptr tail_size = tagged_size - orig_size - 1;
    CHECK_LT(tail_size, kShadowAlignment);
    void *tail_beg = reinterpret_cast<void *>(
        reinterpret_cast<uptr>(aligned_ptr) + orig_size);
    tag_t short_granule_memtag = *(reinterpret_cast<tag_t *>(
        reinterpret_cast<uptr>(tail_beg) + tail_size));
    if (tail_size &&
        (internal_memcmp(tail_beg, tail_magic, tail_size) ||
         (in_taggable_region && pointer_tag != short_granule_memtag)))
      ReportTailOverwritten(stack, reinterpret_cast<uptr>(tagged_ptr),
                            orig_size, tail_magic);
  }

  // TODO(kstoimenov): consider meta->SetUnallocated(free_context_id).
  meta->SetUnallocated();
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  Thread *t = GetCurrentThread();
  if (flags()->max_free_fill_size > 0) {
    uptr fill_size =
        Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
    internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
  }
  if (in_taggable_region && flags()->tag_in_free && malloc_bisect(stack, 0) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
    // Always store full 8-bit tags on free to maximize UAF detection.
    tag_t tag;
    if (t) {
      // Make sure we are not using a short granule tag as a poison tag. This
      // would make us attempt to read the memory on a UaF.
      // The tag can be zero if tagging is disabled on this thread.
      do {
        tag = t->GenerateRandomTag(/*num_bits=*/8);
      } while (
          UNLIKELY((tag < kShadowAlignment || tag == pointer_tag) && tag != 0));
    } else {
      static_assert(kFallbackFreeTag >= kShadowAlignment,
                    "fallback tag must not be a short granule tag.");
      tag = kFallbackFreeTag;
    }
    TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
                     tag);
  }
  if (t) {
    allocator.Deallocate(t->allocator_cache(), aligned_ptr);
    if (auto *ha = t->heap_allocations())
      ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_context_id,
                free_context_id, static_cast<u32>(orig_size)});
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, aligned_ptr);
  }
}

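// Reallocation is implemented as allocate-new / copy / free-old, so the
// returned pointer always carries a new tag; the old allocation is released
// through the regular deallocation path.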
static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
                              uptr new_size, uptr alignment) {
  void *untagged_ptr_old =
      InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr_old))
          ? UntagPtr(tagged_ptr_old)
          : tagged_ptr_old;
  if (CheckInvalidFree(stack, untagged_ptr_old, tagged_ptr_old))
    return nullptr;
  void *tagged_ptr_new =
      HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (tagged_ptr_old && tagged_ptr_new) {
    Metadata *meta =
        reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
    internal_memcpy(
        UntagPtr(tagged_ptr_new), untagged_ptr_old,
        Min(new_size, static_cast<uptr>(meta->GetRequestedSize())));
    HwasanDeallocate(stack, tagged_ptr_old);
  }
  return tagged_ptr_new;
}

static void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, stack);
  }
  return HwasanAllocate(stack, nmemb * size, sizeof(u64), true);
}

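// Maps an arbitrary address owned by the allocator back to the chunk that
// contains it; returns an empty HwasanChunkView if the address does not
// belong to the heap.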
HwasanChunkView FindHeapChunkByAddress(uptr address) {
  if (!allocator.PointerIsMine(reinterpret_cast<void *>(address)))
    return HwasanChunkView();
  void *block = allocator.GetBlockBegin(reinterpret_cast<void *>(address));
  if (!block)
    return HwasanChunkView();
  Metadata *metadata =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(block));
  return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
}

static uptr AllocationSize(const void *tagged_ptr) {
  const void *untagged_ptr = UntagPtr(tagged_ptr);
  if (!untagged_ptr) return 0;
  const void *beg = allocator.GetBlockBegin(untagged_ptr);
  Metadata *b = (Metadata *)allocator.GetMetaData(untagged_ptr);
  if (beg != untagged_ptr) return 0;
  return b->GetRequestedSize();
}

void *hwasan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
}

void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanCalloc(stack, nmemb, size));
}

void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    HwasanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(HwasanReallocate(stack, ptr, size, sizeof(u64)));
}

void *hwasan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return hwasan_realloc(ptr, nmemb * size, stack);
}

void *hwasan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(
      HwasanAllocate(stack, size, GetPageSizeCached(), false));
}

void *hwasan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(HwasanAllocate(stack, size, PageSize, false));
}

void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
                          StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = HwasanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by HwasanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

void hwasan_free(void *ptr, StackTrace *stack) {
  return HwasanDeallocate(stack, ptr);
}

}  // namespace __hwasan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {

void LockAllocator() {
  __hwasan::HwasanAllocatorLock();
}

void UnlockAllocator() {
  __hwasan::HwasanAllocatorUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__hwasan::allocator;
  *end = *begin + sizeof(__hwasan::allocator);
}

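// The hooks below let LSan traverse the HWASan heap. They operate on untagged
// addresses and look chunks up via GetBlockBeginFastLocked on the combined
// allocator, consulting the per-chunk Metadata for liveness, size and the
// LSan chunk tag.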
uptr PointsIntoChunk(void *p) {
  p = __hwasan::InTaggableRegion(reinterpret_cast<uptr>(p)) ? UntagPtr(p) : p;
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk =
      reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBeginFastLocked(p));
  if (!chunk)
    return 0;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk)));
  if (!metadata || !metadata->IsAllocated())
    return 0;
  if (addr < chunk + metadata->GetRequestedSize())
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, metadata->GetRequestedSize(), addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  if (__hwasan::InTaggableRegion(chunk))
    CHECK_EQ(UntagAddr(chunk), chunk);
  void *block = __hwasan::allocator.GetBlockBeginFastLocked(
      reinterpret_cast<void *>(chunk));
  if (!block)
    return 0;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(block));
  if (!metadata || !metadata->IsAllocated())
    return 0;

  return reinterpret_cast<uptr>(block);
}

uptr GetUserAddr(uptr chunk) {
  tag_t mem_tag = *(tag_t *)__hwasan::MemToShadow(chunk);
  if (!__hwasan::InTaggableRegion(chunk))
    return chunk;
  return AddTagToPointer(chunk, mem_tag);
}

LsanMetadata::LsanMetadata(uptr chunk) {
  if (__hwasan::InTaggableRegion(chunk))
    CHECK_EQ(UntagAddr(chunk), chunk);
  metadata_ =
      chunk ? __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk))
            : nullptr;
}

bool LsanMetadata::allocated() const {
  if (!metadata_)
    return false;
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->IsAllocated();
}

ChunkTag LsanMetadata::tag() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetLsanTag();
}

void LsanMetadata::set_tag(ChunkTag value) {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  m->SetLsanTag(value);
}

uptr LsanMetadata::requested_size() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetRequestedSize();
}

u32 LsanMetadata::stack_trace_id() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetAllocStackId();
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __hwasan::allocator.ForEachChunk(callback, arg);
}

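// Marks the live chunk containing `p` as kIgnored so the leak checker skips
// it; returns an error result if `p` does not point into a live chunk or the
// chunk is already ignored.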
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  p = __hwasan::InTaggableRegion(reinterpret_cast<uptr>(p)) ? UntagPtr(p) : p;
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk =
      reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBeginFastLocked(p));
  if (!chunk)
    return kIgnoreObjectInvalid;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk)));
  if (!metadata || !metadata->IsAllocated())
    return kIgnoreObjectInvalid;
  if (addr >= chunk + metadata->GetRequestedSize())
    return kIgnoreObjectInvalid;
  if (metadata->GetLsanTag() == kIgnored)
    return kIgnoreObjectAlreadyIgnored;

  metadata->SetLsanTag(kIgnored);
  return kIgnoreObjectSuccess;
}

}  // namespace __lsan

using namespace __hwasan;

void __hwasan_enable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 1);
}

void __hwasan_disable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 0);
}

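// Public __sanitizer_* allocation queries. Allocation statistics come from
// the combined allocator's counters; free and unmapped byte counts are not
// tracked, so those two entry points return a placeholder value of 1.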
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }