compiler-rt/lib/hwasan/hwasan_allocator.cpp

//===-- hwasan_allocator.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// HWAddressSanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_checks.h"
#include "hwasan_mapping.h"
#include "hwasan_malloc_bisect.h"
#include "hwasan_thread.h"
#include "hwasan_report.h"

namespace __hwasan {

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
static atomic_uint8_t hwasan_allocator_tagging_enabled;

static constexpr tag_t kFallbackAllocTag = 0xBB & kTagMask;
static constexpr tag_t kFallbackFreeTag = 0xBC;

enum RightAlignMode {
  kRightAlignNever,
  kRightAlignSometimes,
  kRightAlignAlways
};

// Initialized in HwasanAllocatorInit, and never changed.
static ALIGNED(16) u8 tail_magic[kShadowAlignment - 1];

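// Accessors for HwasanChunkView, a read-only view of an allocated chunk and
// its metadata (the class itself is declared in hwasan_allocator.h).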
bool HwasanChunkView::IsAllocated() const {
  return metadata_ && metadata_->alloc_context_id &&
         metadata_->get_requested_size();
}

// Aligns the 'addr' right to the granule boundary.
static uptr AlignRight(uptr addr, uptr requested_size) {
  uptr tail_size = requested_size % kShadowAlignment;
  if (!tail_size) return addr;
  return addr + kShadowAlignment - tail_size;
}

uptr HwasanChunkView::Beg() const {
  if (metadata_ && metadata_->right_aligned)
    return AlignRight(block_, metadata_->get_requested_size());
  return block_;
}

uptr HwasanChunkView::End() const {
  return Beg() + UsedSize();
}

uptr HwasanChunkView::UsedSize() const {
  return metadata_->get_requested_size();
}

u32 HwasanChunkView::GetAllocStackId() const {
  return metadata_->alloc_context_id;
}

uptr HwasanChunkView::ActualSize() const {
  return allocator.GetActuallyAllocatedSize(reinterpret_cast<void *>(block_));
}

bool HwasanChunkView::FromSmallHeap() const {
  return allocator.FromPrimary(reinterpret_cast<void *>(block_));
}

void GetAllocatorStats(AllocatorStatCounters s) {
  allocator.GetStats(s);
}

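// Base of the alias region the allocator maps heap memory into when
// HWASAN_ALIASING_MODE is enabled; 0 otherwise.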
uptr GetAliasRegionStart() {
#if defined(HWASAN_ALIASING_MODE)
  constexpr uptr kAliasRegionOffset = 1ULL << (kTaggableRegionCheckShift - 1);
  uptr AliasRegionStart =
      __hwasan_shadow_memory_dynamic_address + kAliasRegionOffset;

  CHECK_EQ(AliasRegionStart >> kTaggableRegionCheckShift,
           __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
  CHECK_EQ(
      (AliasRegionStart + kAliasRegionOffset - 1) >> kTaggableRegionCheckShift,
      __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
  return AliasRegionStart;
#else
  return 0;
#endif
}

void HwasanAllocatorInit() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
                       !flags()->disable_allocator_tagging);
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms,
                 GetAliasRegionStart());
  for (uptr i = 0; i < sizeof(tail_magic); i++)
    tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
}

void HwasanAllocatorLock() { allocator.ForceLock(); }

void HwasanAllocatorUnlock() { allocator.ForceUnlock(); }

void AllocatorSwallowThreadLocalCache(AllocatorCache *cache) {
  allocator.SwallowCache(cache);
}

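// Rounds the requested size up to a whole number of tag granules
// (kShadowAlignment bytes); a zero-byte request is rounded up as one byte.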
static uptr TaggedSize(uptr size) {
  if (!size) size = 1;
  uptr new_size = RoundUpTo(size, kShadowAlignment);
  CHECK_GE(new_size, size);
  return new_size;
}

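// Allocates TaggedSize(orig_size) bytes from the sanitizer allocator, records
// the requested size and allocation stack in the chunk metadata, optionally
// zeroes or pattern-fills the memory, writes the tail magic into the unused
// part of the last granule, and tags the memory with a random (or fallback)
// tag unless tagging is disabled.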
static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
                            bool zeroise) {
  if (orig_size > kMaxAllowedMallocSize) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
             orig_size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportRssLimitExceeded(stack);
  }

  alignment = Max(alignment, kShadowAlignment);
  uptr size = TaggedSize(orig_size);
  Thread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    allocated = allocator.Allocate(t->allocator_cache(), size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->set_requested_size(orig_size);
  meta->alloc_context_id = StackDepotPut(*stack);
  meta->right_aligned = false;
  if (zeroise) {
    internal_memset(allocated, 0, size);
  } else if (flags()->max_malloc_fill_size > 0) {
    uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
    internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
  }
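  // If the requested size is not granule-aligned, fill the unused tail of the
  // last granule with tail_magic so that the free path can detect writes past
  // the end of the allocation.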
  if (size != orig_size) {
    u8 *tail = reinterpret_cast<u8 *>(allocated) + orig_size;
    uptr tail_length = size - orig_size;
    internal_memcpy(tail, tail_magic, tail_length - 1);
    // Short granule is excluded from magic tail, so we explicitly untag.
    tail[tail_length - 1] = 0;
  }

  void *user_ptr = allocated;
  // Tagging can only be skipped when both tag_in_malloc and tag_in_free are
  // false. When tag_in_malloc = false and tag_in_free = true malloc needs to
  // retag to 0.
  if (InTaggableRegion(reinterpret_cast<uptr>(user_ptr)) &&
      (flags()->tag_in_malloc || flags()->tag_in_free) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
    if (flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
      tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
      uptr tag_size = orig_size ? orig_size : 1;
      uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
      user_ptr =
          (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
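      // The trailing granule is "short": its shadow byte records the number
      // of valid bytes, and the real tag is stored in the granule's last byte.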
      if (full_granule_size != tag_size) {
        u8 *short_granule =
            reinterpret_cast<u8 *>(allocated) + full_granule_size;
        TagMemoryAligned((uptr)short_granule, kShadowAlignment,
                         tag_size % kShadowAlignment);
        short_granule[kShadowAlignment - 1] = tag;
      }
    } else {
      user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
    }
  }

  HWASAN_MALLOC_HOOK(user_ptr, size);
  return user_ptr;
}

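// Returns true if the pointer's tag matches the shadow memory tag for the
// address it points to (short granules count as a match), or if the address
// is outside the taggable region.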
static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
  CHECK(tagged_ptr);
  uptr tagged_uptr = reinterpret_cast<uptr>(tagged_ptr);
  if (!InTaggableRegion(tagged_uptr))
    return true;
  tag_t mem_tag = *reinterpret_cast<tag_t *>(
      MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr))));
  return PossiblyShortTagMatches(mem_tag, tagged_uptr, 1);
}

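// Reports an invalid free and returns true if the pointer is not an
// application address or its tag does not match the memory tag.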
static bool CheckInvalidFree(StackTrace *stack, void *untagged_ptr,
                             void *tagged_ptr) {
  // This function can return true if halt_on_error is false.
  if (!MemIsApp(reinterpret_cast<uptr>(untagged_ptr)) ||
      !PointerAndMemoryTagsMatch(tagged_ptr)) {
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
    return true;
  }
  return false;
}

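// Frees a chunk: validates the pointer, checks the tail magic for overflow,
// clears the metadata, optionally pattern-fills the memory, retags it with a
// full 8-bit random tag to catch use-after-free, records the free in the
// thread's heap allocation ring buffer, and returns the block to the
// allocator.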
static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
  CHECK(tagged_ptr);
  HWASAN_FREE_HOOK(tagged_ptr);

  bool in_taggable_region =
      InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr));
  void *untagged_ptr = in_taggable_region ? UntagPtr(tagged_ptr) : tagged_ptr;

  if (CheckInvalidFree(stack, untagged_ptr, tagged_ptr))
    return;

  void *aligned_ptr = reinterpret_cast<void *>(
      RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
  tag_t pointer_tag = GetTagFromPointer(reinterpret_cast<uptr>(tagged_ptr));
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
  if (!meta) {
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
    return;
  }
  uptr orig_size = meta->get_requested_size();
  u32 free_context_id = StackDepotPut(*stack);
  u32 alloc_context_id = meta->alloc_context_id;

  // Check tail magic.
  uptr tagged_size = TaggedSize(orig_size);
  if (flags()->free_checks_tail_magic && orig_size &&
      tagged_size != orig_size) {
    uptr tail_size = tagged_size - orig_size - 1;
    CHECK_LT(tail_size, kShadowAlignment);
    void *tail_beg = reinterpret_cast<void *>(
        reinterpret_cast<uptr>(aligned_ptr) + orig_size);
    tag_t short_granule_memtag = *(reinterpret_cast<tag_t *>(
        reinterpret_cast<uptr>(tail_beg) + tail_size));
    if (tail_size &&
        (internal_memcmp(tail_beg, tail_magic, tail_size) ||
         (in_taggable_region && pointer_tag != short_granule_memtag)))
      ReportTailOverwritten(stack, reinterpret_cast<uptr>(tagged_ptr),
                            orig_size, tail_magic);
  }

  meta->set_requested_size(0);
  meta->alloc_context_id = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  Thread *t = GetCurrentThread();
  if (flags()->max_free_fill_size > 0) {
    uptr fill_size =
        Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
    internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
  }
  if (in_taggable_region && flags()->tag_in_free && malloc_bisect(stack, 0) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
    // Always store full 8-bit tags on free to maximize UAF detection.
    tag_t tag;
    if (t) {
      // Make sure we are not using a short granule tag as a poison tag. This
      // would make us attempt to read the memory on a UaF.
      // The tag can be zero if tagging is disabled on this thread.
      do {
        tag = t->GenerateRandomTag(/*num_bits=*/8);
      } while (
          UNLIKELY((tag < kShadowAlignment || tag == pointer_tag) && tag != 0));
    } else {
      static_assert(kFallbackFreeTag >= kShadowAlignment,
                    "fallback tag must not be a short granule tag.");
      tag = kFallbackFreeTag;
    }
    TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
                     tag);
  }
  if (t) {
    allocator.Deallocate(t->allocator_cache(), aligned_ptr);
    if (auto *ha = t->heap_allocations())
      ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_context_id,
                free_context_id, static_cast<u32>(orig_size)});
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, aligned_ptr);
  }
}

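// realloc: allocates a new tagged chunk, copies over the smaller of the new
// size and the old requested size, then frees the old chunk.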
static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
                              uptr new_size, uptr alignment) {
  void *untagged_ptr_old =
      InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr_old))
          ? UntagPtr(tagged_ptr_old)
          : tagged_ptr_old;
  if (CheckInvalidFree(stack, untagged_ptr_old, tagged_ptr_old))
    return nullptr;
  void *tagged_ptr_new =
      HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (tagged_ptr_old && tagged_ptr_new) {
    Metadata *meta =
        reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
    internal_memcpy(
        UntagPtr(tagged_ptr_new), untagged_ptr_old,
        Min(new_size, static_cast<uptr>(meta->get_requested_size())));
    HwasanDeallocate(stack, tagged_ptr_old);
  }
  return tagged_ptr_new;
}

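// calloc: checks nmemb * size for overflow, then allocates zero-initialized
// memory.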
static void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, stack);
  }
  return HwasanAllocate(stack, nmemb * size, sizeof(u64), true);
}

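// Returns a HwasanChunkView for the chunk containing 'address', or an empty
// view if the address is not owned by this allocator.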
HwasanChunkView FindHeapChunkByAddress(uptr address) {
  if (!allocator.PointerIsMine(reinterpret_cast<void *>(address)))
    return HwasanChunkView();
  void *block = allocator.GetBlockBegin(reinterpret_cast<void*>(address));
  if (!block)
    return HwasanChunkView();
  Metadata *metadata =
      reinterpret_cast<Metadata*>(allocator.GetMetaData(block));
  return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
}

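// Returns the user-requested size of the chunk 'tagged_ptr' points to, or 0
// if the pointer does not point at the beginning of a chunk.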
static uptr AllocationSize(const void *tagged_ptr) {
  const void *untagged_ptr = UntagPtr(tagged_ptr);
  if (!untagged_ptr) return 0;
  const void *beg = allocator.GetBlockBegin(untagged_ptr);
  Metadata *b = (Metadata *)allocator.GetMetaData(untagged_ptr);
  if (b->right_aligned) {
    if (beg != reinterpret_cast<void *>(RoundDownTo(
                   reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment)))
      return 0;
  } else {
    if (beg != untagged_ptr) return 0;
  }
  return b->get_requested_size();
}

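// Entry points for the malloc family, used by the interceptors: thin wrappers
// that add libc semantics (errno, alignment and overflow checks) on top of
// HwasanAllocate / HwasanDeallocate.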
void *hwasan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
}

void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanCalloc(stack, nmemb, size));
}

void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    HwasanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(HwasanReallocate(stack, ptr, size, sizeof(u64)));
}

void *hwasan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return hwasan_realloc(ptr, nmemb * size, stack);
}

void *hwasan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(
      HwasanAllocate(stack, size, GetPageSizeCached(), false));
}

void *hwasan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(HwasanAllocate(stack, size, PageSize, false));
}

void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
                          StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = HwasanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by HwasanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

void hwasan_free(void *ptr, StackTrace *stack) {
  return HwasanDeallocate(stack, ptr);
}

}  // namespace __hwasan

using namespace __hwasan;

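// Public C interface: runtime control of allocator tagging and the
// __sanitizer_* allocator introspection entry points.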
void __hwasan_enable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 1);
}

void __hwasan_disable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 0);
}

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

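// Free and unmapped byte counts are not tracked by this allocator, and the
// estimated allocated size is simply the requested size.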
uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }