//===-- hwasan_report.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// Error reporting.
//===----------------------------------------------------------------------===//

#include "hwasan_report.h"

#include <dlfcn.h>

#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_globals.h"
#include "hwasan_mapping.h"
#include "hwasan_thread.h"
#include "hwasan_thread_list.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_array_ref.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace_printer.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
using namespace __sanitizer;

namespace __hwasan {

class ScopedReport {
 public:
  explicit ScopedReport(bool fatal) : fatal(fatal) {
    Lock lock(&error_message_lock_);
    error_message_ptr_ = &error_message_;
    ++hwasan_report_count;
  }

  ~ScopedReport() {
    void (*report_cb)(const char *);
    {
      Lock lock(&error_message_lock_);
      report_cb = error_report_callback_;
      error_message_ptr_ = nullptr;
    }
    if (report_cb)
      report_cb(error_message_.data());
    if (fatal)
      SetAbortMessage(error_message_.data());
    if (common_flags()->print_module_map >= 2 ||
        (fatal && common_flags()->print_module_map))
      DumpProcessMap();
    if (fatal)
      Die();
  }

  static void MaybeAppendToErrorMessage(const char *msg) {
    Lock lock(&error_message_lock_);
    if (!error_message_ptr_)
      return;
    error_message_ptr_->Append(msg);
  }

  static void SetErrorReportCallback(void (*callback)(const char *)) {
    Lock lock(&error_message_lock_);
    error_report_callback_ = callback;
  }

 private:
  InternalScopedString error_message_;
  bool fatal;

  static Mutex error_message_lock_;
  static InternalScopedString *error_message_ptr_
      SANITIZER_GUARDED_BY(error_message_lock_);
  static void (*error_report_callback_)(const char *);
};

Mutex ScopedReport::error_message_lock_;
InternalScopedString *ScopedReport::error_message_ptr_;
void (*ScopedReport::error_report_callback_)(const char *);
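// Usage sketch (illustrative, not part of the original file): each report
// class below owns a ScopedReport. Report()/Printf() output is routed into
// AppendToErrorMessageBuffer(), so the destructor can hand the complete text
// to the registered callback and, for fatal reports, call Die():
//
//   {
//     ScopedReport R(flags()->halt_on_error);  // fatal => Die() in the dtor
//     Report("ERROR: ...");                    // accumulated in error_message_
//   }  // callback fires here; abort message and module map, then Die()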
// If there is an active ScopedReport, append to its error message.
void AppendToErrorMessageBuffer(const char *buffer) {
  ScopedReport::MaybeAppendToErrorMessage(buffer);
}

static StackTrace GetStackTraceFromId(u32 id) {
  CHECK(id);
  StackTrace res = StackDepotGet(id);
  CHECK(res.trace);
  return res;
}
static void MaybePrintAndroidHelpUrl() {
#if SANITIZER_ANDROID
  Printf(
      "Learn more about HWASan reports: "
      "https://source.android.com/docs/security/test/memory-safety/"
      "hwasan-reports\n");
#endif
}
// A RAII object that holds a copy of the current thread stack ring buffer.
// The actual stack buffer may change while we are iterating over it (for
// example, Printf may call syslog() which can itself be built with hwasan).
class SavedStackAllocations {
 public:
  SavedStackAllocations() = default;

  explicit SavedStackAllocations(Thread *t) { CopyFrom(t); }

  void CopyFrom(Thread *t) {
    StackAllocationsRingBuffer *rb = t->stack_allocations();
    uptr size = rb->size() * sizeof(uptr);
    void *storage =
        MmapAlignedOrDieOnFatalError(size, size * 2, "saved stack allocations");
    new (&rb_) StackAllocationsRingBuffer(*rb, storage);
    thread_id_ = t->unique_id();
  }

  ~SavedStackAllocations() {
    if (rb_) {
      StackAllocationsRingBuffer *rb = get();
      UnmapOrDie(rb->StartOfStorage(), rb->size() * sizeof(uptr));
    }
  }

  const StackAllocationsRingBuffer *get() const {
    return (const StackAllocationsRingBuffer *)&rb_;
  }

  StackAllocationsRingBuffer *get() {
    return (StackAllocationsRingBuffer *)&rb_;
  }

  u32 thread_id() const { return thread_id_; }

 private:
  uptr rb_ = 0;
  u32 thread_id_;
};
class Decorator : public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() {}
  const char *Access() { return Blue(); }
  const char *Allocation() const { return Magenta(); }
  const char *Origin() const { return Magenta(); }
  const char *Name() const { return Green(); }
  const char *Location() { return Green(); }
  const char *Thread() { return Green(); }
};
static bool FindHeapAllocation(HeapAllocationsRingBuffer *rb, uptr tagged_addr,
                               HeapAllocationRecord *har, uptr *ring_index,
                               uptr *num_matching_addrs,
                               uptr *num_matching_addrs_4b) {
  if (!rb) return false;

  *num_matching_addrs = 0;
  *num_matching_addrs_4b = 0;
  for (uptr i = 0, size = rb->size(); i < size; i++) {
    auto h = (*rb)[i];
    if (h.tagged_addr <= tagged_addr &&
        h.tagged_addr + h.requested_size > tagged_addr) {
      *har = h;
      *ring_index = i;
      return true;
    }

    // Measure the number of heap ring buffer entries that would have matched
    // if we had only one entry per address (e.g. if the ring buffer data was
    // stored at the address itself). This will help us tune the allocator
    // implementation for MTE.
    if (UntagAddr(h.tagged_addr) <= UntagAddr(tagged_addr) &&
        UntagAddr(h.tagged_addr) + h.requested_size > UntagAddr(tagged_addr)) {
      ++*num_matching_addrs;
    }

    // Measure the number of heap ring buffer entries that would have matched
    // if we only had 4 tag bits, which is the case for MTE.
    auto untag_4b = [](uptr p) {
      return p & ((1ULL << 60) - 1);
    };
    if (untag_4b(h.tagged_addr) <= untag_4b(tagged_addr) &&
        untag_4b(h.tagged_addr) + h.requested_size > untag_4b(tagged_addr)) {
      ++*num_matching_addrs_4b;
    }
  }
  return false;
}
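// Note on untag_4b above (explanatory addition): HWASan tags occupy the
// pointer's top byte (8 bits), while MTE offers only 4 tag bits. Masking with
// (1ULL << 60) - 1 discards the top 4 tag bits, so this counter estimates how
// many ring buffer entries would become indistinguishable from the faulting
// address if only 4-bit tags were available.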
static void PrintStackAllocations(const StackAllocationsRingBuffer *sa,
                                  tag_t addr_tag, uptr untagged_addr) {
  uptr frames = Min((uptr)flags()->stack_history_size, sa->size());
  bool found_local = false;
  InternalScopedString location;
  for (uptr i = 0; i < frames; i++) {
    const uptr *record_addr = &(*sa)[i];
    uptr record = *record_addr;
    if (!record)
      break;
    tag_t base_tag =
        reinterpret_cast<uptr>(record_addr) >> kRecordAddrBaseTagShift;
    const uptr fp = (record >> kRecordFPShift) << kRecordFPLShift;
    CHECK_LT(fp, kRecordFPModulus);
    uptr pc_mask = (1ULL << kRecordFPShift) - 1;
    uptr pc = record & pc_mask;
    FrameInfo frame;
    if (!Symbolizer::GetOrInit()->SymbolizeFrame(pc, &frame))
      continue;
    for (LocalInfo &local : frame.locals) {
      if (!local.has_frame_offset || !local.has_size || !local.has_tag_offset)
        continue;
      if (!(local.name && internal_strlen(local.name)) &&
          !(local.function_name && internal_strlen(local.function_name)) &&
          !(local.decl_file && internal_strlen(local.decl_file)))
        continue;
      tag_t obj_tag = base_tag ^ local.tag_offset;
      if (obj_tag != addr_tag)
        continue;

      // We only store bits 4-19 of FP (bits 0-3 are guaranteed to be zero).
      // So we know only `FP % kRecordFPModulus`, and we can only calculate
      // `local_beg % kRecordFPModulus`.
      // Out of all possible `local_beg` we will only consider 2 candidates
      // nearest to the `untagged_addr`.
      uptr local_beg_mod = (fp + local.frame_offset) % kRecordFPModulus;
      // Pick `local_beg` in the same 1 MiB block as `untagged_addr`.
      uptr local_beg =
          RoundDownTo(untagged_addr, kRecordFPModulus) + local_beg_mod;
      // Pick the largest `local_beg <= untagged_addr`. It's either the current
      // one or the one before.
      if (local_beg > untagged_addr)
        local_beg -= kRecordFPModulus;

      uptr offset = -1ull;
      const char *whence;
      const char *cause = nullptr;
      uptr best_beg;

      // Try two 1 MiB blocks options and pick nearest one.
      for (uptr i = 0; i < 2; ++i, local_beg += kRecordFPModulus) {
        uptr local_end = local_beg + local.size;
        if (local_beg > local_end)
          continue;  // This is a wraparound.
        if (local_beg <= untagged_addr && untagged_addr < local_end) {
          offset = untagged_addr - local_beg;
          whence = "inside";
          cause = "use-after-scope";
          best_beg = local_beg;
          break;  // This is as close at it can be.
        }

        if (untagged_addr >= local_end) {
          uptr new_offset = untagged_addr - local_end;
          if (new_offset < offset) {
            offset = new_offset;
            whence = "after";
            cause = "stack-buffer-overflow";
            best_beg = local_beg;
          }
        } else {
          uptr new_offset = local_beg - untagged_addr;
          if (new_offset < offset) {
            offset = new_offset;
            whence = "before";
            cause = "stack-buffer-overflow";
            best_beg = local_beg;
          }
        }
      }

      // To fail the `untagged_addr` must be near nullptr, which is impossible
      // with Linux user space memory layout.
      if (!cause)
        continue;

      if (!found_local) {
        Printf("\nPotentially referenced stack objects:\n");
        found_local = true;
      }

      Decorator d;
      Printf("%s", d.Error());
      Printf("Cause: %s\n", cause);
      Printf("%s", d.Default());
      Printf("%s", d.Location());
      StackTracePrinter::GetOrInit()->RenderSourceLocation(
          &location, local.decl_file, local.decl_line, /* column= */ 0,
          common_flags()->symbolize_vs_style,
          common_flags()->strip_path_prefix);
      Printf(
          "%p is located %zd bytes %s a %zd-byte local variable %s "
          "[%p,%p) "
          "in %s %s\n",
          untagged_addr, offset, whence, local.size, local.name, best_beg,
          best_beg + local.size, local.function_name, location.data());
      location.clear();
      Printf("%s\n", d.Default());
    }
    frame.Clear();
  }

  if (found_local)
    return;

  // We didn't find any locals. Most likely we don't have symbols, so dump
  // the information that we have for offline analysis.
  InternalScopedString frame_desc;
  Printf("Previously allocated frames:\n");
  for (uptr i = 0; i < frames; i++) {
    const uptr *record_addr = &(*sa)[i];
    uptr record = *record_addr;
    if (!record)
      break;
    uptr pc_mask = (1ULL << 48) - 1;
    uptr pc = record & pc_mask;
    frame_desc.AppendF("  record_addr:%p record:0x%zx",
                       reinterpret_cast<const void *>(record_addr), record);
    SymbolizedStackHolder symbolized_stack(
        Symbolizer::GetOrInit()->SymbolizePC(pc));
    const SymbolizedStack *frame = symbolized_stack.get();
    if (frame) {
      StackTracePrinter::GetOrInit()->RenderFrame(
          &frame_desc, " %F %L", 0, frame->info.address, &frame->info,
          common_flags()->symbolize_vs_style,
          common_flags()->strip_path_prefix);
    }
    Printf("%s\n", frame_desc.data());
    frame_desc.clear();
  }
}
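// Record layout assumed by the decoding above (inferred from the shifts used;
// see the kRecord* constants in hwasan.h):
//
//   bit 63          48 47                               0
//      +--------------+---------------------------------+
//      | FP bits 4-19 |               PC                |
//      +--------------+---------------------------------+
//
// The frame's base tag is not stored in the record itself; it is recovered
// from the record's own address via kRecordAddrBaseTagShift.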
// Returns true if tag == *tag_ptr, reading tags from short granules if
// necessary. This may return a false positive if tags 1-15 are used as a
// regular tag rather than a short granule marker.
static bool TagsEqual(tag_t tag, tag_t *tag_ptr) {
  if (tag == *tag_ptr)
    return true;
  if (*tag_ptr == 0 || *tag_ptr > kShadowAlignment - 1)
    return false;
  uptr mem = ShadowToMem(reinterpret_cast<uptr>(tag_ptr));
  tag_t inline_tag = *reinterpret_cast<tag_t *>(mem + kShadowAlignment - 1);
  return tag == inline_tag;
}
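// Illustration of the short-granule scheme TagsEqual relies on: for a 20-byte
// allocation with tag 0xab, the second, partially used granule has shadow
// value 4 (the count of used bytes), and the real tag 0xab is stashed in the
// granule's last byte - the inline_tag read above.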
// HWASan globals store the size of the global in the descriptor. In cases
// where we don't have a binary with symbols, we can't grab the size of the
// global from the debug info - but we might be able to retrieve it from the
// descriptor. Returns zero if the lookup failed.
static uptr GetGlobalSizeFromDescriptor(uptr ptr) {
  // Find the ELF object that this global resides in.
  Dl_info info;
  if (dladdr(reinterpret_cast<void *>(ptr), &info) == 0)
    return 0;
  auto *ehdr = reinterpret_cast<const ElfW(Ehdr) *>(info.dli_fbase);
  auto *phdr_begin = reinterpret_cast<const ElfW(Phdr) *>(
      reinterpret_cast<const u8 *>(ehdr) + ehdr->e_phoff);

  // Get the load bias. This is normally the same as the dli_fbase address on
  // position-independent code, but can be different on non-PIE executables,
  // binaries using LLD's partitioning feature, or binaries compiled with a
  // linker script.
  ElfW(Addr) load_bias = 0;
  for (const auto &phdr :
       ArrayRef<const ElfW(Phdr)>(phdr_begin, phdr_begin + ehdr->e_phnum)) {
    if (phdr.p_type != PT_LOAD || phdr.p_offset != 0)
      continue;
    load_bias = reinterpret_cast<ElfW(Addr)>(ehdr) - phdr.p_vaddr;
    break;
  }

  // Walk all globals in this ELF object, looking for the one we're interested
  // in. Once we find it, we can stop iterating and return the size of the
  // global we're interested in.
  for (const hwasan_global &global :
       HwasanGlobalsFor(load_bias, phdr_begin, ehdr->e_phnum))
    if (global.addr() <= ptr && ptr < global.addr() + global.size())
      return global.size();

  return 0;
}
void ReportStats() {}

constexpr uptr kDumpWidth = 16;
constexpr uptr kShadowLines = 17;
constexpr uptr kShadowDumpSize = kShadowLines * kDumpWidth;

constexpr uptr kShortLines = 3;
constexpr uptr kShortDumpSize = kShortLines * kDumpWidth;
constexpr uptr kShortDumpOffset = (kShadowLines - kShortLines) / 2 * kDumpWidth;
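// Dump geometry: the full dump is kShadowLines x kDumpWidth = 17 rows of 16
// shadow bytes (one tag per 16-byte granule), centered on the faulting
// granule. The short granule dump covers only the middle kShortLines = 3
// rows, i.e. it starts kShortDumpOffset = 112 tags into the full dump.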
static uptr GetPrintTagStart(uptr addr) {
  addr = MemToShadow(addr);
  addr = RoundDownTo(addr, kDumpWidth);
  addr -= kDumpWidth * (kShadowLines / 2);
  return addr;
}
template <typename PrintTag>
static void PrintTagInfoAroundAddr(uptr addr, uptr num_rows,
                                   InternalScopedString &s,
                                   PrintTag print_tag) {
  uptr center_row_beg = RoundDownTo(addr, kDumpWidth);
  uptr beg_row = center_row_beg - kDumpWidth * (num_rows / 2);
  uptr end_row = center_row_beg + kDumpWidth * ((num_rows + 1) / 2);
  for (uptr row = beg_row; row < end_row; row += kDumpWidth) {
    s.Append(row == center_row_beg ? "=>" : "  ");
    s.AppendF("%p:", (void *)ShadowToMem(row));
    for (uptr i = 0; i < kDumpWidth; i++) {
      s.Append(row + i == addr ? "[" : " ");
      print_tag(s, row + i);
      s.Append(row + i == addr ? "]" : " ");
    }
    s.Append("\n");
  }
}
template <typename GetTag, typename GetShortTag>
static void PrintTagsAroundAddr(uptr addr, GetTag get_tag,
                                GetShortTag get_short_tag) {
  InternalScopedString s;
  addr = MemToShadow(addr);
  s.AppendF(
      "\nMemory tags around the buggy address (one tag corresponds to %zd "
      "bytes):\n",
      kShadowAlignment);
  PrintTagInfoAroundAddr(addr, kShadowLines, s,
                         [&](InternalScopedString &s, uptr tag_addr) {
                           tag_t tag = get_tag(tag_addr);
                           s.AppendF("%02x", tag);
                         });

  s.AppendF(
      "Tags for short granules around the buggy address (one tag corresponds "
      "to %zd bytes):\n",
      kShadowAlignment);
  PrintTagInfoAroundAddr(addr, kShortLines, s,
                         [&](InternalScopedString &s, uptr tag_addr) {
                           tag_t tag = get_tag(tag_addr);
                           if (tag >= 1 && tag <= kShadowAlignment) {
                             tag_t short_tag = get_short_tag(tag_addr);
                             s.AppendF("%02x", short_tag);
                           } else {
                             s.Append("..");
                           }
                         });
  s.Append(
      "See "
      "https://clang.llvm.org/docs/"
      "HardwareAssistedAddressSanitizerDesign.html#short-granules for a "
      "description of short granule tags\n");
  Printf("%s", s.data());
}
static uptr GetTopPc(const StackTrace *stack) {
  return stack->size ? StackTrace::GetPreviousInstructionPc(stack->trace[0])
                     : 0;
}

namespace {
class BaseReport {
 public:
  BaseReport(StackTrace *stack, bool fatal, uptr tagged_addr, uptr access_size)
      : scoped_report(fatal),
        stack(stack),
        tagged_addr(tagged_addr),
        access_size(access_size),
        untagged_addr(UntagAddr(tagged_addr)),
        ptr_tag(GetTagFromPointer(tagged_addr)),
        mismatch_offset(FindMismatchOffset()),
        heap(CopyHeapChunk()),
        allocations(CopyAllocations()),
        candidate(FindBufferOverflowCandidate()),
        shadow(CopyShadow()) {}

 protected:
  struct OverflowCandidate {
    uptr untagged_addr = 0;
    bool after = false;
    bool is_close = false;

    struct {
      uptr begin = 0;
      uptr end = 0;
      u32 thread_id = 0;
      u32 stack_id = 0;
      bool is_allocated = false;
    } heap;
  };

  struct HeapAllocation {
    HeapAllocationRecord har = {};
    uptr ring_index = 0;
    uptr num_matching_addrs = 0;
    uptr num_matching_addrs_4b = 0;
    u32 free_thread_id = 0;
  };

  struct Allocations {
    ArrayRef<SavedStackAllocations> stack;
    ArrayRef<HeapAllocation> heap;
  };

  struct HeapChunk {
    uptr begin = 0;
    uptr size = 0;
    u32 stack_id = 0;
    bool from_small_heap = false;
    bool is_allocated = false;
  };

  struct Shadow {
    uptr addr = 0;
    tag_t tags[kShadowDumpSize] = {};
    tag_t short_tags[kShortDumpSize] = {};
  };

  sptr FindMismatchOffset() const;
  Shadow CopyShadow() const;
  tag_t GetTagCopy(uptr addr) const;
  tag_t GetShortTagCopy(uptr addr) const;
  HeapChunk CopyHeapChunk() const;
  Allocations CopyAllocations();
  OverflowCandidate FindBufferOverflowCandidate() const;
  void PrintAddressDescription() const;
  void PrintHeapOrGlobalCandidate() const;
  void PrintTags(uptr addr) const;

  SavedStackAllocations stack_allocations_storage[16];
  HeapAllocation heap_allocations_storage[256];

  const ScopedReport scoped_report;
  const StackTrace *stack = nullptr;
  const uptr tagged_addr = 0;
  const uptr access_size = 0;
  const uptr untagged_addr = 0;
  const tag_t ptr_tag = 0;
  const sptr mismatch_offset = 0;

  const HeapChunk heap;
  const Allocations allocations;
  const OverflowCandidate candidate;
  const Shadow shadow;
};
sptr BaseReport::FindMismatchOffset() const {
  if (!access_size)
    return 0;
  sptr offset =
      __hwasan_test_shadow(reinterpret_cast<void *>(tagged_addr), access_size);
  CHECK_GE(offset, 0);
  CHECK_LT(offset, static_cast<sptr>(access_size));
  tag_t *tag_ptr =
      reinterpret_cast<tag_t *>(MemToShadow(untagged_addr + offset));
  tag_t mem_tag = *tag_ptr;

  if (mem_tag && mem_tag < kShadowAlignment) {
    tag_t *granule_ptr = reinterpret_cast<tag_t *>((untagged_addr + offset) &
                                                   ~(kShadowAlignment - 1));
    // Even if offset is 0, (untagged_addr + offset) is not necessarily aligned
    // to granules; in_granule_offset is the offset of the leftmost accessed
    // byte within the bad granule.
    u8 in_granule_offset = (untagged_addr + offset) & (kShadowAlignment - 1);
    tag_t short_tag = granule_ptr[kShadowAlignment - 1];
    // The first mismatch was a short granule that matched the ptr_tag.
    if (short_tag == ptr_tag) {
      // If the access starts after the end of the short granule, then the
      // first bad byte is the first byte of the access; otherwise it is the
      // first byte past the end of the short granule.
      if (mem_tag > in_granule_offset) {
        offset += mem_tag - in_granule_offset;
      }
    }
  }
  return offset;
}
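// Worked example (illustrative): for a 20-byte allocation, the second granule
// is short with shadow value 4. An access at offset 16 within an aligned
// granule has in_granule_offset == 0 and mem_tag == 4, so if the stashed
// short tag matches ptr_tag the first bad byte is at offset
// 16 + (4 - 0) == 20, i.e. the first byte past the end of the object.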
BaseReport::Shadow BaseReport::CopyShadow() const {
  Shadow result;
  if (!MemIsApp(untagged_addr))
    return result;

  result.addr = GetPrintTagStart(untagged_addr + mismatch_offset);
  uptr tag_addr = result.addr;
  uptr short_end = kShortDumpOffset + ARRAY_SIZE(shadow.short_tags);
  for (uptr i = 0; i < ARRAY_SIZE(result.tags); ++i, ++tag_addr) {
    if (!MemIsShadow(tag_addr))
      continue;
    result.tags[i] = *reinterpret_cast<tag_t *>(tag_addr);
    if (i < kShortDumpOffset || i >= short_end)
      continue;
    uptr granule_addr = ShadowToMem(tag_addr);
    if (1 <= result.tags[i] && result.tags[i] <= kShadowAlignment &&
        IsAccessibleMemoryRange(granule_addr, kShadowAlignment)) {
      result.short_tags[i - kShortDumpOffset] =
          *reinterpret_cast<tag_t *>(granule_addr + kShadowAlignment - 1);
    }
  }
  return result;
}
tag_t BaseReport::GetTagCopy(uptr addr) const {
  CHECK_GE(addr, shadow.addr);
  uptr idx = addr - shadow.addr;
  CHECK_LT(idx, ARRAY_SIZE(shadow.tags));
  return shadow.tags[idx];
}

tag_t BaseReport::GetShortTagCopy(uptr addr) const {
  CHECK_GE(addr, shadow.addr + kShortDumpOffset);
  uptr idx = addr - shadow.addr - kShortDumpOffset;
  CHECK_LT(idx, ARRAY_SIZE(shadow.short_tags));
  return shadow.short_tags[idx];
}
BaseReport::HeapChunk BaseReport::CopyHeapChunk() const {
  HeapChunk result = {};
  if (MemIsShadow(untagged_addr))
    return result;
  HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
  result.begin = chunk.Beg();
  if (result.begin) {
    result.size = chunk.ActualSize();
    result.from_small_heap = chunk.FromSmallHeap();
    result.is_allocated = chunk.IsAllocated();
    result.stack_id = chunk.GetAllocStackId();
  }
  return result;
}
BaseReport::Allocations BaseReport::CopyAllocations() {
  if (MemIsShadow(untagged_addr))
    return {};
  uptr stack_allocations_count = 0;
  uptr heap_allocations_count = 0;
  hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
    if (stack_allocations_count < ARRAY_SIZE(stack_allocations_storage) &&
        t->AddrIsInStack(untagged_addr)) {
      stack_allocations_storage[stack_allocations_count++].CopyFrom(t);
    }

    if (heap_allocations_count < ARRAY_SIZE(heap_allocations_storage)) {
      // Scan all threads' ring buffers to find if it's a heap-use-after-free.
      HeapAllocationRecord har;
      uptr ring_index, num_matching_addrs, num_matching_addrs_4b;
      if (FindHeapAllocation(t->heap_allocations(), tagged_addr, &har,
                             &ring_index, &num_matching_addrs,
                             &num_matching_addrs_4b)) {
        auto &ha = heap_allocations_storage[heap_allocations_count++];
        ha.har = har;
        ha.ring_index = ring_index;
        ha.num_matching_addrs = num_matching_addrs;
        ha.num_matching_addrs_4b = num_matching_addrs_4b;
        ha.free_thread_id = t->unique_id();
      }
    }
  });

  return {{stack_allocations_storage, stack_allocations_count},
          {heap_allocations_storage, heap_allocations_count}};
}
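// Note: the copies made above are bounded - at most 16 threads' stack ring
// buffers and 256 use-after-free candidates are saved (the sizes of the
// storage arrays in BaseReport) - so live thread state is only touched inside
// VisitAllLiveThreads.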
BaseReport::OverflowCandidate BaseReport::FindBufferOverflowCandidate() const {
  OverflowCandidate result = {};
  if (MemIsShadow(untagged_addr))
    return result;
  // Check if this looks like a heap buffer overflow by scanning
  // the shadow left and right and looking for the first adjacent
  // object with a different memory tag. If that tag matches ptr_tag,
  // check the allocator if it has a live chunk there.
  tag_t *tag_ptr = reinterpret_cast<tag_t *>(MemToShadow(untagged_addr));
  tag_t *candidate_tag_ptr = nullptr, *left = tag_ptr, *right = tag_ptr;
  uptr candidate_distance = 0;
  for (; candidate_distance < 1000; candidate_distance++) {
    if (MemIsShadow(reinterpret_cast<uptr>(left)) && TagsEqual(ptr_tag, left)) {
      candidate_tag_ptr = left;
      break;
    }
    --left;
    if (MemIsShadow(reinterpret_cast<uptr>(right)) &&
        TagsEqual(ptr_tag, right)) {
      candidate_tag_ptr = right;
      break;
    }
    ++right;
  }

  constexpr auto kCloseCandidateDistance = 1;
  result.is_close = candidate_distance <= kCloseCandidateDistance;
  if (!candidate_tag_ptr)
    return result;

  result.after = candidate_tag_ptr == left;
  result.untagged_addr = ShadowToMem(reinterpret_cast<uptr>(candidate_tag_ptr));
  HwasanChunkView chunk = FindHeapChunkByAddress(result.untagged_addr);
  if (chunk.IsAllocated()) {
    result.heap.is_allocated = true;
    result.heap.begin = chunk.Beg();
    result.heap.end = chunk.End();
    result.heap.thread_id = chunk.GetAllocThreadId();
    result.heap.stack_id = chunk.GetAllocStackId();
  }
  return result;
}
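// The scan above inspects at most 1000 shadow bytes (16 KB of application
// memory) on each side. A candidate counts as "close", and may be reported
// ahead of use-after-free records, only when it lies within
// kCloseCandidateDistance granules of the faulting address.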
void BaseReport::PrintHeapOrGlobalCandidate() const {
  Decorator d;
  if (candidate.heap.is_allocated) {
    uptr offset;
    const char *whence;
    if (candidate.heap.begin <= untagged_addr &&
        untagged_addr < candidate.heap.end) {
      offset = untagged_addr - candidate.heap.begin;
      whence = "inside";
    } else if (candidate.after) {
      offset = untagged_addr - candidate.heap.end;
      whence = "after";
    } else {
      offset = candidate.heap.begin - untagged_addr;
      whence = "before";
    }
    Printf("%s", d.Error());
    Printf("\nCause: heap-buffer-overflow\n");
    Printf("%s", d.Default());
    Printf("%s", d.Location());
    Printf("%p is located %zd bytes %s a %zd-byte region [%p,%p)\n",
           untagged_addr, offset, whence,
           candidate.heap.end - candidate.heap.begin, candidate.heap.begin,
           candidate.heap.end);
    Printf("%s", d.Allocation());
    Printf("allocated by thread T%u here:\n", candidate.heap.thread_id);
    Printf("%s", d.Default());
    GetStackTraceFromId(candidate.heap.stack_id).Print();
    return;
  }
  // Check whether the address points into a loaded library. If so, this is
  // most likely a global variable.
  const char *module_name;
  uptr module_address;
  Symbolizer *sym = Symbolizer::GetOrInit();
  if (sym->GetModuleNameAndOffsetForPC(candidate.untagged_addr, &module_name,
                                       &module_address)) {
    Printf("%s", d.Error());
    Printf("\nCause: global-overflow\n");
    Printf("%s", d.Default());
    DataInfo info;
    Printf("%s", d.Location());
    if (sym->SymbolizeData(candidate.untagged_addr, &info) && info.start) {
      Printf(
          "%p is located %zd bytes %s a %zd-byte global variable "
          "%s [%p,%p) in %s\n",
          untagged_addr,
          candidate.after ? untagged_addr - (info.start + info.size)
                          : info.start - untagged_addr,
          candidate.after ? "after" : "before", info.size, info.name,
          info.start, info.start + info.size, module_name);
    } else {
      uptr size = GetGlobalSizeFromDescriptor(candidate.untagged_addr);
      if (size == 0)
        // We couldn't find the size of the global from the descriptors.
        Printf(
            "%p is located %s a global variable in "
            "\n    #0 0x%x (%s+0x%x)\n",
            untagged_addr, candidate.after ? "after" : "before",
            candidate.untagged_addr, module_name, module_address);
      else
        Printf(
            "%p is located %s a %zd-byte global variable in "
            "\n    #0 0x%x (%s+0x%x)\n",
            untagged_addr, candidate.after ? "after" : "before", size,
            candidate.untagged_addr, module_name, module_address);
    }
    Printf("%s", d.Default());
  }
}
void BaseReport::PrintAddressDescription() const {
  Decorator d;
  int num_descriptions_printed = 0;

  if (MemIsShadow(untagged_addr)) {
    Printf("%s%p is HWAsan shadow memory.\n%s", d.Location(), untagged_addr,
           d.Default());
    return;
  }

  // Print some very basic information about the address, if it's a heap.
  if (heap.begin) {
    Printf(
        "%s[%p,%p) is a %s %s heap chunk; "
        "size: %zd offset: %zd\n%s",
        d.Location(), heap.begin, heap.begin + heap.size,
        heap.from_small_heap ? "small" : "large",
        heap.is_allocated ? "allocated" : "unallocated", heap.size,
        untagged_addr - heap.begin, d.Default());
  }

  auto announce_by_id = [](u32 thread_id) {
    hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
      if (thread_id == t->unique_id())
        t->Announce();
    });
  };

  // Check stack first. If the address is on the stack of a live thread, we
  // know it cannot be a heap / global overflow.
  for (const auto &sa : allocations.stack) {
    Printf("%s", d.Error());
    Printf("\nCause: stack tag-mismatch\n");
    Printf("%s", d.Location());
    Printf("Address %p is located in stack of thread T%zd\n", untagged_addr,
           sa.thread_id());
    Printf("%s", d.Default());
    announce_by_id(sa.thread_id());
    PrintStackAllocations(sa.get(), ptr_tag, untagged_addr);
    num_descriptions_printed++;
  }

  if (allocations.stack.empty() && candidate.untagged_addr &&
      candidate.is_close) {
    PrintHeapOrGlobalCandidate();
    num_descriptions_printed++;
  }

  for (const auto &ha : allocations.heap) {
    const HeapAllocationRecord har = ha.har;

    Printf("%s", d.Error());
    Printf("\nCause: use-after-free\n");
    Printf("%s", d.Location());
    Printf("%p is located %zd bytes inside a %zd-byte region [%p,%p)\n",
           untagged_addr, untagged_addr - UntagAddr(har.tagged_addr),
           har.requested_size, UntagAddr(har.tagged_addr),
           UntagAddr(har.tagged_addr) + har.requested_size);
    Printf("%s", d.Allocation());
    Printf("freed by thread T%u here:\n", ha.free_thread_id);
    Printf("%s", d.Default());
    GetStackTraceFromId(har.free_context_id).Print();

    Printf("%s", d.Allocation());
    Printf("previously allocated by thread T%u here:\n", har.alloc_thread_id);
    Printf("%s", d.Default());
    GetStackTraceFromId(har.alloc_context_id).Print();

    // Print a developer note: the index of this heap object
    // in the thread's deallocation ring buffer.
    Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", ha.ring_index + 1,
           flags()->heap_history_size);
    Printf("hwasan_dev_note_num_matching_addrs: %zd\n", ha.num_matching_addrs);
    Printf("hwasan_dev_note_num_matching_addrs_4b: %zd\n",
           ha.num_matching_addrs_4b);

    announce_by_id(ha.free_thread_id);
    // TODO: announce_by_id(har.alloc_thread_id);
    num_descriptions_printed++;
  }

  if (candidate.untagged_addr && num_descriptions_printed == 0) {
    PrintHeapOrGlobalCandidate();
    num_descriptions_printed++;
  }

  // Print the remaining threads, as an extra information, 1 line per thread.
  if (flags()->print_live_threads_info) {
    Printf("\n");
    hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); });
  }

  if (!num_descriptions_printed)
    // We exhausted our possibilities. Bail out.
    Printf("HWAddressSanitizer can not describe address in more detail.\n");
  if (num_descriptions_printed > 1) {
    Printf(
        "There are %d potential causes, printed above in order "
        "of likeliness.\n",
        num_descriptions_printed);
  }
}
void BaseReport::PrintTags(uptr addr) const {
  if (shadow.addr) {
    PrintTagsAroundAddr(
        addr, [&](uptr addr) { return GetTagCopy(addr); },
        [&](uptr addr) { return GetShortTagCopy(addr); });
  }
}
class InvalidFreeReport : public BaseReport {
 public:
  InvalidFreeReport(StackTrace *stack, uptr tagged_addr)
      : BaseReport(stack, flags()->halt_on_error, tagged_addr, 0) {}
  ~InvalidFreeReport();

 private:
};

InvalidFreeReport::~InvalidFreeReport() {
  Decorator d;
  Printf("%s", d.Error());
  uptr pc = GetTopPc(stack);
  const char *bug_type = "invalid-free";
  const Thread *thread = GetCurrentThread();
  if (thread) {
    Report("ERROR: %s: %s on address %p at pc %p on thread T%zd\n",
           SanitizerToolName, bug_type, untagged_addr, pc, thread->unique_id());
  } else {
    Report("ERROR: %s: %s on address %p at pc %p on unknown thread\n",
           SanitizerToolName, bug_type, untagged_addr, pc);
  }
  Printf("%s", d.Access());
  if (shadow.addr) {
    Printf("tags: %02x/%02x (ptr/mem)\n", ptr_tag,
           GetTagCopy(MemToShadow(untagged_addr)));
  }
  Printf("%s", d.Default());

  stack->Print();

  PrintAddressDescription();
  PrintTags(untagged_addr);
  MaybePrintAndroidHelpUrl();
  ReportErrorSummary(bug_type, stack);
}
class TailOverwrittenReport : public BaseReport {
 public:
  explicit TailOverwrittenReport(StackTrace *stack, uptr tagged_addr,
                                 uptr orig_size, const u8 *expected)
      : BaseReport(stack, flags()->halt_on_error, tagged_addr, 0),
        orig_size(orig_size),
        tail_size(kShadowAlignment - (orig_size % kShadowAlignment)) {
    CHECK_GT(tail_size, 0U);
    CHECK_LT(tail_size, kShadowAlignment);
    internal_memcpy(tail_copy,
                    reinterpret_cast<u8 *>(untagged_addr + orig_size),
                    tail_size);
    internal_memcpy(actual_expected, expected, tail_size);
    // Short granule is stashed in the last byte of the magic string. To avoid
    // confusion, make the expected magic string contain the short granule tag.
    if (orig_size % kShadowAlignment != 0)
      actual_expected[tail_size - 1] = ptr_tag;
  }
  ~TailOverwrittenReport();

 private:
  const uptr orig_size = 0;
  const uptr tail_size = 0;
  u8 actual_expected[kShadowAlignment] = {};
  u8 tail_copy[kShadowAlignment] = {};
};

TailOverwrittenReport::~TailOverwrittenReport() {
  Decorator d;
  Printf("%s", d.Error());
  const char *bug_type = "allocation-tail-overwritten";
  Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName,
         bug_type, untagged_addr, untagged_addr + orig_size, orig_size);
  Printf("\n%s", d.Default());
  Printf(
      "Stack of invalid access unknown. Issue detected at deallocation "
      "time.\n");
  Printf("%s", d.Allocation());
  Printf("deallocated here:\n");
  Printf("%s", d.Default());
  stack->Print();
  if (heap.begin) {
    Printf("%s", d.Allocation());
    Printf("allocated here:\n");
    Printf("%s", d.Default());
    GetStackTraceFromId(heap.stack_id).Print();
  }

  InternalScopedString s;
  u8 *tail = tail_copy;
  s.Append("Tail contains: ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.Append(".. ");
  for (uptr i = 0; i < tail_size; i++) s.AppendF("%02x ", tail[i]);
  s.Append("\n");
  s.Append("Expected:      ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.Append(".. ");
  for (uptr i = 0; i < tail_size; i++) s.AppendF("%02x ", actual_expected[i]);
  s.Append("\n");
  s.Append("               ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.Append("   ");
  for (uptr i = 0; i < tail_size; i++)
    s.AppendF("%s ", actual_expected[i] != tail[i] ? "^^" : "  ");

  s.AppendF(
      "\nThis error occurs when a buffer overflow overwrites memory\n"
      "after a heap object, but within the %zd-byte granule, e.g.\n"
      "   char *x = new char[20];\n"
      "   x[25] = 42;\n"
      "%s does not detect such bugs in uninstrumented code at the time of "
      "write,"
      "\nbut can detect them at the time of free/delete.\n"
      "To disable this feature set HWASAN_OPTIONS=free_checks_tail_magic=0\n",
      kShadowAlignment, SanitizerToolName);
  Printf("%s", s.data());
  GetCurrentThread()->Announce();
  PrintTags(untagged_addr);
  MaybePrintAndroidHelpUrl();
  ReportErrorSummary(bug_type, stack);
}
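// Sample fragment of the dump built above (illustrative, invented values) for
// orig_size == 20, i.e. tail_size == 12 and four ".." placeholders:
//
//   Tail contains: .. .. .. .. 6f 00 00 00 00 00 00 00 00 00 00 ab
//   Expected:      .. .. .. .. 00 00 00 00 00 00 00 00 00 00 00 ab
//                              ^^
//
// Here 0xab is the expected short granule tag (ptr_tag) and "^^" flags the
// one overwritten byte.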
class TagMismatchReport : public BaseReport {
 public:
  explicit TagMismatchReport(StackTrace *stack, uptr tagged_addr,
                             uptr access_size, bool is_store, bool fatal,
                             uptr *registers_frame)
      : BaseReport(stack, fatal, tagged_addr, access_size),
        is_store(is_store),
        registers_frame(registers_frame) {}
  ~TagMismatchReport();

 private:
  const bool is_store;
  const uptr *registers_frame;
};

TagMismatchReport::~TagMismatchReport() {
  Decorator d;
  // TODO: when possible, try to print heap-use-after-free, etc.
  const char *bug_type = "tag-mismatch";
  uptr pc = GetTopPc(stack);
  Printf("%s", d.Error());
  Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type,
         untagged_addr, pc);

  Thread *t = GetCurrentThread();

  tag_t mem_tag = GetTagCopy(MemToShadow(untagged_addr + mismatch_offset));

  Printf("%s", d.Access());
  if (mem_tag && mem_tag < kShadowAlignment) {
    tag_t short_tag =
        GetShortTagCopy(MemToShadow(untagged_addr + mismatch_offset));
    Printf(
        "%s of size %zu at %p tags: %02x/%02x(%02x) (ptr/mem) in thread T%zd\n",
        is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
        mem_tag, short_tag, t->unique_id());
  } else {
    Printf("%s of size %zu at %p tags: %02x/%02x (ptr/mem) in thread T%zd\n",
           is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
           mem_tag, t->unique_id());
  }
  if (mismatch_offset)
    Printf("Invalid access starting at offset %zu\n", mismatch_offset);
  Printf("%s", d.Default());

  stack->Print();

  PrintAddressDescription();
  t->Announce();

  PrintTags(untagged_addr + mismatch_offset);

  if (registers_frame)
    ReportRegisters(registers_frame, pc);

  MaybePrintAndroidHelpUrl();
  ReportErrorSummary(bug_type, stack);
}

}  // namespace
void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) {
  InvalidFreeReport R(stack, tagged_addr);
}

void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
                           const u8 *expected) {
  TailOverwrittenReport R(stack, tagged_addr, orig_size, expected);
}

void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
                       bool is_store, bool fatal, uptr *registers_frame) {
  TagMismatchReport R(stack, tagged_addr, access_size, is_store, fatal,
                      registers_frame);
}
// See the frame breakdown defined in __hwasan_tag_mismatch (from
// hwasan_tag_mismatch_{aarch64,riscv64}.S).
void ReportRegisters(const uptr *frame, uptr pc) {
  Printf("\nRegisters where the failure occurred (pc %p):\n", pc);

  // We explicitly print a single line (4 registers/line) each iteration to
  // reduce the amount of logcat error messages printed. Each Printf() will
  // result in a new logcat line, irrespective of whether a newline is present,
  // and so we wish to reduce the number of Printf() calls we have to make.
#if defined(__aarch64__)
  Printf("    x0  %016llx  x1  %016llx  x2  %016llx  x3  %016llx\n",
         frame[0], frame[1], frame[2], frame[3]);
#elif SANITIZER_RISCV64
  Printf("    sp  %016llx  x1  %016llx  x2  %016llx  x3  %016llx\n",
         reinterpret_cast<const u8 *>(frame) + 256, frame[1], frame[2],
         frame[3]);
#endif
  Printf("    x4  %016llx  x5  %016llx  x6  %016llx  x7  %016llx\n",
         frame[4], frame[5], frame[6], frame[7]);
  Printf("    x8  %016llx  x9  %016llx  x10 %016llx  x11 %016llx\n",
         frame[8], frame[9], frame[10], frame[11]);
  Printf("    x12 %016llx  x13 %016llx  x14 %016llx  x15 %016llx\n",
         frame[12], frame[13], frame[14], frame[15]);
  Printf("    x16 %016llx  x17 %016llx  x18 %016llx  x19 %016llx\n",
         frame[16], frame[17], frame[18], frame[19]);
  Printf("    x20 %016llx  x21 %016llx  x22 %016llx  x23 %016llx\n",
         frame[20], frame[21], frame[22], frame[23]);
  Printf("    x24 %016llx  x25 %016llx  x26 %016llx  x27 %016llx\n",
         frame[24], frame[25], frame[26], frame[27]);
  // hwasan_check* reduces the stack pointer by 256, then __hwasan_tag_mismatch
  // passes it to this function.
#if defined(__aarch64__)
  Printf("    x28 %016llx  x29 %016llx  x30 %016llx   sp %016llx\n", frame[28],
         frame[29], frame[30], reinterpret_cast<const u8 *>(frame) + 256);
#elif SANITIZER_RISCV64
  Printf("    x28 %016llx  x29 %016llx  x30 %016llx  x31 %016llx\n", frame[28],
         frame[29], frame[30], frame[31]);
#endif
}
}  // namespace __hwasan

void __hwasan_set_error_report_callback(void (*callback)(const char *)) {
  __hwasan::ScopedReport::SetErrorReportCallback(callback);
}