//===-- hwasan_report.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// Error reporting.
//===----------------------------------------------------------------------===//
#include "hwasan_report.h"

#include <dlfcn.h>

#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_globals.h"
#include "hwasan_mapping.h"
#include "hwasan_thread.h"
#include "hwasan_thread_list.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_array_ref.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace_printer.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
using namespace __sanitizer;

namespace __hwasan {

class ScopedReport {
 public:
  explicit ScopedReport(bool fatal) : fatal(fatal) {
    Lock lock(&error_message_lock_);
    error_message_ptr_ = fatal ? &error_message_ : nullptr;
    ++hwasan_report_count;
  }

  ~ScopedReport() {
    void (*report_cb)(const char *);
    {
      Lock lock(&error_message_lock_);
      report_cb = error_report_callback_;
      error_message_ptr_ = nullptr;
    }
    if (report_cb)
      report_cb(error_message_.data());
    if (fatal)
      SetAbortMessage(error_message_.data());
    if (common_flags()->print_module_map >= 2 ||
        (fatal && common_flags()->print_module_map))
      DumpProcessMap();
    if (fatal)
      Die();
  }

  static void MaybeAppendToErrorMessage(const char *msg) {
    Lock lock(&error_message_lock_);
    if (!error_message_ptr_)
      return;
    error_message_ptr_->Append(msg);
  }

  static void SetErrorReportCallback(void (*callback)(const char *)) {
    Lock lock(&error_message_lock_);
    error_report_callback_ = callback;
  }

 private:
  InternalScopedString error_message_;
  bool fatal;

  static Mutex error_message_lock_;
  static InternalScopedString *error_message_ptr_
      SANITIZER_GUARDED_BY(error_message_lock_);
  static void (*error_report_callback_)(const char *);
};

Mutex ScopedReport::error_message_lock_;
InternalScopedString *ScopedReport::error_message_ptr_;
void (*ScopedReport::error_report_callback_)(const char *);

// If there is an active ScopedReport, append to its error message.
void AppendToErrorMessageBuffer(const char *buffer) {
  ScopedReport::MaybeAppendToErrorMessage(buffer);
}

static StackTrace GetStackTraceFromId(u32 id) {
  CHECK(id);
  StackTrace res = StackDepotGet(id);
  CHECK(res.trace);
  return res;
}

static void MaybePrintAndroidHelpUrl() {
#if SANITIZER_ANDROID
  Printf(
      "Learn more about HWASan reports: "
      "https://source.android.com/docs/security/test/memory-safety/"
      "hwasan-reports\n");
#endif
}

namespace {
// A RAII object that holds a copy of the current thread stack ring buffer.
// The actual stack buffer may change while we are iterating over it (for
// example, Printf may call syslog() which can itself be built with hwasan).
class SavedStackAllocations {
 public:
  SavedStackAllocations() = default;

  explicit SavedStackAllocations(Thread *t) { CopyFrom(t); }

  void CopyFrom(Thread *t) {
    StackAllocationsRingBuffer *rb = t->stack_allocations();
    uptr size = rb->size() * sizeof(uptr);
    void *storage =
        MmapAlignedOrDieOnFatalError(size, size * 2, "saved stack allocations");
    new (&rb_) StackAllocationsRingBuffer(*rb, storage);
    thread_id_ = t->unique_id();
  }

  ~SavedStackAllocations() {
    if (rb_) {
      StackAllocationsRingBuffer *rb = get();
      UnmapOrDie(rb->StartOfStorage(), rb->size() * sizeof(uptr));
    }
  }

  const StackAllocationsRingBuffer *get() const {
    return (const StackAllocationsRingBuffer *)&rb_;
  }

  StackAllocationsRingBuffer *get() {
    return (StackAllocationsRingBuffer *)&rb_;
  }

  u32 thread_id() const { return thread_id_; }

 private:
  uptr rb_ = 0;
  u32 thread_id_;
};

class Decorator : public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() {}
  const char *Access() { return Blue(); }
  const char *Allocation() const { return Magenta(); }
  const char *Origin() const { return Magenta(); }
  const char *Name() const { return Green(); }
  const char *Location() { return Green(); }
  const char *Thread() { return Green(); }
};
}  // namespace

static bool FindHeapAllocation(HeapAllocationsRingBuffer *rb, uptr tagged_addr,
                               HeapAllocationRecord *har, uptr *ring_index,
                               uptr *num_matching_addrs,
                               uptr *num_matching_addrs_4b) {
  if (!rb) return false;

  *num_matching_addrs = 0;
  *num_matching_addrs_4b = 0;
  for (uptr i = 0, size = rb->size(); i < size; i++) {
    auto h = (*rb)[i];
    if (h.tagged_addr <= tagged_addr &&
        h.tagged_addr + h.requested_size > tagged_addr) {
      *har = h;
      *ring_index = i;
      return true;
    }

    // Measure the number of heap ring buffer entries that would have matched
    // if we had only one entry per address (e.g. if the ring buffer data was
    // stored at the address itself). This will help us tune the allocator
    // implementation for MTE.
    if (UntagAddr(h.tagged_addr) <= UntagAddr(tagged_addr) &&
        UntagAddr(h.tagged_addr) + h.requested_size > UntagAddr(tagged_addr)) {
      ++*num_matching_addrs;
    }

    // Measure the number of heap ring buffer entries that would have matched
    // if we only had 4 tag bits, which is the case for MTE.
    auto untag_4b = [](uptr p) {
      return p & ((1ULL << 60) - 1);
    };
    if (untag_4b(h.tagged_addr) <= untag_4b(tagged_addr) &&
        untag_4b(h.tagged_addr) + h.requested_size > untag_4b(tagged_addr)) {
      ++*num_matching_addrs_4b;
    }
  }
  return false;
}
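
// Editor's note, a worked example of untag_4b (values assumed): pointer tags
// live in the top byte (bits 56-63), and ((1ULL << 60) - 1) keeps bits 0-59,
// so only the top 4 tag bits are cleared -- emulating MTE's 4-bit tags:
//
//   untag_4b(0xab00'0000'0000'1000) == 0x0b00'0000'0000'1000  // 0xab -> 0xb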

static void PrintStackAllocations(const StackAllocationsRingBuffer *sa,
                                  tag_t addr_tag, uptr untagged_addr) {
  uptr frames = Min((uptr)flags()->stack_history_size, sa->size());
  bool found_local = false;
  for (uptr i = 0; i < frames; i++) {
    const uptr *record_addr = &(*sa)[i];
    uptr record = *record_addr;
    if (!record)
      break;
    tag_t base_tag =
        reinterpret_cast<uptr>(record_addr) >> kRecordAddrBaseTagShift;
    uptr fp = (record >> kRecordFPShift) << kRecordFPLShift;
    uptr pc_mask = (1ULL << kRecordFPShift) - 1;
    uptr pc = record & pc_mask;
    FrameInfo frame;
    if (Symbolizer::GetOrInit()->SymbolizeFrame(pc, &frame)) {
      for (LocalInfo &local : frame.locals) {
        if (!local.has_frame_offset || !local.has_size || !local.has_tag_offset)
          continue;
        tag_t obj_tag = base_tag ^ local.tag_offset;
        if (obj_tag != addr_tag)
          continue;
        // Calculate the offset from the object address to the faulting
        // address. Because we only store bits 4-19 of FP (bits 0-3 are
        // guaranteed to be zero), the calculation is performed mod 2^20 and may
        // harmlessly underflow if the address mod 2^20 is below the object
        // address.
        uptr obj_offset =
            (untagged_addr - fp - local.frame_offset) & (kRecordFPModulus - 1);
        if (obj_offset >= local.size)
          continue;
        if (!found_local) {
          Printf("Potentially referenced stack objects:\n");
          found_local = true;
        }
        Printf("  %s in %s %s:%d\n", local.name, local.function_name,
               local.decl_file, local.decl_line);
      }
      frame.Clear();
    }
  }

  if (found_local)
    return;

  // We didn't find any locals. Most likely we don't have symbols, so dump
  // the information that we have for offline analysis.
  InternalScopedString frame_desc;
  Printf("Previously allocated frames:\n");
  for (uptr i = 0; i < frames; i++) {
    const uptr *record_addr = &(*sa)[i];
    uptr record = *record_addr;
    if (!record)
      break;
    uptr pc_mask = (1ULL << 48) - 1;
    uptr pc = record & pc_mask;
    frame_desc.AppendF("  record_addr:0x%zx record:0x%zx",
                       reinterpret_cast<uptr>(record_addr), record);
    if (SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc)) {
      StackTracePrinter::GetOrInit()->RenderFrame(
          &frame_desc, " %F %L", 0, frame->info.address, &frame->info,
          common_flags()->symbolize_vs_style,
          common_flags()->strip_path_prefix);
      frame->ClearAll();
    }
    Printf("%s\n", frame_desc.data());
    frame_desc.clear();
  }
}
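
// Editor's note, a sketch of the ring-buffer record layout as decoded above:
//   bits  0-47 of the record: PC of the function that created the frame
//   bits 48-63 of the record: bits 4-19 of the frame pointer (bits 0-3 are
//                             zero, so FP is only recovered modulo 2^20)
// and the record's *slot address* carries the frame's base tag in its top
// bits (record_addr >> kRecordAddrBaseTagShift).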

// Returns true if tag == *tag_ptr, reading tags from short granules if
// necessary. This may return a false positive if tags 1-15 are used as a
// regular tag rather than a short granule marker.
static bool TagsEqual(tag_t tag, tag_t *tag_ptr) {
  if (tag == *tag_ptr)
    return true;
  if (*tag_ptr == 0 || *tag_ptr > kShadowAlignment - 1)
    return false;
  uptr mem = ShadowToMem(reinterpret_cast<uptr>(tag_ptr));
  tag_t inline_tag = *reinterpret_cast<tag_t *>(mem + kShadowAlignment - 1);
  return tag == inline_tag;
}
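
// Editor's note, a short-granule example (values assumed): for a 20-byte
// allocation with tag 0xab, the second 16-byte granule holds only 4 valid
// bytes. Its shadow entry stores 4 (the valid-byte count, in 1..15), and the
// real tag 0xab is stashed in the granule's last byte -- the inline_tag that
// TagsEqual reads back.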

// HWASan globals store the size of the global in the descriptor. In cases
// where we don't have a binary with symbols, we can't grab the size of the
// global from the debug info - but we might be able to retrieve it from the
// descriptor. Returns zero if the lookup failed.
static uptr GetGlobalSizeFromDescriptor(uptr ptr) {
  // Find the ELF object that this global resides in.
  Dl_info info;
  if (dladdr(reinterpret_cast<void *>(ptr), &info) == 0)
    return 0;
  auto *ehdr = reinterpret_cast<const ElfW(Ehdr) *>(info.dli_fbase);
  auto *phdr_begin = reinterpret_cast<const ElfW(Phdr) *>(
      reinterpret_cast<const u8 *>(ehdr) + ehdr->e_phoff);

  // Get the load bias. This is normally the same as the dli_fbase address on
  // position-independent code, but can be different on non-PIE executables,
  // binaries using LLD's partitioning feature, or binaries compiled with a
  // linker script.
  ElfW(Addr) load_bias = 0;
  for (const auto &phdr :
       ArrayRef<const ElfW(Phdr)>(phdr_begin, phdr_begin + ehdr->e_phnum)) {
    if (phdr.p_type != PT_LOAD || phdr.p_offset != 0)
      continue;
    load_bias = reinterpret_cast<ElfW(Addr)>(ehdr) - phdr.p_vaddr;
    break;
  }

  // Walk all globals in this ELF object, looking for the one we're interested
  // in. Once we find it, we can stop iterating and return the size of the
  // global we're interested in.
  for (const hwasan_global &global :
       HwasanGlobalsFor(load_bias, phdr_begin, ehdr->e_phnum))
    if (global.addr() <= ptr && ptr < global.addr() + global.size())
      return global.size();

  return 0;
}
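
// Editor's note, a load-bias example (addresses assumed): for a PIE mapped at
// 0x7f0000000000 whose first PT_LOAD has p_offset == 0 and p_vaddr == 0,
// load_bias == 0x7f0000000000, and a global's runtime address is its
// link-time p_vaddr plus that bias -- which is what HwasanGlobalsFor uses to
// rebase the descriptors.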

void ReportStats() {}

constexpr uptr kDumpWidth = 16;
constexpr uptr kShadowLines = 17;
constexpr uptr kShadowDumpSize = kShadowLines * kDumpWidth;

constexpr uptr kShortLines = 3;
constexpr uptr kShortDumpSize = kShortLines * kDumpWidth;
constexpr uptr kShortDumpOffset = (kShadowLines - kShortLines) / 2 * kDumpWidth;
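
// Editor's note on the dump geometry: the main dump is kShadowLines x
// kDumpWidth = 17 * 16 = 272 tags centered on the fault; the short-granule
// dump covers the middle kShortLines = 3 rows, so it starts
// (17 - 3) / 2 * 16 = 112 tags into the main dump (kShortDumpOffset).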

static uptr GetPrintTagStart(uptr addr) {
  addr = MemToShadow(addr);
  addr = RoundDownTo(addr, kDumpWidth);
  addr -= kDumpWidth * (kShadowLines / 2);
  return addr;
}

template <typename PrintTag>
static void PrintTagInfoAroundAddr(uptr addr, uptr num_rows,
                                   InternalScopedString &s,
                                   PrintTag print_tag) {
  uptr center_row_beg = RoundDownTo(addr, kDumpWidth);
  uptr beg_row = center_row_beg - kDumpWidth * (num_rows / 2);
  uptr end_row = center_row_beg + kDumpWidth * ((num_rows + 1) / 2);
  for (uptr row = beg_row; row < end_row; row += kDumpWidth) {
    s.Append(row == center_row_beg ? "=>" : "  ");
    s.AppendF("%p:", (void *)ShadowToMem(row));
    for (uptr i = 0; i < kDumpWidth; i++) {
      s.Append(row + i == addr ? "[" : " ");
      print_tag(s, row + i);
      s.Append(row + i == addr ? "]" : " ");
    }
    s.AppendF("\n");
  }
}
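
// Editor's note, illustrative output (addresses and tags made up): the center
// row is prefixed with "=>" and the faulting granule's tag is bracketed:
//
//   =>0x0040aa00: 3c 3c [7b] 3c 3c ...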

template <typename GetTag, typename GetShortTag>
static void PrintTagsAroundAddr(uptr addr, GetTag get_tag,
                                GetShortTag get_short_tag) {
  InternalScopedString s;
  addr = MemToShadow(addr);
  s.AppendF(
      "Memory tags around the buggy address (one tag corresponds to %zd "
      "bytes):\n",
      kShadowAlignment);
  PrintTagInfoAroundAddr(addr, kShadowLines, s,
                         [&](InternalScopedString &s, uptr tag_addr) {
                           tag_t tag = get_tag(tag_addr);
                           s.AppendF("%02x", tag);
                         });

  s.AppendF(
      "Tags for short granules around the buggy address (one tag corresponds "
      "to %zd bytes):\n",
      kShadowAlignment);
  PrintTagInfoAroundAddr(addr, kShortLines, s,
                         [&](InternalScopedString &s, uptr tag_addr) {
                           tag_t tag = get_tag(tag_addr);
                           if (tag >= 1 && tag <= kShadowAlignment) {
                             tag_t short_tag = get_short_tag(tag_addr);
                             s.AppendF("%02x", short_tag);
                           } else {
                             s.AppendF("..");
                           }
                         });
  s.AppendF(
      "See "
      "https://clang.llvm.org/docs/"
      "HardwareAssistedAddressSanitizerDesign.html#short-granules for a "
      "description of short granule tags\n");
  Printf("%s", s.data());
}

static uptr GetTopPc(const StackTrace *stack) {
  return stack->size ? StackTrace::GetPreviousInstructionPc(stack->trace[0])
                     : 0;
}

class BaseReport {
 public:
  BaseReport(StackTrace *stack, bool fatal, uptr tagged_addr, uptr access_size)
      : scoped_report(fatal),
        stack(stack),
        tagged_addr(tagged_addr),
        access_size(access_size),
        untagged_addr(UntagAddr(tagged_addr)),
        ptr_tag(GetTagFromPointer(tagged_addr)),
        mismatch_offset(FindMismatchOffset()),
        heap(CopyHeapChunk()),
        allocations(CopyAllocations()),
        candidate(FindBufferOverflowCandidate()),
        shadow(CopyShadow()) {}

 protected:
  struct OverflowCandidate {
    uptr untagged_addr = 0;
    bool after = false;
    bool is_close = false;

    struct {
      uptr begin = 0;
      uptr end = 0;
      u32 thread_id = 0;
      u32 stack_id = 0;
      bool is_allocated = false;
    } heap;
  };

  struct HeapAllocation {
    HeapAllocationRecord har = {};
    uptr ring_index = 0;
    uptr num_matching_addrs = 0;
    uptr num_matching_addrs_4b = 0;
    u32 free_thread_id = 0;
  };

  struct Allocations {
    ArrayRef<SavedStackAllocations> stack;
    ArrayRef<HeapAllocation> heap;
  };

  struct HeapChunk {
    uptr begin = 0;
    uptr size = 0;
    u32 stack_id = 0;
    bool from_small_heap = false;
    bool is_allocated = false;
  };

  struct Shadow {
    uptr addr = 0;
    tag_t tags[kShadowDumpSize] = {};
    tag_t short_tags[kShortDumpSize] = {};
  };

  sptr FindMismatchOffset() const;
  Shadow CopyShadow() const;
  tag_t GetTagCopy(uptr addr) const;
  tag_t GetShortTagCopy(uptr addr) const;
  HeapChunk CopyHeapChunk() const;
  Allocations CopyAllocations();
  OverflowCandidate FindBufferOverflowCandidate() const;
  void PrintAddressDescription() const;
  void PrintHeapOrGlobalCandidate() const;
  void PrintTags(uptr addr) const;

  SavedStackAllocations stack_allocations_storage[16];
  HeapAllocation heap_allocations_storage[256];

  const ScopedReport scoped_report;
  const StackTrace *stack = nullptr;
  const uptr tagged_addr = 0;
  const uptr access_size = 0;
  const uptr untagged_addr = 0;
  const tag_t ptr_tag = 0;
  const sptr mismatch_offset = 0;

  const HeapChunk heap;
  const Allocations allocations;
  const OverflowCandidate candidate;
  const Shadow shadow;
};

sptr BaseReport::FindMismatchOffset() const {
  if (!access_size)
    return 0;
  sptr offset =
      __hwasan_test_shadow(reinterpret_cast<void *>(tagged_addr), access_size);
  CHECK_GE(offset, 0);
  CHECK_LT(offset, static_cast<sptr>(access_size));
  tag_t *tag_ptr =
      reinterpret_cast<tag_t *>(MemToShadow(untagged_addr + offset));
  tag_t mem_tag = *tag_ptr;

  if (mem_tag && mem_tag < kShadowAlignment) {
    tag_t *granule_ptr = reinterpret_cast<tag_t *>((untagged_addr + offset) &
                                                   ~(kShadowAlignment - 1));
    // Even when offset is 0, (untagged_addr + offset) may not be aligned to
    // granules. This is the offset of the leftmost accessed byte within the
    // bad granule.
    u8 in_granule_offset = (untagged_addr + offset) & (kShadowAlignment - 1);
    tag_t short_tag = granule_ptr[kShadowAlignment - 1];
    // The first mismatch was a short granule that matched the ptr_tag.
    if (short_tag == ptr_tag) {
      // If the access starts after the end of the short granule, then the
      // first bad byte is the first byte of the access; otherwise it is the
      // first byte past the end of the short granule.
      if (mem_tag > in_granule_offset) {
        offset += mem_tag - in_granule_offset;
      }
    }
  }
  return offset;
}
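
// Editor's note, a worked example (values assumed): ptr_tag 0xab, an aligned
// 8-byte access to a granule holding a 4-byte object (shadow tag 4, short tag
// 0xab). __hwasan_test_shadow flags the granule at offset 0,
// in_granule_offset is 0, and since short_tag matches and mem_tag (4) > 0,
// the first faulting byte is 0 + (4 - 0) = 4: the byte just past the object.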

BaseReport::Shadow BaseReport::CopyShadow() const {
  Shadow result;
  if (!MemIsApp(untagged_addr))
    return result;

  result.addr = GetPrintTagStart(untagged_addr + mismatch_offset);
  uptr tag_addr = result.addr;
  uptr short_end = kShortDumpOffset + ARRAY_SIZE(shadow.short_tags);
  for (uptr i = 0; i < ARRAY_SIZE(result.tags); ++i, ++tag_addr) {
    if (!MemIsShadow(tag_addr))
      continue;
    result.tags[i] = *reinterpret_cast<tag_t *>(tag_addr);
    if (i < kShortDumpOffset || i >= short_end)
      continue;
    uptr granule_addr = ShadowToMem(tag_addr);
    if (1 <= result.tags[i] && result.tags[i] <= kShadowAlignment &&
        IsAccessibleMemoryRange(granule_addr, kShadowAlignment)) {
      result.short_tags[i - kShortDumpOffset] =
          *reinterpret_cast<tag_t *>(granule_addr + kShadowAlignment - 1);
    }
  }
  return result;
}

tag_t BaseReport::GetTagCopy(uptr addr) const {
  CHECK_GE(addr, shadow.addr);
  uptr idx = addr - shadow.addr;
  CHECK_LT(idx, ARRAY_SIZE(shadow.tags));
  return shadow.tags[idx];
}

tag_t BaseReport::GetShortTagCopy(uptr addr) const {
  CHECK_GE(addr, shadow.addr + kShortDumpOffset);
  uptr idx = addr - shadow.addr - kShortDumpOffset;
  CHECK_LT(idx, ARRAY_SIZE(shadow.short_tags));
  return shadow.short_tags[idx];
}

BaseReport::HeapChunk BaseReport::CopyHeapChunk() const {
  HeapChunk result = {};
  if (MemIsShadow(untagged_addr))
    return result;
  HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
  result.begin = chunk.Beg();
  if (result.begin) {
    result.size = chunk.ActualSize();
    result.from_small_heap = chunk.FromSmallHeap();
    result.is_allocated = chunk.IsAllocated();
    result.stack_id = chunk.GetAllocStackId();
  }
  return result;
}

BaseReport::Allocations BaseReport::CopyAllocations() {
  if (MemIsShadow(untagged_addr))
    return {};
  uptr stack_allocations_count = 0;
  uptr heap_allocations_count = 0;
  hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
    if (stack_allocations_count < ARRAY_SIZE(stack_allocations_storage) &&
        t->AddrIsInStack(untagged_addr)) {
      stack_allocations_storage[stack_allocations_count++].CopyFrom(t);
    }

    if (heap_allocations_count < ARRAY_SIZE(heap_allocations_storage)) {
      // Scan all threads' ring buffers to see if this is a heap-use-after-free.
      HeapAllocationRecord har;
      uptr ring_index, num_matching_addrs, num_matching_addrs_4b;
      if (FindHeapAllocation(t->heap_allocations(), tagged_addr, &har,
                             &ring_index, &num_matching_addrs,
                             &num_matching_addrs_4b)) {
        auto &ha = heap_allocations_storage[heap_allocations_count++];
        ha.har = har;
        ha.ring_index = ring_index;
        ha.num_matching_addrs = num_matching_addrs;
        ha.num_matching_addrs_4b = num_matching_addrs_4b;
        ha.free_thread_id = t->unique_id();
      }
    }
  });

  return {{stack_allocations_storage, stack_allocations_count},
          {heap_allocations_storage, heap_allocations_count}};
}

BaseReport::OverflowCandidate BaseReport::FindBufferOverflowCandidate() const {
  OverflowCandidate result = {};
  if (MemIsShadow(untagged_addr))
    return result;
  // Check if this looks like a heap buffer overflow by scanning
  // the shadow left and right and looking for the first adjacent
  // object with a different memory tag. If that tag matches ptr_tag,
  // check whether the allocator has a live chunk there.
  tag_t *tag_ptr = reinterpret_cast<tag_t *>(MemToShadow(untagged_addr));
  tag_t *candidate_tag_ptr = nullptr, *left = tag_ptr, *right = tag_ptr;
  uptr candidate_distance = 0;
  for (; candidate_distance < 1000; candidate_distance++) {
    if (MemIsShadow(reinterpret_cast<uptr>(left)) && TagsEqual(ptr_tag, left)) {
      candidate_tag_ptr = left;
      break;
    }
    --left;
    if (MemIsShadow(reinterpret_cast<uptr>(right)) &&
        TagsEqual(ptr_tag, right)) {
      candidate_tag_ptr = right;
      break;
    }
    ++right;
  }

  constexpr auto kCloseCandidateDistance = 1;
  result.is_close = candidate_distance <= kCloseCandidateDistance;

  result.after = candidate_tag_ptr == left;
  result.untagged_addr = ShadowToMem(reinterpret_cast<uptr>(candidate_tag_ptr));
  HwasanChunkView chunk = FindHeapChunkByAddress(result.untagged_addr);
  if (chunk.IsAllocated()) {
    result.heap.is_allocated = true;
    result.heap.begin = chunk.Beg();
    result.heap.end = chunk.End();
    result.heap.thread_id = chunk.GetAllocThreadId();
    result.heap.stack_id = chunk.GetAllocStackId();
  }
  return result;
}
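
// Editor's note, a sketch of the scan: starting from the faulting granule,
// the loop widens by one shadow cell left and one right per iteration (up to
// 1000). A candidate at distance 1 (kCloseCandidateDistance) next to a live
// chunk is the classic off-by-one heap-buffer-overflow just past or just
// before the allocation.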

void BaseReport::PrintHeapOrGlobalCandidate() const {
  Decorator d;
  if (candidate.heap.is_allocated) {
    uptr offset;
    const char *whence;
    if (candidate.heap.begin <= untagged_addr &&
        untagged_addr < candidate.heap.end) {
      offset = untagged_addr - candidate.heap.begin;
      whence = "inside";
    } else if (candidate.after) {
      offset = untagged_addr - candidate.heap.end;
      whence = "after";
    } else {
      offset = candidate.heap.begin - untagged_addr;
      whence = "before";
    }
    Printf("%s", d.Error());
    Printf("\nCause: heap-buffer-overflow\n");
    Printf("%s", d.Default());
    Printf("%s", d.Location());
    Printf("%p is located %zd bytes %s a %zd-byte region [%p,%p)\n",
           untagged_addr, offset, whence,
           candidate.heap.end - candidate.heap.begin, candidate.heap.begin,
           candidate.heap.end);
    Printf("%s", d.Allocation());
    Printf("allocated by thread T%u here:\n", candidate.heap.thread_id);
    Printf("%s", d.Default());
    GetStackTraceFromId(candidate.heap.stack_id).Print();
    return;
  }
  // Check whether the address points into a loaded library. If so, this is
  // most likely a global variable.
  const char *module_name;
  uptr module_address;
  Symbolizer *sym = Symbolizer::GetOrInit();
  if (sym->GetModuleNameAndOffsetForPC(candidate.untagged_addr, &module_name,
                                       &module_address)) {
    Printf("%s", d.Error());
    Printf("\nCause: global-overflow\n");
    Printf("%s", d.Default());
    DataInfo info;
    Printf("%s", d.Location());
    if (sym->SymbolizeData(candidate.untagged_addr, &info) && info.start) {
      Printf(
          "%p is located %zd bytes %s a %zd-byte global variable "
          "%s [%p,%p) in %s\n",
          untagged_addr,
          candidate.after ? untagged_addr - (info.start + info.size)
                          : info.start - untagged_addr,
          candidate.after ? "after" : "before", info.size, info.name,
          info.start, info.start + info.size, module_name);
    } else {
      uptr size = GetGlobalSizeFromDescriptor(candidate.untagged_addr);
      if (size == 0)
        // We couldn't find the size of the global from the descriptors.
        Printf(
            "%p is located %s a global variable in "
            "\n    #0 0x%x (%s+0x%x)\n",
            untagged_addr, candidate.after ? "after" : "before",
            candidate.untagged_addr, module_name, module_address);
      else
        Printf(
            "%p is located %s a %zd-byte global variable in "
            "\n    #0 0x%x (%s+0x%x)\n",
            untagged_addr, candidate.after ? "after" : "before", size,
            candidate.untagged_addr, module_name, module_address);
    }
    Printf("%s", d.Default());
  }
}

void BaseReport::PrintAddressDescription() const {
  Decorator d;
  int num_descriptions_printed = 0;

  if (MemIsShadow(untagged_addr)) {
    Printf("%s%p is HWAsan shadow memory.\n%s", d.Location(), untagged_addr,
           d.Default());
    return;
  }

  // Print some very basic information about the address, if it's a heap
  // address.
  if (heap.begin) {
    Printf(
        "%s[%p,%p) is a %s %s heap chunk; "
        "size: %zd offset: %zd\n%s",
        d.Location(), heap.begin, heap.begin + heap.size,
        heap.from_small_heap ? "small" : "large",
        heap.is_allocated ? "allocated" : "unallocated", heap.size,
        untagged_addr - heap.begin, d.Default());
  }

  auto announce_by_id = [](u32 thread_id) {
    hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
      if (thread_id == t->unique_id())
        t->Announce();
    });
  };

  // Check the stack first. If the address is on the stack of a live thread,
  // we know it cannot be a heap / global overflow.
  for (const auto &sa : allocations.stack) {
    // TODO(fmayer): figure out how to distinguish use-after-return and
    // stack-buffer-overflow.
    Printf("%s", d.Error());
    Printf("\nCause: stack tag-mismatch\n");
    Printf("%s", d.Location());
    Printf("Address %p is located in stack of thread T%zd\n", untagged_addr,
           sa.thread_id());
    Printf("%s", d.Default());
    announce_by_id(sa.thread_id());
    PrintStackAllocations(sa.get(), ptr_tag, untagged_addr);
    num_descriptions_printed++;
  }

  if (allocations.stack.empty() && candidate.untagged_addr &&
      candidate.is_close) {
    PrintHeapOrGlobalCandidate();
    num_descriptions_printed++;
  }

  for (const auto &ha : allocations.heap) {
    const HeapAllocationRecord har = ha.har;

    Printf("%s", d.Error());
    Printf("\nCause: use-after-free\n");
    Printf("%s", d.Location());
    Printf("%p is located %zd bytes inside a %zd-byte region [%p,%p)\n",
           untagged_addr, untagged_addr - UntagAddr(har.tagged_addr),
           har.requested_size, UntagAddr(har.tagged_addr),
           UntagAddr(har.tagged_addr) + har.requested_size);
    Printf("%s", d.Allocation());
    Printf("freed by thread T%u here:\n", ha.free_thread_id);
    Printf("%s", d.Default());
    GetStackTraceFromId(har.free_context_id).Print();

    Printf("%s", d.Allocation());
    Printf("previously allocated by thread T%u here:\n", har.alloc_thread_id);
    Printf("%s", d.Default());
    GetStackTraceFromId(har.alloc_context_id).Print();

    // Print a developer note: the index of this heap object
    // in the thread's deallocation ring buffer.
    Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", ha.ring_index + 1,
           flags()->heap_history_size);
    Printf("hwasan_dev_note_num_matching_addrs: %zd\n", ha.num_matching_addrs);
    Printf("hwasan_dev_note_num_matching_addrs_4b: %zd\n",
           ha.num_matching_addrs_4b);

    announce_by_id(ha.free_thread_id);
    // TODO: announce_by_id(har.alloc_thread_id);
    num_descriptions_printed++;
  }

  if (candidate.untagged_addr && num_descriptions_printed == 0) {
    PrintHeapOrGlobalCandidate();
    num_descriptions_printed++;
  }

  // Print the remaining threads, as extra information, one line per thread.
  if (flags()->print_live_threads_info)
    hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); });

  if (!num_descriptions_printed)
    // We exhausted our possibilities. Bail out.
    Printf("HWAddressSanitizer cannot describe address in more detail.\n");
  if (num_descriptions_printed > 1) {
    Printf(
        "There are %d potential causes, printed above in order "
        "of likeliness.\n",
        num_descriptions_printed);
  }
}

void BaseReport::PrintTags(uptr addr) const {
  if (shadow.addr) {
    PrintTagsAroundAddr(
        addr, [&](uptr addr) { return GetTagCopy(addr); },
        [&](uptr addr) { return GetShortTagCopy(addr); });
  }
}

class InvalidFreeReport : public BaseReport {
 public:
  InvalidFreeReport(StackTrace *stack, uptr tagged_addr)
      : BaseReport(stack, flags()->halt_on_error, tagged_addr, 0) {}
  ~InvalidFreeReport();

 private:
};

InvalidFreeReport::~InvalidFreeReport() {
  Decorator d;
  Printf("%s", d.Error());
  uptr pc = GetTopPc(stack);
  const char *bug_type = "invalid-free";
  const Thread *thread = GetCurrentThread();
  if (thread) {
    Report("ERROR: %s: %s on address %p at pc %p on thread T%zd\n",
           SanitizerToolName, bug_type, untagged_addr, pc, thread->unique_id());
  } else {
    Report("ERROR: %s: %s on address %p at pc %p on unknown thread\n",
           SanitizerToolName, bug_type, untagged_addr, pc);
  }
  Printf("%s", d.Access());
  if (shadow.addr) {
    Printf("tags: %02x/%02x (ptr/mem)\n", ptr_tag,
           GetTagCopy(MemToShadow(untagged_addr)));
  }
  Printf("%s", d.Default());

  stack->Print();

  PrintAddressDescription();
  PrintTags(untagged_addr);
  MaybePrintAndroidHelpUrl();
  ReportErrorSummary(bug_type, stack);
}

class TailOverwrittenReport : public BaseReport {
 public:
  explicit TailOverwrittenReport(StackTrace *stack, uptr tagged_addr,
                                 uptr orig_size, const u8 *expected)
      : BaseReport(stack, flags()->halt_on_error, tagged_addr, 0),
        orig_size(orig_size),
        tail_size(kShadowAlignment - (orig_size % kShadowAlignment)) {
    CHECK_GT(tail_size, 0U);
    CHECK_LT(tail_size, kShadowAlignment);
    internal_memcpy(tail_copy,
                    reinterpret_cast<u8 *>(untagged_addr + orig_size),
                    tail_size);
    internal_memcpy(actual_expected, expected, tail_size);
    // The short granule tag is stashed in the last byte of the magic string.
    // To avoid confusion, make the expected magic string contain the short
    // granule tag.
    if (orig_size % kShadowAlignment != 0)
      actual_expected[tail_size - 1] = ptr_tag;
  }
  ~TailOverwrittenReport();

 private:
  const uptr orig_size = 0;
  const uptr tail_size = 0;
  u8 actual_expected[kShadowAlignment] = {};
  u8 tail_copy[kShadowAlignment] = {};
};

TailOverwrittenReport::~TailOverwrittenReport() {
  Decorator d;
  Printf("%s", d.Error());
  const char *bug_type = "allocation-tail-overwritten";
  Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName,
         bug_type, untagged_addr, untagged_addr + orig_size, orig_size);
  Printf("\n%s", d.Default());
  Printf(
      "Stack of invalid access unknown. Issue detected at deallocation "
      "time.\n");
  Printf("%s", d.Allocation());
  Printf("deallocated here:\n");
  Printf("%s", d.Default());
  stack->Print();
  if (heap.begin) {
    Printf("%s", d.Allocation());
    Printf("allocated here:\n");
    Printf("%s", d.Default());
    GetStackTraceFromId(heap.stack_id).Print();
  }

  InternalScopedString s;
  u8 *tail = tail_copy;
  s.AppendF("Tail contains: ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.AppendF(".. ");
  for (uptr i = 0; i < tail_size; i++) s.AppendF("%02x ", tail[i]);
  s.AppendF("\n");
  s.AppendF("Expected:      ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.AppendF(".. ");
  for (uptr i = 0; i < tail_size; i++) s.AppendF("%02x ", actual_expected[i]);
  s.AppendF("\n");
  s.AppendF("               ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.AppendF("   ");
  for (uptr i = 0; i < tail_size; i++)
    s.AppendF("%s ", actual_expected[i] != tail[i] ? "^^" : "  ");

  s.AppendF(
      "\nThis error occurs when a buffer overflow overwrites memory\n"
      "after a heap object, but within the %zd-byte granule, e.g.\n"
      "   char *x = new char[20];\n"
      "   x[25] = 42;\n"
      "%s does not detect such bugs in uninstrumented code at the time of "
      "write,"
      "\nbut can detect them at the time of free/delete.\n"
      "To disable this feature set HWASAN_OPTIONS=free_checks_tail_magic=0\n",
      kShadowAlignment, SanitizerToolName);
  Printf("%s", s.data());
  GetCurrentThread()->Announce();
  PrintTags(untagged_addr);
  MaybePrintAndroidHelpUrl();
  ReportErrorSummary(bug_type, stack);
}
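
// Editor's note, a worked example (sizes assumed): for a 20-byte allocation,
// the second 16-byte granule holds 4 object bytes plus a 12-byte magic tail
// (tail_size == 16 - 20 % 16 == 12). The deallocation path recomputes the
// expected tail and the "^^" markers above flag each overwritten magic byte.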

class TagMismatchReport : public BaseReport {
 public:
  explicit TagMismatchReport(StackTrace *stack, uptr tagged_addr,
                             uptr access_size, bool is_store, bool fatal,
                             uptr *registers_frame)
      : BaseReport(stack, fatal, tagged_addr, access_size),
        is_store(is_store),
        registers_frame(registers_frame) {}
  ~TagMismatchReport();

 private:
  const bool is_store;
  const uptr *registers_frame;
};

TagMismatchReport::~TagMismatchReport() {
  Decorator d;
  // TODO: when possible, try to print heap-use-after-free, etc.
  const char *bug_type = "tag-mismatch";
  uptr pc = GetTopPc(stack);
  Printf("%s", d.Error());
  Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type,
         untagged_addr, pc);

  Thread *t = GetCurrentThread();

  tag_t mem_tag = GetTagCopy(MemToShadow(untagged_addr + mismatch_offset));

  Printf("%s", d.Access());
  if (mem_tag && mem_tag < kShadowAlignment) {
    tag_t short_tag =
        GetShortTagCopy(MemToShadow(untagged_addr + mismatch_offset));
    Printf(
        "%s of size %zu at %p tags: %02x/%02x(%02x) (ptr/mem) in thread T%zd\n",
        is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
        mem_tag, short_tag, t->unique_id());
  } else {
    Printf("%s of size %zu at %p tags: %02x/%02x (ptr/mem) in thread T%zd\n",
           is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
           mem_tag, t->unique_id());
  }
  if (mismatch_offset)
    Printf("Invalid access starting at offset %zu\n", mismatch_offset);
  Printf("%s", d.Default());

  stack->Print();

  PrintAddressDescription();
  t->Announce();

  PrintTags(untagged_addr + mismatch_offset);

  if (registers_frame)
    ReportRegisters(registers_frame, pc);

  MaybePrintAndroidHelpUrl();
  ReportErrorSummary(bug_type, stack);
}

void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) {
  InvalidFreeReport R(stack, tagged_addr);
}

void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
                           const u8 *expected) {
  TailOverwrittenReport R(stack, tagged_addr, orig_size, expected);
}

void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
                       bool is_store, bool fatal, uptr *registers_frame) {
  TagMismatchReport R(stack, tagged_addr, access_size, is_store, fatal,
                      registers_frame);
}

// See the frame breakdown defined in __hwasan_tag_mismatch (from
// hwasan_tag_mismatch_{aarch64,riscv64}.S).
void ReportRegisters(const uptr *frame, uptr pc) {
  Printf("Registers where the failure occurred (pc %p):\n", pc);

  // We explicitly print a single line (4 registers/line) each iteration to
  // reduce the amount of logcat error messages printed. Each Printf() will
  // result in a new logcat line, irrespective of whether a newline is present,
  // and so we wish to reduce the number of Printf() calls we have to make.
#if defined(__aarch64__)
  Printf("    x0  %016llx  x1  %016llx  x2  %016llx  x3  %016llx\n",
         frame[0], frame[1], frame[2], frame[3]);
#elif SANITIZER_RISCV64
  Printf("    sp  %016llx  x1  %016llx  x2  %016llx  x3  %016llx\n",
         reinterpret_cast<const u8 *>(frame) + 256, frame[1], frame[2],
         frame[3]);
#endif
  Printf("    x4  %016llx  x5  %016llx  x6  %016llx  x7  %016llx\n",
         frame[4], frame[5], frame[6], frame[7]);
  Printf("    x8  %016llx  x9  %016llx  x10 %016llx  x11 %016llx\n",
         frame[8], frame[9], frame[10], frame[11]);
  Printf("    x12 %016llx  x13 %016llx  x14 %016llx  x15 %016llx\n",
         frame[12], frame[13], frame[14], frame[15]);
  Printf("    x16 %016llx  x17 %016llx  x18 %016llx  x19 %016llx\n",
         frame[16], frame[17], frame[18], frame[19]);
  Printf("    x20 %016llx  x21 %016llx  x22 %016llx  x23 %016llx\n",
         frame[20], frame[21], frame[22], frame[23]);
  Printf("    x24 %016llx  x25 %016llx  x26 %016llx  x27 %016llx\n",
         frame[24], frame[25], frame[26], frame[27]);
  // hwasan_check* reduces the stack pointer by 256, then __hwasan_tag_mismatch
  // passes it to this function.
#if defined(__aarch64__)
  Printf("    x28 %016llx  x29 %016llx  x30 %016llx   sp %016llx\n", frame[28],
         frame[29], frame[30], reinterpret_cast<const u8 *>(frame) + 256);
#elif SANITIZER_RISCV64
  Printf("    x28 %016llx  x29 %016llx  x30 %016llx  x31 %016llx\n", frame[28],
         frame[29], frame[30], frame[31]);
#endif
}

}  // namespace __hwasan

void __hwasan_set_error_report_callback(void (*callback)(const char *)) {
  __hwasan::ScopedReport::SetErrorReportCallback(callback);
}
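
// Editor's note, a usage sketch (embedder code assumed, not part of this
// file): the public callback lets an embedder capture whole reports instead
// of scraping stderr; the prototype is declared in
// <sanitizer/hwasan_interface.h>:
//
//   static void SaveReport(const char *report) { /* log or upload it */ }
//   int main() {
//     __hwasan_set_error_report_callback(SaveReport);
//     ...
//   }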