1 //===-- hwasan_report.cpp -------------------------------------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file is a part of HWAddressSanitizer.
12 //===----------------------------------------------------------------------===//
14 #include "hwasan_report.h"
19 #include "hwasan_allocator.h"
20 #include "hwasan_globals.h"
21 #include "hwasan_mapping.h"
22 #include "hwasan_thread.h"
23 #include "hwasan_thread_list.h"
24 #include "sanitizer_common/sanitizer_allocator_internal.h"
25 #include "sanitizer_common/sanitizer_common.h"
26 #include "sanitizer_common/sanitizer_flags.h"
27 #include "sanitizer_common/sanitizer_mutex.h"
28 #include "sanitizer_common/sanitizer_report_decorator.h"
29 #include "sanitizer_common/sanitizer_stackdepot.h"
30 #include "sanitizer_common/sanitizer_stacktrace_printer.h"
31 #include "sanitizer_common/sanitizer_symbolizer.h"
33 using namespace __sanitizer
;
// Open a report scope. Only fatal reports buffer their text: for those,
// error_message_ptr_ is pointed at this report's buffer so that
// MaybeAppendToErrorMessage() can accumulate output. error_message_(1)
// starts the buffer with a single byte (the trailing '\0') that appends
// later overwrite. hwasan_report_count tallies every report started.
ScopedReport(bool fatal = false) : error_message_(1), fatal(fatal) {
  Lock lock(&error_message_lock_);
  // Non-fatal reports leave the shared pointer null, making appends no-ops.
  error_message_ptr_ = fatal ? &error_message_ : nullptr;
  ++hwasan_report_count;
// Destructor body (some surrounding lines are elided in this chunk):
// snapshot the user callback and detach the buffer under the lock, then
// deliver the accumulated message outside the lock.
void (*report_cb)(const char *);
Lock lock(&error_message_lock_);
report_cb = error_report_callback_;
// Detach so any further AppendToErrorMessageBuffer() calls are no-ops.
error_message_ptr_ = nullptr;
report_cb(error_message_.data());
// Hand the report text to the platform abort-message hook (e.g. Android).
SetAbortMessage(error_message_.data());
// Dump the module map when print_module_map >= 2, or for fatal reports
// whenever print_module_map is enabled at all.
if (common_flags()->print_module_map >= 2 ||
    (fatal && common_flags()->print_module_map))
// Append `msg` to the active fatal report's buffer; no-op when no fatal
// ScopedReport is in flight (error_message_ptr_ is null). Thread-safe via
// error_message_lock_.
static void MaybeAppendToErrorMessage(const char *msg) {
  Lock lock(&error_message_lock_);
  if (!error_message_ptr_)  // no active fatal report
  uptr len = internal_strlen(msg);
  uptr old_size = error_message_ptr_->size();
  // Grow by exactly len: the buffer always ends with '\0' (it was created
  // with size 1), so writing at old_size - 1 below keeps it terminated.
  error_message_ptr_->resize(old_size + len);
  // overwrite old trailing '\0', keep new trailing '\0' untouched.
  internal_memcpy(&(*error_message_ptr_)[old_size - 1], msg, len);
// Install a process-wide callback that receives the full report text when
// a ScopedReport is destroyed. Pass nullptr to clear. Guarded by
// error_message_lock_.
static void SetErrorReportCallback(void (*callback)(const char *)) {
  Lock lock(&error_message_lock_);
  error_report_callback_ = callback;
// Serializes whole reports against each other across the process.
ScopedErrorReportLock error_report_lock_;
// Backing storage for this report's text; created with one byte ('\0').
InternalMmapVector<char> error_message_;
// Points at the live fatal report's buffer, or null when appends should be
// dropped. Guarded by error_message_lock_.
static InternalMmapVector<char> *error_message_ptr_;
static Mutex error_message_lock_;
// User callback registered via SetErrorReportCallback; may be null.
static void (*error_report_callback_)(const char *);
// Out-of-class definitions for ScopedReport's static members.
InternalMmapVector<char> *ScopedReport::error_message_ptr_;
Mutex ScopedReport::error_message_lock_;
void (*ScopedReport::error_report_callback_)(const char *);
93 // If there is an active ScopedReport, append to its error message.
94 void AppendToErrorMessageBuffer(const char *buffer
) {
95 ScopedReport::MaybeAppendToErrorMessage(buffer
);
// Look up a previously stored stack trace by its stack-depot id.
static StackTrace GetStackTraceFromId(u32 id) {
  StackTrace res = StackDepotGet(id);
// A RAII object that holds a copy of the current thread stack ring buffer.
// The actual stack buffer may change while we are iterating over it (for
// example, Printf may call syslog() which can itself be built with hwasan).
class SavedStackAllocations {
  // Copy `rb` into a private mapping so its contents stay stable while the
  // report is printed.
  SavedStackAllocations(StackAllocationsRingBuffer *rb) {
    uptr size = rb->size() * sizeof(uptr);
    MmapAlignedOrDieOnFatalError(size, size * 2, "saved stack allocations");
    // NOTE(review): `storage` is presumably the mapping returned by the
    // call above (its definition is not visible here) -- confirm.
    new (&rb_) StackAllocationsRingBuffer(*rb, storage);
  // Unmap the private copy made in the constructor.
  ~SavedStackAllocations() {
    StackAllocationsRingBuffer *rb = get();
    UnmapOrDie(rb->StartOfStorage(), rb->size() * sizeof(uptr));
  // View the saved bytes as a ring buffer.
  StackAllocationsRingBuffer *get() {
    return (StackAllocationsRingBuffer *)&rb_;
// ANSI-color helpers for the different sections of an hwasan report,
// layered on the common sanitizer decorator (colors are empty strings when
// color output is disabled).
class Decorator : public __sanitizer::SanitizerCommonDecorator {
  Decorator() : SanitizerCommonDecorator() { }
  const char *Access() { return Blue(); }
  const char *Allocation() const { return Magenta(); }
  const char *Origin() const { return Magenta(); }
  const char *Name() const { return Green(); }
  const char *Location() { return Green(); }
  const char *Thread() { return Green(); }
// Scan a thread's heap-allocation ring buffer for a record whose tagged
// address range contains tagged_addr (exact tagged-pointer comparison);
// the match is reported through *har / *ring_index. Independently counts,
// for allocator tuning, how many records would match when compared fully
// untagged (*num_matching_addrs) and when compared with only 4 tag bits as
// on MTE (*num_matching_addrs_4b). Returns false if `rb` is null.
static bool FindHeapAllocation(HeapAllocationsRingBuffer *rb, uptr tagged_addr,
                               HeapAllocationRecord *har, uptr *ring_index,
                               uptr *num_matching_addrs,
                               uptr *num_matching_addrs_4b) {
  if (!rb) return false;
  *num_matching_addrs = 0;
  *num_matching_addrs_4b = 0;
  for (uptr i = 0, size = rb->size(); i < size; i++) {
    // `h` is the current record -- its retrieval from *rb is not visible
    // in this chunk.
    if (h.tagged_addr <= tagged_addr &&
        h.tagged_addr + h.requested_size > tagged_addr) {
      // Measure the number of heap ring buffer entries that would have matched
      // if we had only one entry per address (e.g. if the ring buffer data was
      // stored at the address itself). This will help us tune the allocator
      // implementation for MTE.
      if (UntagAddr(h.tagged_addr) <= UntagAddr(tagged_addr) &&
          UntagAddr(h.tagged_addr) + h.requested_size > UntagAddr(tagged_addr)) {
        ++*num_matching_addrs;
      // Measure the number of heap ring buffer entries that would have matched
      // if we only had 4 tag bits, which is the case for MTE.
      auto untag_4b = [](uptr p) {
        // Keep the low 60 bits, i.e. strip the top 4 (MTE-style) tag bits.
        return p & ((1ULL << 60) - 1);
      if (untag_4b(h.tagged_addr) <= untag_4b(tagged_addr) &&
          untag_4b(h.tagged_addr) + h.requested_size > untag_4b(tagged_addr)) {
        ++*num_matching_addrs_4b;
// Describe stack objects that may be the target of the bad access: walk up
// to stack_history_size saved frame records; for each, symbolize the frame
// and print every local whose tag (base_tag ^ tag_offset) equals addr_tag
// and whose frame range covers untagged_addr. If no local is found (e.g. no
// symbols), dump the raw records for offline analysis instead.
static void PrintStackAllocations(StackAllocationsRingBuffer *sa,
                                  tag_t addr_tag, uptr untagged_addr) {
  uptr frames = Min((uptr)flags()->stack_history_size, sa->size());
  bool found_local = false;
  for (uptr i = 0; i < frames; i++) {
    const uptr *record_addr = &(*sa)[i];
    uptr record = *record_addr;
    // Frame base tag is derived from the record's own address (the left-
    // hand side of this assignment is not visible in this chunk):
    reinterpret_cast<uptr>(record_addr) >> kRecordAddrBaseTagShift;
    // Record layout: FP bits above kRecordFPShift, PC in the low bits.
    uptr fp = (record >> kRecordFPShift) << kRecordFPLShift;
    uptr pc_mask = (1ULL << kRecordFPShift) - 1;
    uptr pc = record & pc_mask;
    if (Symbolizer::GetOrInit()->SymbolizeFrame(pc, &frame)) {
      for (LocalInfo &local : frame.locals) {
        // Skip locals lacking the debug info needed to locate/match them.
        if (!local.has_frame_offset || !local.has_size || !local.has_tag_offset)
        tag_t obj_tag = base_tag ^ local.tag_offset;
        if (obj_tag != addr_tag)
        // Calculate the offset from the object address to the faulting
        // address. Because we only store bits 4-19 of FP (bits 0-3 are
        // guaranteed to be zero), the calculation is performed mod 2^20 and may
        // harmlessly underflow if the address mod 2^20 is below the object
        (untagged_addr - fp - local.frame_offset) & (kRecordFPModulus - 1);
        if (obj_offset >= local.size)
        Printf("Potentially referenced stack objects:\n");
        Printf(" %s in %s %s:%d\n", local.name, local.function_name,
               local.decl_file, local.decl_line);
  // We didn't find any locals. Most likely we don't have symbols, so dump
  // the information that we have for offline analysis.
  InternalScopedString frame_desc;
  Printf("Previously allocated frames:\n");
  for (uptr i = 0; i < frames; i++) {
    const uptr *record_addr = &(*sa)[i];
    uptr record = *record_addr;
    uptr pc_mask = (1ULL << 48) - 1;
    uptr pc = record & pc_mask;
    frame_desc.append(" record_addr:0x%zx record:0x%zx",
                      reinterpret_cast<uptr>(record_addr), record);
    if (SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc)) {
      RenderFrame(&frame_desc, " %F %L", 0, frame->info.address, &frame->info,
                  common_flags()->symbolize_vs_style,
                  common_flags()->strip_path_prefix);
    Printf("%s\n", frame_desc.data());
// Returns true if tag == *tag_ptr, reading tags from short granules if
// necessary. This may return a false positive if tags 1-15 are used as a
// regular tag rather than a short granule marker.
static bool TagsEqual(tag_t tag, tag_t *tag_ptr) {
  // Shadow values in [1, kShadowAlignment - 1] mark short granules; the
  // real tag then lives in the granule's last byte in app memory.
  if (*tag_ptr == 0 || *tag_ptr > kShadowAlignment - 1)
  uptr mem = ShadowToMem(reinterpret_cast<uptr>(tag_ptr));
  tag_t inline_tag = *reinterpret_cast<tag_t *>(mem + kShadowAlignment - 1);
  return tag == inline_tag;
// HWASan globals store the size of the global in the descriptor. In cases where
// we don't have a binary with symbols, we can't grab the size of the global
// from the debug info - but we might be able to retrieve it from the
// descriptor. Returns zero if the lookup failed.
static uptr GetGlobalSizeFromDescriptor(uptr ptr) {
  // Find the ELF object that this global resides in.
  if (dladdr(reinterpret_cast<void *>(ptr), &info) == 0)
  auto *ehdr = reinterpret_cast<const ElfW(Ehdr) *>(info.dli_fbase);
  // Program headers follow the ELF header at e_phoff.
  auto *phdr_begin = reinterpret_cast<const ElfW(Phdr) *>(
      reinterpret_cast<const u8 *>(ehdr) + ehdr->e_phoff);
  // Get the load bias. This is normally the same as the dli_fbase address on
  // position-independent code, but can be different on non-PIE executables,
  // binaries using LLD's partitioning feature, or binaries compiled with a
  ElfW(Addr) load_bias = 0;
  for (const auto &phdr :
       ArrayRef<const ElfW(Phdr)>(phdr_begin, phdr_begin + ehdr->e_phnum)) {
    // The bias is anchored on the PT_LOAD segment mapped at file offset 0.
    if (phdr.p_type != PT_LOAD || phdr.p_offset != 0)
    load_bias = reinterpret_cast<ElfW(Addr)>(ehdr) - phdr.p_vaddr;
  // Walk all globals in this ELF object, looking for the one we're interested
  // in. Once we find it, we can stop iterating and return the size of the
  // global we're interested in.
  for (const hwasan_global &global :
       HwasanGlobalsFor(load_bias, phdr_begin, ehdr->e_phnum))
    if (global.addr() <= ptr && ptr < global.addr() + global.size())
      return global.size();
// Given a shadow cell (`candidate`, found by scanning left/right from the
// faulting address) whose tag matched the pointer tag, print the most
// likely cause: heap-buffer-overflow into an adjacent live chunk, or
// global-overflow into a global of the module containing that address.
static void ShowHeapOrGlobalCandidate(uptr untagged_addr, tag_t *candidate,
                                      tag_t *left, tag_t *right) {
  uptr mem = ShadowToMem(reinterpret_cast<uptr>(candidate));
  HwasanChunkView chunk = FindHeapChunkByAddress(mem);
  if (chunk.IsAllocated()) {
    // Work out the distance and the side of the chunk the access landed on
    // (`offset` / `whence` definitions are not visible in this chunk).
    if (untagged_addr < chunk.End() && untagged_addr >= chunk.Beg()) {
      offset = untagged_addr - chunk.Beg();
    } else if (candidate == left) {
      // Candidate was found to the left, so the access is past the end.
      offset = untagged_addr - chunk.End();
      whence = "to the right of";
      offset = chunk.Beg() - untagged_addr;
      whence = "to the left of";
    Printf("%s", d.Error());
    Printf("\nCause: heap-buffer-overflow\n");
    Printf("%s", d.Default());
    Printf("%s", d.Location());
    Printf("%p is located %zd bytes %s %zd-byte region [%p,%p)\n",
           untagged_addr, offset, whence, chunk.UsedSize(), chunk.Beg(),
    Printf("%s", d.Allocation());
    Printf("allocated here:\n");
    Printf("%s", d.Default());
    GetStackTraceFromId(chunk.GetAllocStackId()).Print();
  // Check whether the address points into a loaded library. If so, this is
  // most likely a global variable.
  const char *module_name;
  Symbolizer *sym = Symbolizer::GetOrInit();
  if (sym->GetModuleNameAndOffsetForPC(mem, &module_name, &module_address)) {
    Printf("%s", d.Error());
    Printf("\nCause: global-overflow\n");
    Printf("%s", d.Default());
    Printf("%s", d.Location());
    // Prefer the symbolized name/size of the global when available.
    if (sym->SymbolizeData(mem, &info) && info.start) {
      "%p is located %zd bytes to the %s of %zd-byte global variable "
      "%s [%p,%p) in %s\n",
      candidate == left ? untagged_addr - (info.start + info.size)
                        : info.start - untagged_addr,
      candidate == left ? "right" : "left", info.size, info.name,
      info.start, info.start + info.size, module_name);
      // Fall back to the size recorded in the hwasan global descriptor.
      uptr size = GetGlobalSizeFromDescriptor(mem);
      // We couldn't find the size of the global from the descriptors.
      "%p is located to the %s of a global variable in "
      "\n #0 0x%x (%s+0x%x)\n",
      untagged_addr, candidate == left ? "right" : "left", mem,
      module_name, module_address);
      "%p is located to the %s of a %zd-byte global variable in "
      "\n #0 0x%x (%s+0x%x)\n",
      untagged_addr, candidate == left ? "right" : "left", size, mem,
      module_name, module_address);
    Printf("%s", d.Default());
// Print everything we can figure out about tagged_addr: shadow-memory
// identity, basic heap-chunk info, stack locals of live threads, adjacent
// heap/global overflow candidates found in the shadow, and use-after-free
// matches from per-thread heap ring buffers. Every successful description
// increments num_descriptions_printed; multiple causes are all printed.
void PrintAddressDescription(
    uptr tagged_addr, uptr access_size,
    StackAllocationsRingBuffer *current_stack_allocations) {
  int num_descriptions_printed = 0;
  uptr untagged_addr = UntagAddr(tagged_addr);
  if (MemIsShadow(untagged_addr)) {
    Printf("%s%p is HWAsan shadow memory.\n%s", d.Location(), untagged_addr,
  // Print some very basic information about the address, if it's a heap.
  HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
  if (uptr beg = chunk.Beg()) {
    uptr size = chunk.ActualSize();
    Printf("%s[%p,%p) is a %s %s heap chunk; "
           "size: %zd offset: %zd\n%s",
           chunk.FromSmallHeap() ? "small" : "large",
           chunk.IsAllocated() ? "allocated" : "unallocated",
           size, untagged_addr - beg,
  tag_t addr_tag = GetTagFromPointer(tagged_addr);
  bool on_stack = false;
  // Check stack first. If the address is on the stack of a live thread, we
  // know it cannot be a heap / global overflow.
  hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
    if (t->AddrIsInStack(untagged_addr)) {
      // TODO(fmayer): figure out how to distinguish use-after-return and
      // stack-buffer-overflow.
      Printf("%s", d.Error());
      Printf("\nCause: stack tag-mismatch\n");
      Printf("%s", d.Location());
      Printf("Address %p is located in stack of thread T%zd\n", untagged_addr,
      Printf("%s", d.Default());
      // For the current thread use the stable snapshot when provided: the
      // live ring buffer can mutate while we print.
      auto *sa = (t == GetCurrentThread() && current_stack_allocations)
                     ? current_stack_allocations
                     : t->stack_allocations();
      PrintStackAllocations(sa, addr_tag, untagged_addr);
      num_descriptions_printed++;
  // Check if this looks like a heap buffer overflow by scanning
  // the shadow left and right and looking for the first adjacent
  // object with a different memory tag. If that tag matches addr_tag,
  // check the allocator if it has a live chunk there.
  tag_t *tag_ptr = reinterpret_cast<tag_t *>(MemToShadow(untagged_addr));
  tag_t *candidate = nullptr, *left = tag_ptr, *right = tag_ptr;
  uptr candidate_distance = 0;
  for (; candidate_distance < 1000; candidate_distance++) {
    if (MemIsShadow(reinterpret_cast<uptr>(left)) &&
        TagsEqual(addr_tag, left)) {
    if (MemIsShadow(reinterpret_cast<uptr>(right)) &&
        TagsEqual(addr_tag, right)) {
  constexpr auto kCloseCandidateDistance = 1;
  // A candidate in the immediately adjacent granule outranks ring-buffer
  // matches -- unless the address was on a stack.
  if (!on_stack && candidate && candidate_distance <= kCloseCandidateDistance) {
    ShowHeapOrGlobalCandidate(untagged_addr, candidate, left, right);
    num_descriptions_printed++;
  hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
    // Scan all threads' ring buffers to find if it's a heap-use-after-free.
    HeapAllocationRecord har;
    uptr ring_index, num_matching_addrs, num_matching_addrs_4b;
    if (FindHeapAllocation(t->heap_allocations(), tagged_addr, &har,
                           &ring_index, &num_matching_addrs,
                           &num_matching_addrs_4b)) {
      Printf("%s", d.Error());
      Printf("\nCause: use-after-free\n");
      Printf("%s", d.Location());
      Printf("%p is located %zd bytes inside of %zd-byte region [%p,%p)\n",
             untagged_addr, untagged_addr - UntagAddr(har.tagged_addr),
             har.requested_size, UntagAddr(har.tagged_addr),
             UntagAddr(har.tagged_addr) + har.requested_size);
      Printf("%s", d.Allocation());
      Printf("freed by thread T%zd here:\n", t->unique_id());
      Printf("%s", d.Default());
      GetStackTraceFromId(har.free_context_id).Print();
      Printf("%s", d.Allocation());
      // NOTE(review): `t` is passed here but the format string contains no
      // conversion specifier for it; either drop the argument or include
      // the allocating thread id in the message.
      Printf("previously allocated here:\n", t);
      Printf("%s", d.Default());
      GetStackTraceFromId(har.alloc_context_id).Print();
      // Print a developer note: the index of this heap object
      // in the thread's deallocation ring buffer.
      Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", ring_index + 1,
             flags()->heap_history_size);
      Printf("hwasan_dev_note_num_matching_addrs: %zd\n", num_matching_addrs);
      Printf("hwasan_dev_note_num_matching_addrs_4b: %zd\n",
             num_matching_addrs_4b);
      num_descriptions_printed++;
  // A farther-away candidate is only reported when nothing else matched.
  if (candidate && num_descriptions_printed == 0) {
    ShowHeapOrGlobalCandidate(untagged_addr, candidate, left, right);
    num_descriptions_printed++;
  // Print the remaining threads, as an extra information, 1 line per thread.
  hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); });
  if (!num_descriptions_printed)
    // We exhausted our possibilities. Bail out.
    Printf("HWAddressSanitizer can not describe address in more detail.\n");
  if (num_descriptions_printed > 1) {
    "There are %d potential causes, printed above in order "
    num_descriptions_printed);
508 void ReportStats() {}
// Print num_rows rows of 16 shadow cells centered on the row containing
// tag_ptr; each cell is rendered by print_tag. The center row is marked
// with "=>" and the cell for tag_ptr itself is bracketed with [ ].
static void PrintTagInfoAroundAddr(tag_t *tag_ptr, uptr num_rows,
                                   void (*print_tag)(InternalScopedString &s,
  const uptr row_len = 16;  // better be power of two.
  tag_t *center_row_beg = reinterpret_cast<tag_t *>(
      RoundDownTo(reinterpret_cast<uptr>(tag_ptr), row_len));
  // Half the rows above the center row, half (rounded up) below.
  tag_t *beg_row = center_row_beg - row_len * (num_rows / 2);
  tag_t *end_row = center_row_beg + row_len * ((num_rows + 1) / 2);
  InternalScopedString s;
  for (tag_t *row = beg_row; row < end_row; row += row_len) {
    s.append("%s", row == center_row_beg ? "=>" : " ");
    s.append("%p:", (void *)row);
    for (uptr i = 0; i < row_len; i++) {
      s.append("%s", row + i == tag_ptr ? "[" : " ");
      print_tag(s, &row[i]);
      s.append("%s", row + i == tag_ptr ? "]" : " ");
  Printf("%s", s.data());
// Dump the shadow around the buggy address: 17 rows of memory tags, then 3
// rows where short granules are resolved to the real tag stored in the
// granule's last byte, with a pointer to the design doc.
static void PrintTagsAroundAddr(tag_t *tag_ptr) {
  "Memory tags around the buggy address (one tag corresponds to %zd "
  "bytes):\n", kShadowAlignment);
  PrintTagInfoAroundAddr(tag_ptr, 17, [](InternalScopedString &s, tag_t *tag) {
    s.append("%02x", *tag);
  "Tags for short granules around the buggy address (one tag corresponds "
  PrintTagInfoAroundAddr(tag_ptr, 3, [](InternalScopedString &s, tag_t *tag) {
    // Shadow values 1..kShadowAlignment mark short granules; fetch the
    // real tag from the last byte of the granule in app memory.
    if (*tag >= 1 && *tag <= kShadowAlignment) {
      uptr granule_addr = ShadowToMem(reinterpret_cast<uptr>(tag));
      *reinterpret_cast<u8 *>(granule_addr + kShadowAlignment - 1));
  "https://clang.llvm.org/docs/"
  "HardwareAssistedAddressSanitizerDesign.html#short-granules for a "
  "description of short granule tags\n");
// PC to attribute the report to: the top frame's PC adjusted back to the
// previous instruction (the trace records return addresses).
uptr GetTopPc(StackTrace *stack) {
  return stack->size ? StackTrace::GetPreviousInstructionPc(stack->trace[0])
// Report a free() of a pointer whose tag does not match the memory tag, or
// which does not point into the application heap at all.
void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) {
  ScopedReport R(flags()->halt_on_error);
  uptr untagged_addr = UntagAddr(tagged_addr);
  tag_t ptr_tag = GetTagFromPointer(tagged_addr);
  tag_t *tag_ptr = nullptr;
  // Only compute/dereference the shadow cell for valid app addresses.
  if (MemIsApp(untagged_addr)) {
    tag_ptr = reinterpret_cast<tag_t *>(MemToShadow(untagged_addr));
    if (MemIsShadow(reinterpret_cast<uptr>(tag_ptr)))
  Printf("%s", d.Error());
  uptr pc = GetTopPc(stack);
  const char *bug_type = "invalid-free";
  const Thread *thread = GetCurrentThread();
  // Two variants depending on whether the current thread is known.
  Report("ERROR: %s: %s on address %p at pc %p on thread T%zd\n",
         SanitizerToolName, bug_type, untagged_addr, pc, thread->unique_id());
  Report("ERROR: %s: %s on address %p at pc %p on unknown thread\n",
         SanitizerToolName, bug_type, untagged_addr, pc);
  Printf("%s", d.Access());
  Printf("tags: %02x/%02x (ptr/mem)\n", ptr_tag, mem_tag);
  Printf("%s", d.Default());
  PrintAddressDescription(tagged_addr, 0, nullptr);
  PrintTagsAroundAddr(tag_ptr);
  ReportErrorSummary(bug_type, stack);
// Report that the magic bytes after a heap object (the granule tail) were
// overwritten between allocation and free. `expected` is the magic pattern
// laid down at allocation time; the tail is the orig_size..granule gap.
void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
                           const u8 *expected) {
  uptr tail_size = kShadowAlignment - (orig_size % kShadowAlignment);
  u8 actual_expected[kShadowAlignment];
  internal_memcpy(actual_expected, expected, tail_size);
  tag_t ptr_tag = GetTagFromPointer(tagged_addr);
  // Short granule is stashed in the last byte of the magic string. To avoid
  // confusion, make the expected magic string contain the short granule tag.
  if (orig_size % kShadowAlignment != 0) {
    actual_expected[tail_size - 1] = ptr_tag;
  ScopedReport R(flags()->halt_on_error);
  uptr untagged_addr = UntagAddr(tagged_addr);
  Printf("%s", d.Error());
  const char *bug_type = "allocation-tail-overwritten";
  Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName,
         bug_type, untagged_addr, untagged_addr + orig_size, orig_size);
  Printf("\n%s", d.Default());
  // The overwrite is only detected at deallocation, not at the bad write.
  "Stack of invalid access unknown. Issue detected at deallocation "
  Printf("%s", d.Allocation());
  Printf("deallocated here:\n");
  Printf("%s", d.Default());
  HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
  Printf("%s", d.Allocation());
  Printf("allocated here:\n");
  Printf("%s", d.Default());
  GetStackTraceFromId(chunk.GetAllocStackId()).Print();
  // Render a three-line hexdump: actual tail bytes, expected bytes, and
  // "^^" markers under each mismatching byte.
  InternalScopedString s;
  CHECK_GT(tail_size, 0U);
  CHECK_LT(tail_size, kShadowAlignment);
  u8 *tail = reinterpret_cast<u8 *>(untagged_addr + orig_size);
  s.append("Tail contains: ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
  for (uptr i = 0; i < tail_size; i++)
  s.append("%02x ", tail[i]);
  s.append("Expected: ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
  for (uptr i = 0; i < tail_size; i++) s.append("%02x ", actual_expected[i]);
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
  for (uptr i = 0; i < tail_size; i++)
  s.append("%s ", actual_expected[i] != tail[i] ? "^^" : " ");
  s.append("\nThis error occurs when a buffer overflow overwrites memory\n"
  "to the right of a heap object, but within the %zd-byte granule, e.g.\n"
  " char *x = new char[20];\n"
  "%s does not detect such bugs in uninstrumented code at the time of write,"
  "\nbut can detect them at the time of free/delete.\n"
  "To disable this feature set HWASAN_OPTIONS=free_checks_tail_magic=0\n",
  kShadowAlignment, SanitizerToolName);
  Printf("%s", s.data());
  GetCurrentThread()->Announce();
  tag_t *tag_ptr = reinterpret_cast<tag_t *>(MemToShadow(untagged_addr));
  PrintTagsAroundAddr(tag_ptr);
  ReportErrorSummary(bug_type, stack);
// The main hwasan error report: a load/store through a pointer whose tag
// does not match the shadow tag. Determines the first faulting byte within
// the access (short-granule aware), then prints the access line, address
// description, shadow dump, and (if available) the register state.
void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
                       bool is_store, bool fatal, uptr *registers_frame) {
  ScopedReport R(fatal);
  // Snapshot the current thread's stack ring buffer before any printing
  // can mutate it.
  SavedStackAllocations current_stack_allocations(
      GetCurrentThread()->stack_allocations());
  uptr untagged_addr = UntagAddr(tagged_addr);
  // TODO: when possible, try to print heap-use-after-free, etc.
  const char *bug_type = "tag-mismatch";
  uptr pc = GetTopPc(stack);
  Printf("%s", d.Error());
  Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type,
  Thread *t = GetCurrentThread();
  // Locate the first byte of the access that actually faults.
  __hwasan_test_shadow(reinterpret_cast<void *>(tagged_addr), access_size);
  CHECK(offset >= 0 && offset < static_cast<sptr>(access_size));
  tag_t ptr_tag = GetTagFromPointer(tagged_addr);
  reinterpret_cast<tag_t *>(MemToShadow(untagged_addr + offset));
  tag_t mem_tag = *tag_ptr;
  Printf("%s", d.Access());
  // Shadow values below kShadowAlignment denote a short granule.
  if (mem_tag && mem_tag < kShadowAlignment) {
    tag_t *granule_ptr = reinterpret_cast<tag_t *>((untagged_addr + offset) &
                                                   ~(kShadowAlignment - 1));
    // If offset is 0, (untagged_addr + offset) is not aligned to granules.
    // This is the offset of the leftmost accessed byte within the bad granule.
    u8 in_granule_offset = (untagged_addr + offset) & (kShadowAlignment - 1);
    tag_t short_tag = granule_ptr[kShadowAlignment - 1];
    // The first mismatch was a short granule that matched the ptr_tag.
    if (short_tag == ptr_tag) {
      // If the access starts after the end of the short granule, then the first
      // bad byte is the first byte of the access; otherwise it is the first
      // byte past the end of the short granule
      if (mem_tag > in_granule_offset) {
        offset += mem_tag - in_granule_offset;
    // Short-granule variant includes the granule's stored tag.
    "%s of size %zu at %p tags: %02x/%02x(%02x) (ptr/mem) in thread T%zd\n",
    is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
    mem_tag, short_tag, t->unique_id());
    Printf("%s of size %zu at %p tags: %02x/%02x (ptr/mem) in thread T%zd\n",
           is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
           mem_tag, t->unique_id());
  Printf("Invalid access starting at offset %zu\n", offset);
  Printf("%s", d.Default());
  PrintAddressDescription(tagged_addr, access_size,
                          current_stack_allocations.get());
  PrintTagsAroundAddr(tag_ptr);
  ReportRegisters(registers_frame, pc);
  ReportErrorSummary(bug_type, stack);
// See the frame breakdown defined in __hwasan_tag_mismatch (from
// hwasan_tag_mismatch_aarch64.S).
// Print the AArch64 register dump saved by the tag-mismatch trampoline;
// `frame` points at the 31-slot register save area on the stack.
void ReportRegisters(uptr *frame, uptr pc) {
  Printf("Registers where the failure occurred (pc %p):\n", pc);
  // We explicitly print a single line (4 registers/line) each iteration to
  // reduce the amount of logcat error messages printed. Each Printf() will
  // result in a new logcat line, irrespective of whether a newline is present,
  // and so we wish to reduce the number of Printf() calls we have to make.
  Printf(" x0 %016llx x1 %016llx x2 %016llx x3 %016llx\n",
         frame[0], frame[1], frame[2], frame[3]);
  Printf(" x4 %016llx x5 %016llx x6 %016llx x7 %016llx\n",
         frame[4], frame[5], frame[6], frame[7]);
  Printf(" x8 %016llx x9 %016llx x10 %016llx x11 %016llx\n",
         frame[8], frame[9], frame[10], frame[11]);
  Printf(" x12 %016llx x13 %016llx x14 %016llx x15 %016llx\n",
         frame[12], frame[13], frame[14], frame[15]);
  Printf(" x16 %016llx x17 %016llx x18 %016llx x19 %016llx\n",
         frame[16], frame[17], frame[18], frame[19]);
  Printf(" x20 %016llx x21 %016llx x22 %016llx x23 %016llx\n",
         frame[20], frame[21], frame[22], frame[23]);
  Printf(" x24 %016llx x25 %016llx x26 %016llx x27 %016llx\n",
         frame[24], frame[25], frame[26], frame[27]);
  // hwasan_check* reduces the stack pointer by 256, then __hwasan_tag_mismatch
  // passes it to this function.
  Printf(" x28 %016llx x29 %016llx x30 %016llx sp %016llx\n", frame[28],
         frame[29], frame[30], reinterpret_cast<u8 *>(frame) + 256);
777 } // namespace __hwasan
// Public C interface: register a callback that receives the full text of
// each fatal hwasan report when it completes.
void __hwasan_set_error_report_callback(void (*callback)(const char *)) {
  __hwasan::ScopedReport::SetErrorReportCallback(callback);