//===-- asan_report.cpp ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This file contains error reporting code.
//===----------------------------------------------------------------------===//
#include "asan_report.h"

#include "asan_descriptions.h"
#include "asan_errors.h"
#include "asan_flags.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_scariness_score.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"

namespace __asan {
// -------------------- User-specified callbacks ----------------- {{{1
static void (*error_report_callback)(const char*);
static char *error_message_buffer = nullptr;
static uptr error_message_buffer_pos = 0;
static Mutex error_message_buf_mutex;
static const unsigned kAsanBuggyPcPoolSize = 25;
static __sanitizer::atomic_uintptr_t AsanBuggyPcPool[kAsanBuggyPcPoolSize];

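// Accumulates report text in a lazily mmap-ed, fixed-size buffer of
// kErrorMessageBufferSize bytes; once the buffer is full, further text is
// truncated (see the FIXME below).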
void AppendToErrorMessageBuffer(const char *buffer) {
  Lock l(&error_message_buf_mutex);
  if (!error_message_buffer) {
    error_message_buffer =
        (char*)MmapOrDieQuietly(kErrorMessageBufferSize, __func__);
    error_message_buffer_pos = 0;
  }
  uptr length = internal_strlen(buffer);
  RAW_CHECK(kErrorMessageBufferSize >= error_message_buffer_pos);
  uptr remaining = kErrorMessageBufferSize - error_message_buffer_pos;
  internal_strncpy(error_message_buffer + error_message_buffer_pos,
                   buffer, remaining);
  error_message_buffer[kErrorMessageBufferSize - 1] = '\0';
  // FIXME: reallocate the buffer instead of truncating the message.
  error_message_buffer_pos += Min(remaining, length);
}

// ---------------------- Helper functions ----------------------- {{{1
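// Prints a single byte as two hex digits, wrapped in the given before/after
// strings and colored as shadow or application memory.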
void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte,
                     bool in_shadow, const char *after) {
  Decorator d;
  str->AppendF("%s%s%x%x%s%s", before,
               in_shadow ? d.ShadowByte(byte) : d.MemoryByte(), byte >> 4,
               byte & 15, d.Default(), after);
}

static void PrintZoneForPointer(uptr ptr, uptr zone_ptr,
                                const char *zone_name) {
  if (zone_ptr) {
    if (zone_name) {
      Printf("malloc_zone_from_ptr(%p) = %p, which is %s\n", (void *)ptr,
             (void *)zone_ptr, zone_name);
    } else {
      Printf("malloc_zone_from_ptr(%p) = %p, which doesn't have a name\n",
             (void *)ptr, (void *)zone_ptr);
    }
  } else {
    Printf("malloc_zone_from_ptr(%p) = 0\n", (void *)ptr);
  }
}

// ---------------------- Address Descriptions ------------------- {{{1
bool ParseFrameDescription(const char *frame_descr,
                           InternalMmapVector<StackVarDescr> *vars) {
  CHECK(frame_descr);
  const char *p;
  // This string is created by the compiler and has the following form:
  // "n alloc_1 alloc_2 ... alloc_n"
  // where alloc_i looks like "offset size len ObjectName"
  // or "offset size len ObjectName:line".
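  // For example (per the parsing below), "1 32 4 4 x:15" describes a single
  // 4-byte variable "x" declared on line 15 at frame offset 32; "len" counts
  // the whole "ObjectName:line" token.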
  uptr n_objects = (uptr)internal_simple_strtoll(frame_descr, &p, 10);
  if (n_objects == 0)
    return false;

  for (uptr i = 0; i < n_objects; i++) {
    uptr beg = (uptr)internal_simple_strtoll(p, &p, 10);
    uptr size = (uptr)internal_simple_strtoll(p, &p, 10);
    uptr len = (uptr)internal_simple_strtoll(p, &p, 10);
    if (beg == 0 || size == 0 || *p != ' ') {
      return false;
    }
    p++;
    char *colon_pos = internal_strchr(p, ':');
    uptr line = 0;
    uptr name_len = len;
    if (colon_pos != nullptr && colon_pos < p + len) {
      name_len = colon_pos - p;
      line = (uptr)internal_simple_strtoll(colon_pos + 1, nullptr, 10);
    }
    StackVarDescr var = {beg, size, p, name_len, line};
    vars->push_back(var);
    p += len;
  }

  return true;
}

// -------------------- Different kinds of reports ----------------- {{{1

// Use ScopedInErrorReport to run common actions just before and
// immediately after printing error report.
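// Typical usage, as in the Report* helpers below:
//   ScopedInErrorReport in_report(/*fatal*/ true);
//   ErrorDoubleFree error(GetCurrentTidOrInvalid(), free_stack, addr);
//   in_report.ReportError(error);
// The destructor prints the stored error and handles logging, the user
// callback and, when halting, aborting the process.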
class ScopedInErrorReport {
 public:
  explicit ScopedInErrorReport(bool fatal = false)
      : halt_on_error_(fatal || flags()->halt_on_error) {
    // Make sure the registry and sanitizer report mutexes are locked while
    // we're printing an error report.
    // We can lock them only here to avoid self-deadlock in case of
    // recursive reports.
    asanThreadRegistry().Lock();
    Printf(
        "=================================================================\n");
  }

  ~ScopedInErrorReport() {
    if (halt_on_error_ && !__sanitizer_acquire_crash_state()) {
      asanThreadRegistry().Unlock();
      return;
    }
    ASAN_ON_ERROR();
    if (current_error_.IsValid()) current_error_.Print();

    // Make sure the current thread is announced.
    DescribeThread(GetCurrentThread());
    // We may want to grab this lock again when printing stats.
    asanThreadRegistry().Unlock();
    // Print memory stats.
    if (flags()->print_stats)
      __asan_print_accumulated_stats();

    if (common_flags()->print_cmdline)
      PrintCmdline();

    if (common_flags()->print_module_map == 2)
      DumpProcessMap();

    // Copy the message buffer so that we could start logging without holding a
    // lock that gets acquired during printing.
    InternalMmapVector<char> buffer_copy(kErrorMessageBufferSize);
    {
      Lock l(&error_message_buf_mutex);
      internal_memcpy(buffer_copy.data(),
                      error_message_buffer, kErrorMessageBufferSize);
      // Clear error_message_buffer so that if we find other errors
      // we don't re-log this error.
      error_message_buffer_pos = 0;
    }

    LogFullErrorReport(buffer_copy.data());

    if (error_report_callback) {
      error_report_callback(buffer_copy.data());
    }

    if (halt_on_error_ && common_flags()->abort_on_error) {
      // On Android the message is truncated to 512 characters.
      // FIXME: implement "compact" error format, possibly without, or with
      // highly compressed stack traces?
      // FIXME: or just use the summary line as abort message?
      SetAbortMessage(buffer_copy.data());
    }

    // In halt_on_error = false mode, reset the current error object (before
    // unlocking).
    if (!halt_on_error_)
      internal_memset(&current_error_, 0, sizeof(current_error_));

    if (halt_on_error_) {
      Report("ABORTING\n");
      Die();
    }
  }

  void ReportError(const ErrorDescription &description) {
    // Can only report one error per ScopedInErrorReport.
    CHECK_EQ(current_error_.kind, kErrorKindInvalid);
    internal_memcpy(&current_error_, &description, sizeof(current_error_));
  }

  static ErrorDescription &CurrentError() {
    return current_error_;
  }

 private:
  ScopedErrorReportLock error_report_lock_;
  // Error currently being reported. This enables the destructor to interact
  // with the debugger and point it to an error description.
  static ErrorDescription current_error_;
  bool halt_on_error_;
};

ErrorDescription ScopedInErrorReport::current_error_(LINKER_INITIALIZED);

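// Each Report* function below packages its arguments into the matching
// Error* description from asan_errors.h and hands it to a
// ScopedInErrorReport, which prints it when the scope ends.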
void ReportDeadlySignal(const SignalContext &sig) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorDeadlySignal error(GetCurrentTidOrInvalid(), sig);
  in_report.ReportError(error);
}

void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) {
  ScopedInErrorReport in_report;
  ErrorDoubleFree error(GetCurrentTidOrInvalid(), free_stack, addr);
  in_report.ReportError(error);
}

void ReportNewDeleteTypeMismatch(uptr addr, uptr delete_size,
                                 uptr delete_alignment,
                                 BufferedStackTrace *free_stack) {
  ScopedInErrorReport in_report;
  ErrorNewDeleteTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr,
                                   delete_size, delete_alignment);
  in_report.ReportError(error);
}

void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack) {
  ScopedInErrorReport in_report;
  ErrorFreeNotMalloced error(GetCurrentTidOrInvalid(), free_stack, addr);
  in_report.ReportError(error);
}

void ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack,
                             AllocType alloc_type,
                             AllocType dealloc_type) {
  ScopedInErrorReport in_report;
  ErrorAllocTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr,
                               alloc_type, dealloc_type);
  in_report.ReportError(error);
}

void ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorMallocUsableSizeNotOwned error(GetCurrentTidOrInvalid(), stack, addr);
  in_report.ReportError(error);
}

void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr,
                                             BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorSanitizerGetAllocatedSizeNotOwned error(GetCurrentTidOrInvalid(), stack,
                                               addr);
  in_report.ReportError(error);
}

void ReportCallocOverflow(uptr count, uptr size, BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorCallocOverflow error(GetCurrentTidOrInvalid(), stack, count, size);
  in_report.ReportError(error);
}

void ReportReallocArrayOverflow(uptr count, uptr size,
                                BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorReallocArrayOverflow error(GetCurrentTidOrInvalid(), stack, count, size);
  in_report.ReportError(error);
}

void ReportPvallocOverflow(uptr size, BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorPvallocOverflow error(GetCurrentTidOrInvalid(), stack, size);
  in_report.ReportError(error);
}

void ReportInvalidAllocationAlignment(uptr alignment,
                                      BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorInvalidAllocationAlignment error(GetCurrentTidOrInvalid(), stack,
                                        alignment);
  in_report.ReportError(error);
}

void ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment,
                                        BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorInvalidAlignedAllocAlignment error(GetCurrentTidOrInvalid(), stack,
                                          size, alignment);
  in_report.ReportError(error);
}

void ReportInvalidPosixMemalignAlignment(uptr alignment,
                                         BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorInvalidPosixMemalignAlignment error(GetCurrentTidOrInvalid(), stack,
                                           alignment);
  in_report.ReportError(error);
}

void ReportAllocationSizeTooBig(uptr user_size, uptr total_size, uptr max_size,
                                BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorAllocationSizeTooBig error(GetCurrentTidOrInvalid(), stack, user_size,
                                  total_size, max_size);
  in_report.ReportError(error);
}

void ReportRssLimitExceeded(BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorRssLimitExceeded error(GetCurrentTidOrInvalid(), stack);
  in_report.ReportError(error);
}

void ReportOutOfMemory(uptr requested_size, BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorOutOfMemory error(GetCurrentTidOrInvalid(), stack, requested_size);
  in_report.ReportError(error);
}

void ReportStringFunctionMemoryRangesOverlap(const char *function,
                                             const char *offset1, uptr length1,
                                             const char *offset2, uptr length2,
                                             BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorStringFunctionMemoryRangesOverlap error(
      GetCurrentTidOrInvalid(), stack, (uptr)offset1, length1, (uptr)offset2,
      length2, function);
  in_report.ReportError(error);
}

void ReportStringFunctionSizeOverflow(uptr offset, uptr size,
                                      BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorStringFunctionSizeOverflow error(GetCurrentTidOrInvalid(), stack, offset,
                                        size);
  in_report.ReportError(error);
}

void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end,
                                                  uptr old_mid, uptr new_mid,
                                                  BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorBadParamsToAnnotateContiguousContainer error(
      GetCurrentTidOrInvalid(), stack, beg, end, old_mid, new_mid);
  in_report.ReportError(error);
}

void ReportBadParamsToAnnotateDoubleEndedContiguousContainer(
    uptr storage_beg, uptr storage_end, uptr old_container_beg,
    uptr old_container_end, uptr new_container_beg, uptr new_container_end,
    BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorBadParamsToAnnotateDoubleEndedContiguousContainer error(
      GetCurrentTidOrInvalid(), stack, storage_beg, storage_end,
      old_container_beg, old_container_end, new_container_beg,
      new_container_end);
  in_report.ReportError(error);
}

void ReportODRViolation(const __asan_global *g1, u32 stack_id1,
                        const __asan_global *g2, u32 stack_id2) {
  ScopedInErrorReport in_report;
  ErrorODRViolation error(GetCurrentTidOrInvalid(), g1, stack_id1, g2,
                          stack_id2);
  in_report.ReportError(error);
}

// ----------------------- CheckForInvalidPointerPair ----------- {{{1
static NOINLINE void ReportInvalidPointerPair(uptr pc, uptr bp, uptr sp,
                                              uptr a1, uptr a2) {
  ScopedInErrorReport in_report;
  ErrorInvalidPointerPair error(GetCurrentTidOrInvalid(), pc, bp, sp, a1, a2);
  in_report.ReportError(error);
}

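// Heuristic used below: two addresses form an invalid pair if they do not
// point into the same stack variable, heap chunk or global, as determined
// from shadow memory and the address descriptions.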
static bool IsInvalidPointerPair(uptr a1, uptr a2) {
  if (a1 == a2)
    return false;

  // 256B in shadow memory can be iterated quite fast
  static const uptr kMaxOffset = 2048;

  uptr left = a1 < a2 ? a1 : a2;
  uptr right = a1 < a2 ? a2 : a1;
  uptr offset = right - left;
  if (offset <= kMaxOffset)
    return __asan_region_is_poisoned(left, offset);

  AsanThread *t = GetCurrentThread();

  // check whether left is a stack memory pointer
  if (uptr shadow_offset1 = t->GetStackVariableShadowStart(left)) {
    uptr shadow_offset2 = t->GetStackVariableShadowStart(right);
    return shadow_offset2 == 0 || shadow_offset1 != shadow_offset2;
  }

  // check whether left is a heap memory address
  HeapAddressDescription hdesc1, hdesc2;
  if (GetHeapAddressInformation(left, 0, &hdesc1) &&
      hdesc1.chunk_access.access_type == kAccessTypeInside)
    return !GetHeapAddressInformation(right, 0, &hdesc2) ||
        hdesc2.chunk_access.access_type != kAccessTypeInside ||
        hdesc1.chunk_access.chunk_begin != hdesc2.chunk_access.chunk_begin;

  // check whether left is an address of a global variable
  GlobalAddressDescription gdesc1, gdesc2;
  if (GetGlobalAddressInformation(left, 0, &gdesc1))
    return !GetGlobalAddressInformation(right - 1, 0, &gdesc2) ||
        !gdesc1.PointsInsideTheSameVariable(gdesc2);

  if (t->GetStackVariableShadowStart(right) ||
      GetHeapAddressInformation(right, 0, &hdesc2) ||
      GetGlobalAddressInformation(right - 1, 0, &gdesc2))
    return true;

  // At this point we know nothing about both a1 and a2 addresses.
  return false;
}

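// As handled by the switch below, flags()->detect_invalid_pointer_pairs == 0
// disables the check, == 1 skips pairs where either pointer is null, and
// larger values check every pair.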
static inline void CheckForInvalidPointerPair(void *p1, void *p2) {
  switch (flags()->detect_invalid_pointer_pairs) {
    case 0:
      return;
    case 1:
      if (p1 == nullptr || p2 == nullptr)
        return;
      break;
  }

  uptr a1 = reinterpret_cast<uptr>(p1);
  uptr a2 = reinterpret_cast<uptr>(p2);

  if (IsInvalidPointerPair(a1, a2)) {
    GET_CALLER_PC_BP_SP;
    ReportInvalidPointerPair(pc, bp, sp, a1, a2);
  }
}

// ----------------------- Mac-specific reports ----------------- {{{1

void ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name,
                               BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  Printf(
      "mz_realloc(%p) -- attempting to realloc unallocated memory.\n"
      "This is an unrecoverable problem, exiting now.\n",
      (void *)addr);
  PrintZoneForPointer(addr, zone_ptr, zone_name);
  stack->Print();
  DescribeAddressIfHeap(addr);
}

// -------------- SuppressErrorReport -------------- {{{1
// Avoid duplicate error reports in ASan recover mode.
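// The first report from a given PC records that PC in AsanBuggyPcPool;
// subsequent reports from the same PC are suppressed.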
static bool SuppressErrorReport(uptr pc) {
  if (!common_flags()->suppress_equal_pcs) return false;
  for (unsigned i = 0; i < kAsanBuggyPcPoolSize; i++) {
    uptr cmp = atomic_load_relaxed(&AsanBuggyPcPool[i]);
    if (cmp == 0 && atomic_compare_exchange_strong(&AsanBuggyPcPool[i], &cmp,
                                                   pc, memory_order_relaxed))
      return false;
    if (cmp == pc) return true;
  }
  Die();
}

void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
                        uptr access_size, u32 exp, bool fatal) {
  if (__asan_test_only_reported_buggy_pointer) {
    *__asan_test_only_reported_buggy_pointer = addr;
    return;
  }

  if (!fatal && SuppressErrorReport(pc)) return;
  ENABLE_FRAME_POINTER;

  // Optimization experiments.
  // The experiments can be used to evaluate potential optimizations that remove
  // instrumentation (assess false negatives). Instead of completely removing
  // some instrumentation, compiler can emit special calls into runtime
  // (e.g. __asan_report_exp_load1 instead of __asan_report_load1) and pass
  // mask of experiments (exp).
  // The reaction to a non-zero value of exp is to be defined.
  (void)exp;

  ScopedInErrorReport in_report(fatal);
  ErrorGeneric error(GetCurrentTidOrInvalid(), pc, bp, sp, addr, is_write,
                     access_size);
  in_report.ReportError(error);
}

}  // namespace __asan

// --------------------------- Interface --------------------- {{{1
using namespace __asan;

void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write,
                         uptr access_size, u32 exp) {
  ENABLE_FRAME_POINTER;
  bool fatal = flags()->halt_on_error;
  ReportGenericError(pc, bp, sp, addr, is_write, access_size, exp, fatal);
}

void NOINLINE __asan_set_error_report_callback(void (*callback)(const char*)) {
  Lock l(&error_message_buf_mutex);
  error_report_callback = callback;
}

void __asan_describe_address(uptr addr) {
  // Thread registry must be locked while we're describing an address.
  asanThreadRegistry().Lock();
  PrintAddressDescription(addr, 1, "");
  asanThreadRegistry().Unlock();
}

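// The __asan_report_present / __asan_get_report_* accessors below expose the
// current error (ScopedInErrorReport::CurrentError()) so that a debugger or
// crash handler can query what was reported.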
int __asan_report_present() {
  return ScopedInErrorReport::CurrentError().kind != kErrorKindInvalid;
}

uptr __asan_get_report_pc() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.pc;
  return 0;
}

uptr __asan_get_report_bp() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.bp;
  return 0;
}

uptr __asan_get_report_sp() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.sp;
  return 0;
}

uptr __asan_get_report_address() {
  ErrorDescription &err = ScopedInErrorReport::CurrentError();
  if (err.kind == kErrorKindGeneric)
    return err.Generic.addr_description.Address();
  else if (err.kind == kErrorKindDoubleFree)
    return err.DoubleFree.addr_description.addr;
  return 0;
}

int __asan_get_report_access_type() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.is_write;
  return 0;
}

uptr __asan_get_report_access_size() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.access_size;
  return 0;
}

const char *__asan_get_report_description() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.bug_descr;
  return ScopedInErrorReport::CurrentError().Base.scariness.GetDescription();
}

SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_ptr_sub(void *a, void *b) {
  CheckForInvalidPointerPair(a, b);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_ptr_cmp(void *a, void *b) {
  CheckForInvalidPointerPair(a, b);
}

// Provide default implementation of __asan_on_error that does nothing
// and may be overridden by user.
SANITIZER_INTERFACE_WEAK_DEF(void, __asan_on_error, void) {}