//===-- asan_noinst_test.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This test file should be compiled w/o asan instrumentation.
//===----------------------------------------------------------------------===//

#include <sanitizer/allocator_interface.h>
#include <stdio.h>   // for fprintf()
#include <stdlib.h>  // for exit()
#include <string.h>  // for memset()

#include <vector>

#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_test_utils.h"

using namespace __sanitizer;

// Please don't call intercepted functions (including malloc() and friends)
// in this test. The static runtime library is linked explicitly (without
// -fsanitize=address), thus the interceptors do not work correctly on OS X.
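// The tests below therefore call the internal entry points, e.g.
// __asan::asan_malloc() and __asan::asan_free(), directly.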

// Make sure __asan_init is called before any test case is run.
struct AsanInitCaller {
  AsanInitCaller() { __asan_init(); }
};
static AsanInitCaller asan_init_caller;
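
// A static object's constructor runs during static initialization, i.e.
// before gtest's main() executes any TEST body, so __asan_init() is
// guaranteed to have been called by the time the first test runs.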

TEST(AddressSanitizer, InternalSimpleDeathTest) {
  EXPECT_DEATH(exit(1), "");
}

static void *MallocStress(void *NumOfItrPtr) {
  size_t n = *((size_t *)NumOfItrPtr);
  u32 seed = my_rand();
  BufferedStackTrace stack1;
  stack1.trace_buffer[0] = 0xa123;
  stack1.trace_buffer[1] = 0xa456;
  stack1.size = 2;

  BufferedStackTrace stack2;
  stack2.trace_buffer[0] = 0xb123;
  stack2.trace_buffer[1] = 0xb456;
  stack2.size = 2;

  BufferedStackTrace stack3;
  stack3.trace_buffer[0] = 0xc123;
  stack3.trace_buffer[1] = 0xc456;
  stack3.size = 2;

  std::vector<void *> vec;
  for (size_t i = 0; i < n; i++) {
    if ((i % 3) == 0) {
      // Free a randomly chosen live allocation.
      if (vec.empty()) continue;
      size_t idx = my_rand_r(&seed) % vec.size();
      void *ptr = vec[idx];
      vec[idx] = vec.back();
      vec.pop_back();
      __asan::asan_free(ptr, &stack1, __asan::FROM_MALLOC);
    } else {
      // Allocate a block of random size and alignment and touch it.
      size_t size = my_rand_r(&seed) % 1000 + 1;
      switch ((my_rand_r(&seed) % 128)) {
        case 0: size += 1024; break;
        case 1: size += 2048; break;
        case 2: size += 4096; break;
      }
      size_t alignment = 1 << (my_rand_r(&seed) % 10 + 1);
      char *ptr = (char*)__asan::asan_memalign(alignment, size,
                                               &stack2, __asan::FROM_MALLOC);
      EXPECT_EQ(size, __asan::asan_malloc_usable_size(ptr, 0, 0));
      vec.push_back(ptr);
      ptr[0] = 0;
      ptr[size / 2] = 0;
      ptr[size - 1] = 0;
    }
  }
  for (size_t i = 0; i < vec.size(); i++)
    __asan::asan_free(vec[i], &stack3, __asan::FROM_MALLOC);
  return nullptr;
}

TEST(AddressSanitizer, NoInstMallocTest) {
  const size_t kNumIterations = (ASAN_LOW_MEMORY) ? 300000 : 1000000;
  MallocStress((void *)&kNumIterations);
}

TEST(AddressSanitizer, ThreadedMallocStressTest) {
  const int kNumThreads = 4;
  const size_t kNumIterations = (ASAN_LOW_MEMORY) ? 10000 : 100000;
  pthread_t t[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_CREATE(&t[i], 0, (void *(*)(void *x))MallocStress,
                   (void *)&kNumIterations);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
}
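
// Each shadow byte describes one 8-byte granule of application memory:
// 0 means fully addressable, 1..7 mean only the first k bytes are
// addressable, and "negative" values mark redzones and freed memory.
// __asan::MemToShadow() performs the address translation; PrintShadow()
// below dumps the shadow bytes around a region for manual inspection.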

static void PrintShadow(const char *tag, uptr ptr, size_t size) {
  fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
  uptr prev_shadow = 0;
  for (sptr i = -32; i < (sptr)size + 32; i++) {
    uptr shadow = __asan::MemToShadow(ptr + i);
    if (i == 0 || i == (sptr)size)
      fprintf(stderr, ".");
    if (shadow != prev_shadow) {
      prev_shadow = shadow;
      fprintf(stderr, "%02x", (int)*(u8*)shadow);
    }
  }
  fprintf(stderr, "\n");
}

TEST(AddressSanitizer, DISABLED_InternalPrintShadow) {
  for (size_t size = 1; size <= 513; size++) {
    char *ptr = new char[size];
    PrintShadow("m", (uptr)ptr, size);
    delete[] ptr;
    PrintShadow("f", (uptr)ptr, size);
  }
}
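
// Freed chunks are not reusable immediately: they are held in a FIFO
// quarantine first, so an address can only be returned by the allocator
// again after enough subsequent frees have pushed it out. The test below
// frees a block and counts how many malloc/free pairs it takes to see the
// same address again.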

TEST(AddressSanitizer, QuarantineTest) {
  UNINITIALIZED BufferedStackTrace stack;
  stack.trace_buffer[0] = 0x890;
  stack.size = 1;

  const int size = 1024;
  void *p = __asan::asan_malloc(size, &stack);
  __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  size_t i;
  size_t max_i = 1 << 30;
  for (i = 0; i < max_i; i++) {
    void *p1 = __asan::asan_malloc(size, &stack);
    __asan::asan_free(p1, &stack, __asan::FROM_MALLOC);
    // Stop once the quarantined pointer has been recycled.
    if (p1 == p) break;
  }
  EXPECT_GE(i, 10000U);
  EXPECT_LT(i, max_i);
}

#if !defined(__NetBSD__)
void *ThreadedQuarantineTestWorker(void *unused) {
  (void)unused;
  u32 seed = my_rand();
  UNINITIALIZED BufferedStackTrace stack;
  stack.trace_buffer[0] = 0x890;
  stack.size = 1;

  for (size_t i = 0; i < 1000; i++) {
    void *p = __asan::asan_malloc(1 + (my_rand_r(&seed) % 4000), &stack);
    __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  }
  return NULL;
}

// Check that the thread local allocators are flushed when threads are
// destroyed.
TEST(AddressSanitizer, ThreadedQuarantineTest) {
  // Run the routine once to warm up ASAN internal structures to get more
  // predictable incremental memory changes.
  pthread_t t;
  PTHREAD_CREATE(&t, NULL, ThreadedQuarantineTestWorker, 0);
  PTHREAD_JOIN(t, 0);

  const int n_threads = 3000;
  size_t mmaped1 = __sanitizer_get_heap_size();
  for (int i = 0; i < n_threads; i++) {
    pthread_t t;
    PTHREAD_CREATE(&t, NULL, ThreadedQuarantineTestWorker, 0);
    PTHREAD_JOIN(t, 0);
  }
  size_t mmaped2 = __sanitizer_get_heap_size();
  // FIXME: Figure out why this much memory is required.
  EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20));
}
#endif  // !defined(__NetBSD__)

void *ThreadedOneSizeMallocStress(void *unused) {
  (void)unused;
  UNINITIALIZED BufferedStackTrace stack;
  stack.trace_buffer[0] = 0x890;
  stack.size = 1;
  const size_t kNumMallocs = 1000;
  for (int iter = 0; iter < 1000; iter++) {
    void *p[kNumMallocs];
    for (size_t i = 0; i < kNumMallocs; i++) {
      p[i] = __asan::asan_malloc(32, &stack);
    }
    for (size_t i = 0; i < kNumMallocs; i++) {
      __asan::asan_free(p[i], &stack, __asan::FROM_MALLOC);
    }
  }
  return NULL;
}

TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) {
  const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_CREATE(&t[i], 0, ThreadedOneSizeMallocStress, 0);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
}

TEST(AddressSanitizer, ShadowRegionIsPoisonedTest) {
  using __asan::kHighMemEnd;
  // Check that __asan_region_is_poisoned works for shadow regions.
  uptr ptr = kLowShadowBeg + 200;
  EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100));
  ptr = kShadowGapBeg + 200;
  EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100));
  ptr = kHighShadowBeg + 200;
  EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100));
}
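
// Instrumented code reports every memory access to the runtime through
// callbacks such as __asan_load1(addr) .. __asan_store16(addr). Setting
// __asan_test_only_reported_buggy_pointer makes the runtime record the
// offending address instead of aborting, so the checks below can observe
// out-of-bounds accesses without dying.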

// Test __asan_load1 & friends.
typedef void (*CB)(uptr p);
static void TestLoadStoreCallbacks(CB cb[2][5]) {
  uptr buggy_ptr;

  __asan_test_only_reported_buggy_pointer = &buggy_ptr;
  UNINITIALIZED BufferedStackTrace stack;
  stack.trace_buffer[0] = 0x890;
  stack.size = 1;

  for (uptr len = 16; len <= 32; len++) {
    char *ptr = (char*) __asan::asan_malloc(len, &stack);
    uptr p = reinterpret_cast<uptr>(ptr);
    for (uptr is_write = 0; is_write <= 1; is_write++) {
      for (uptr size_log = 0; size_log <= 4; size_log++) {
        uptr size = 1 << size_log;
        CB call = cb[is_write][size_log];
        // Iterate only size-aligned offsets.
        for (uptr offset = 0; offset <= len; offset += size) {
          buggy_ptr = 0;
          call(p + offset);
          // In-bounds accesses must not report; out-of-bounds accesses
          // must report exactly the faulting address.
          if (offset + size <= len)
            EXPECT_EQ(buggy_ptr, 0U);
          else
            EXPECT_EQ(buggy_ptr, p + offset);
        }
      }
    }
    __asan::asan_free(ptr, &stack, __asan::FROM_MALLOC);
  }
  __asan_test_only_reported_buggy_pointer = 0;
}

TEST(AddressSanitizer, LoadStoreCallbacks) {
  CB cb[2][5] = {
      {__asan_load1, __asan_load2, __asan_load4, __asan_load8, __asan_load16},
      {__asan_store1, __asan_store2, __asan_store4, __asan_store8,
       __asan_store16},
  };
  TestLoadStoreCallbacks(cb);
}

#if defined(__x86_64__) && \
    !(defined(SANITIZER_APPLE) || defined(SANITIZER_WINDOWS))

// Defines a wrapper that passes `address` to one of the
// __asan_check_(load|store)_add_<size>_<reg> callbacks in register `reg`,
// preserving the register's original value around the call.
#define CALL_ASAN_MEMORY_ACCESS_CALLBACK_ADD(s, reg, op)   \
  void CallAsanMemoryAccessAdd##reg##op##s(uptr address) { \
    asm("push %%" #reg " \n"                               \
        "mov %[x], %%" #reg " \n"                          \
        "call __asan_check_" #op "_add_" #s "_" #reg "\n"  \
        "pop %%" #reg " \n"                                \
        :                                                  \
        : [x] "r"(address)                                 \
        : "r8", "rdi");                                    \
  }
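
// For example, CALL_ASAN_MEMORY_ACCESS_CALLBACK_ADD(4, RAX, load) defines
// CallAsanMemoryAccessAddRAXload4(p), which executes
// "call __asan_check_load_add_4_RAX" with p in %RAX; the callback reports
// if any of the 4 bytes at p are poisoned.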

#define TEST_ASAN_MEMORY_ACCESS_CALLBACKS_ADD(reg)        \
  CALL_ASAN_MEMORY_ACCESS_CALLBACK_ADD(1, reg, load)      \
  CALL_ASAN_MEMORY_ACCESS_CALLBACK_ADD(1, reg, store)     \
  CALL_ASAN_MEMORY_ACCESS_CALLBACK_ADD(2, reg, load)      \
  CALL_ASAN_MEMORY_ACCESS_CALLBACK_ADD(2, reg, store)     \
  CALL_ASAN_MEMORY_ACCESS_CALLBACK_ADD(4, reg, load)      \
  CALL_ASAN_MEMORY_ACCESS_CALLBACK_ADD(4, reg, store)     \
  CALL_ASAN_MEMORY_ACCESS_CALLBACK_ADD(8, reg, load)      \
  CALL_ASAN_MEMORY_ACCESS_CALLBACK_ADD(8, reg, store)     \
  CALL_ASAN_MEMORY_ACCESS_CALLBACK_ADD(16, reg, load)     \
  CALL_ASAN_MEMORY_ACCESS_CALLBACK_ADD(16, reg, store)    \
                                                          \
  TEST(AddressSanitizer, LoadStoreCallbacksAddX86##reg) { \
    CB cb[2][5] = {{                                      \
        CallAsanMemoryAccessAdd##reg##load1,              \
        CallAsanMemoryAccessAdd##reg##load2,              \
        CallAsanMemoryAccessAdd##reg##load4,              \
        CallAsanMemoryAccessAdd##reg##load8,              \
        CallAsanMemoryAccessAdd##reg##load16,             \
    }, {                                                  \
        CallAsanMemoryAccessAdd##reg##store1,             \
        CallAsanMemoryAccessAdd##reg##store2,             \
        CallAsanMemoryAccessAdd##reg##store4,             \
        CallAsanMemoryAccessAdd##reg##store8,             \
        CallAsanMemoryAccessAdd##reg##store16,            \
    }};                                                   \
    TestLoadStoreCallbacks(cb);                           \
  }

// Instantiate all but the R10 and R11 callbacks. We are using the PLTSafe
// class with the intrinsic, which guarantees that the code generation will
// never emit R10 or R11 callbacks.
TEST_ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RAX)
TEST_ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RBX)
TEST_ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RCX)
TEST_ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RDX)
TEST_ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RSI)
TEST_ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RDI)
TEST_ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RBP)
TEST_ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R8)
TEST_ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R9)
TEST_ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R12)
TEST_ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R13)
TEST_ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R14)
TEST_ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R15)

#endif  // defined(__x86_64__) && !(SANITIZER_APPLE || SANITIZER_WINDOWS)