1 ; RUN: opt < %s -msan-check-access-address=0 -S -passes='module(msan-module),function(msan)' 2>&1 | FileCheck -allow-deprecated-dag-overlap %s
2 ; RUN: opt < %s --passes='module(msan-module),function(msan)' -msan-check-access-address=0 -S | FileCheck -allow-deprecated-dag-overlap %s
3 ; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=1 -S -passes='module(msan-module),function(msan)' 2>&1 | \
4 ; RUN: FileCheck -allow-deprecated-dag-overlap -check-prefixes=CHECK,CHECK-ORIGINS %s
5 ; RUN: opt < %s -passes='module(msan-module),function(msan)' -msan-check-access-address=0 -msan-track-origins=1 -S | \
6 ; RUN: FileCheck -allow-deprecated-dag-overlap -check-prefixes=CHECK,CHECK-ORIGINS %s
8 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
9 target triple = "x86_64-unknown-linux-gnu"
11 ; CHECK: @llvm.global_ctors {{.*}} { i32 0, void ()* @msan.module_ctor, i8* null }
13 ; Check the presence and the linkage type of __msan_track_origins and
14 ; other interface symbols.
15 ; CHECK-NOT: @__msan_track_origins
16 ; CHECK-ORIGINS: @__msan_track_origins = weak_odr constant i32 1
17 ; CHECK-NOT: @__msan_keep_going = weak_odr constant i32 0
18 ; CHECK: @__msan_retval_tls = external thread_local(initialexec) global [{{.*}}]
19 ; CHECK: @__msan_retval_origin_tls = external thread_local(initialexec) global i32
20 ; CHECK: @__msan_param_tls = external thread_local(initialexec) global [{{.*}}]
21 ; CHECK: @__msan_param_origin_tls = external thread_local(initialexec) global [{{.*}}]
22 ; CHECK: @__msan_va_arg_tls = external thread_local(initialexec) global [{{.*}}]
23 ; CHECK: @__msan_va_arg_overflow_size_tls = external thread_local(initialexec) global i64
24 ; CHECK: @__msan_origin_tls = external thread_local(initialexec) global i32
; NOTE(review): the value shadow is loaded from the parameter TLS slot and stored
; alongside the application store; with -msan-track-origins=1 the origin store is
; guarded by a conditional branch (origin is written only when shadow is nonzero).
27 ; Check instrumentation of stores
29 define void @Store(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
31 store i32 %x, i32* %p, align 4
36 ; CHECK: load {{.*}} @__msan_param_tls
37 ; CHECK-ORIGINS: load {{.*}} @__msan_param_origin_tls
40 ; CHECK-ORIGINS: br i1
41 ; CHECK-ORIGINS: {{^[0-9]+}}:
42 ; CHECK-ORIGINS: store
43 ; CHECK-ORIGINS: br label
44 ; CHECK-ORIGINS: {{^[0-9]+}}:
49 ; Check instrumentation of aligned stores
50 ; Shadow store has the same alignment as the original store; origin store
51 ; does not specify explicit alignment.
53 define void @AlignedStore(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
55 store i32 %x, i32* %p, align 32
59 ; CHECK-LABEL: @AlignedStore
60 ; CHECK: load {{.*}} @__msan_param_tls
61 ; CHECK-ORIGINS: load {{.*}} @__msan_param_origin_tls
62 ; CHECK: store {{.*}} align 32
64 ; CHECK-ORIGINS: br i1
65 ; CHECK-ORIGINS: {{^[0-9]+}}:
66 ; CHECK-ORIGINS: store {{.*}} align 32
67 ; CHECK-ORIGINS: br label
68 ; CHECK-ORIGINS: {{^[0-9]+}}:
69 ; CHECK: store {{.*}} align 32
; NOTE(review): branching on a value loaded from memory requires a shadow check;
; an uninitialized condition reaches __msan_warning_noreturn followed by an inline
; asm side effect and unreachable. Return values round-trip through
; __msan_retval_tls: the callee stores its retval shadow, the caller reloads it.
73 ; load followed by cmp: check that we load the shadow and call __msan_warning.
74 define void @LoadAndCmp(i32* nocapture %a) nounwind uwtable sanitize_memory {
76 %0 = load i32, i32* %a, align 4
77 %tobool = icmp eq i32 %0, 0
78 br i1 %tobool, label %if.end, label %if.then
80 if.then: ; preds = %entry
81 tail call void (...) @foo() nounwind
84 if.end: ; preds = %entry, %if.then
88 declare void @foo(...)
90 ; CHECK-LABEL: @LoadAndCmp
93 ; CHECK: call void @__msan_warning_noreturn()
94 ; CHECK-NEXT: call void asm sideeffect
95 ; CHECK-NEXT: unreachable
98 ; Check that we store the shadow for the retval.
99 define i32 @ReturnInt() nounwind uwtable readnone sanitize_memory {
104 ; CHECK-LABEL: @ReturnInt
105 ; CHECK: store i32 0,{{.*}}__msan_retval_tls
108 ; Check that we get the shadow for the retval.
109 define void @CopyRetVal(i32* nocapture %a) nounwind uwtable sanitize_memory {
111 %call = tail call i32 @ReturnInt() nounwind
112 store i32 %call, i32* %a, align 4
116 ; CHECK-LABEL: @CopyRetVal
117 ; CHECK: load{{.*}}__msan_retval_tls
; NOTE(review): shadow values follow the same data flow as application values:
; a phi over application values gets a matching phi over shadows, and shift /
; sign-extension results get correspondingly transformed shadows.
123 ; Check that we generate PHIs for shadow.
124 define void @FuncWithPhi(i32* nocapture %a, i32* %b, i32* nocapture %c) nounwind uwtable sanitize_memory {
126 %tobool = icmp eq i32* %b, null
127 br i1 %tobool, label %if.else, label %if.then
129 if.then: ; preds = %entry
130 %0 = load i32, i32* %b, align 4
133 if.else: ; preds = %entry
134 %1 = load i32, i32* %c, align 4
137 if.end: ; preds = %if.else, %if.then
138 %t.0 = phi i32 [ %0, %if.then ], [ %1, %if.else ]
139 store i32 %t.0, i32* %a, align 4
143 ; CHECK-LABEL: @FuncWithPhi
150 ; Compute shadow for "x << 10"
151 define void @ShlConst(i32* nocapture %x) nounwind uwtable sanitize_memory {
153 %0 = load i32, i32* %x, align 4
155 store i32 %1, i32* %x, align 4
159 ; CHECK-LABEL: @ShlConst
168 ; Compute shadow for "10 << x": it should have 'sext i1'.
169 define void @ShlNonConst(i32* nocapture %x) nounwind uwtable sanitize_memory {
171 %0 = load i32, i32* %x, align 4
173 store i32 %1, i32* %x, align 4
177 ; CHECK-LABEL: @ShlNonConst
186 define void @SExt(i32* nocapture %a, i16* nocapture %b) nounwind uwtable sanitize_memory {
188 %0 = load i16, i16* %b, align 2
189 %1 = sext i16 %0 to i32
190 store i32 %1, i32* %a, align 4
; NOTE(review): plain memset/memcpy/memmove calls are rewritten to the
; __msan_memset/__msan_memcpy/__msan_memmove interceptors, which update shadow
; as well as application memory. The element-atomic variants below are verified
; to pass through uninstrumented (placeholder tests, see the note further down).
205 define void @MemSet(i8* nocapture %x) nounwind uwtable sanitize_memory {
207 call void @llvm.memset.p0i8.i64(i8* %x, i8 42, i64 10, i1 false)
211 declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
213 ; CHECK-LABEL: @MemSet
214 ; CHECK: call i8* @__msan_memset
219 define void @MemCpy(i8* nocapture %x, i8* nocapture %y) nounwind uwtable sanitize_memory {
221 call void @llvm.memcpy.p0i8.p0i8.i64(i8* %x, i8* %y, i64 10, i1 false)
225 declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
227 ; CHECK-LABEL: @MemCpy
228 ; CHECK: call i8* @__msan_memcpy
232 ; memmove is lowered to a call
233 define void @MemMove(i8* nocapture %x, i8* nocapture %y) nounwind uwtable sanitize_memory {
235 call void @llvm.memmove.p0i8.p0i8.i64(i8* %x, i8* %y, i64 10, i1 false)
239 declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
241 ; CHECK-LABEL: @MemMove
242 ; CHECK: call i8* @__msan_memmove
246 ;; Placeholder tests that will fail once element atomic @llvm.mem[cpy|move|set] intrinsics have
247 ;; been added to the MemIntrinsic class hierarchy. These will act as a reminder to
248 ;; verify that MSAN handles these intrinsics properly once they have been
249 ;; added to that class hierarchy.
250 declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture writeonly, i8, i64, i32) nounwind
251 declare void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind
252 declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind
254 define void @atomic_memcpy(i8* nocapture %x, i8* nocapture %y) nounwind {
255 ; CHECK-LABEL: atomic_memcpy
256 ; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
257 ; CHECK-NEXT: ret void
258 call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
262 define void @atomic_memmove(i8* nocapture %x, i8* nocapture %y) nounwind {
263 ; CHECK-LABEL: atomic_memmove
264 ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
265 ; CHECK-NEXT: ret void
266 call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
270 define void @atomic_memset(i8* nocapture %x) nounwind {
271 ; CHECK-LABEL: atomic_memset
272 ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 88, i64 16, i32 1)
273 ; CHECK-NEXT: ret void
274 call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 88, i64 16, i32 1)
; NOTE(review): select shadow mixes the condition's shadow into the chosen
; operand shadow (or/xor combination for vectors); origin propagation uses
; additional selects to pick one side's origin. Struct-typed selects use an
; all-ones struct constant as the fully-poisoned shadow value.
281 ; Check that we propagate shadow for "select"
283 define i32 @Select(i32 %a, i32 %b, i1 %c) nounwind uwtable readnone sanitize_memory {
285 %cond = select i1 %c, i32 %a, i32 %b
289 ; CHECK-LABEL: @Select
294 ; CHECK-DAG: select i1
295 ; CHECK-ORIGINS-DAG: select
296 ; CHECK-ORIGINS-DAG: select
297 ; CHECK-DAG: select i1
298 ; CHECK: store i32{{.*}}@__msan_retval_tls
299 ; CHECK-ORIGINS: store i32{{.*}}@__msan_retval_origin_tls
303 ; Check that we propagate origin for "select" with vector condition.
304 ; Select condition is flattened to i1, which is then used to select one of the
307 define <8 x i16> @SelectVector(<8 x i16> %a, <8 x i16> %b, <8 x i1> %c) nounwind uwtable readnone sanitize_memory {
309 %cond = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %b
313 ; CHECK-LABEL: @SelectVector
314 ; CHECK: select <8 x i1>
315 ; CHECK-DAG: or <8 x i16>
316 ; CHECK-DAG: xor <8 x i16>
317 ; CHECK: or <8 x i16>
318 ; CHECK-DAG: select <8 x i1>
319 ; CHECK-ORIGINS-DAG: select
320 ; CHECK-ORIGINS-DAG: select
321 ; CHECK-DAG: select <8 x i1>
322 ; CHECK: store <8 x i16>{{.*}}@__msan_retval_tls
323 ; CHECK-ORIGINS: store i32{{.*}}@__msan_retval_origin_tls
324 ; CHECK: ret <8 x i16>
327 ; Check that we propagate origin for "select" with scalar condition and vector
328 ; arguments. Select condition shadow is sign-extended to the vector type and
329 ; mixed into the result shadow.
331 define <8 x i16> @SelectVector2(<8 x i16> %a, <8 x i16> %b, i1 %c) nounwind uwtable readnone sanitize_memory {
333 %cond = select i1 %c, <8 x i16> %a, <8 x i16> %b
337 ; CHECK-LABEL: @SelectVector2
339 ; CHECK-DAG: or <8 x i16>
340 ; CHECK-DAG: xor <8 x i16>
341 ; CHECK: or <8 x i16>
342 ; CHECK-DAG: select i1
343 ; CHECK-ORIGINS-DAG: select i1
344 ; CHECK-ORIGINS-DAG: select i1
345 ; CHECK-DAG: select i1
346 ; CHECK: ret <8 x i16>
349 define { i64, i64 } @SelectStruct(i1 zeroext %x, { i64, i64 } %a, { i64, i64 } %b) readnone sanitize_memory {
351 %c = select i1 %x, { i64, i64 } %a, { i64, i64 } %b
355 ; CHECK-LABEL: @SelectStruct
356 ; CHECK: select i1 {{.*}}, { i64, i64 }
357 ; CHECK-NEXT: select i1 {{.*}}, { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 }
358 ; CHECK-ORIGINS: select i1
359 ; CHECK-ORIGINS: select i1
360 ; CHECK-NEXT: select i1 {{.*}}, { i64, i64 }
361 ; CHECK: ret { i64, i64 }
364 define { i64*, double } @SelectStruct2(i1 zeroext %x, { i64*, double } %a, { i64*, double } %b) readnone sanitize_memory {
366 %c = select i1 %x, { i64*, double } %a, { i64*, double } %b
367 ret { i64*, double } %c
370 ; CHECK-LABEL: @SelectStruct2
371 ; CHECK: select i1 {{.*}}, { i64, i64 }
372 ; CHECK-NEXT: select i1 {{.*}}, { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 }
373 ; CHECK-ORIGINS: select i1
374 ; CHECK-ORIGINS: select i1
375 ; CHECK-NEXT: select i1 {{.*}}, { i64*, double }
376 ; CHECK: ret { i64*, double }
; NOTE(review): casts and floating-point arithmetic propagate shadow without
; inserting checks; udiv is the exception below — its divisor shadow is checked
; (one warning call) while the dividend shadow is merely propagated.
379 define i8* @IntToPtr(i64 %x) nounwind uwtable readnone sanitize_memory {
381 %0 = inttoptr i64 %x to i8*
385 ; CHECK-LABEL: @IntToPtr
386 ; CHECK: load i64, i64*{{.*}}__msan_param_tls
387 ; CHECK-ORIGINS-NEXT: load i32, i32*{{.*}}__msan_param_origin_tls
388 ; CHECK-NEXT: inttoptr
389 ; CHECK-NEXT: store i64{{.*}}__msan_retval_tls
393 define i8* @IntToPtr_ZExt(i16 %x) nounwind uwtable readnone sanitize_memory {
395 %0 = inttoptr i16 %x to i8*
399 ; CHECK-LABEL: @IntToPtr_ZExt
400 ; CHECK: load i16, i16*{{.*}}__msan_param_tls
402 ; CHECK-NEXT: inttoptr
403 ; CHECK-NEXT: store i64{{.*}}__msan_retval_tls
407 ; Check that we insert exactly one check on udiv
408 ; (2nd arg shadow is checked, 1st arg shadow is propagated)
410 define i32 @Div(i32 %a, i32 %b) nounwind uwtable readnone sanitize_memory {
412 %div = udiv i32 %a, %b
418 ; CHECK: call void @__msan_warning
424 ; Check that fdiv, unlike udiv, simply propagates shadow.
426 define float @FDiv(float %a, float %b) nounwind uwtable readnone sanitize_memory {
428 %c = fdiv float %a, %b
433 ; CHECK: %[[SA:.*]] = load i32,{{.*}}@__msan_param_tls
434 ; CHECK: %[[SB:.*]] = load i32,{{.*}}@__msan_param_tls
435 ; CHECK: %[[SC:.*]] = or i32 %[[SB]], %[[SA]]
436 ; CHECK: = fdiv float
437 ; CHECK: store i32 %[[SC]], i32* {{.*}}@__msan_retval_tls
440 ; Check that fneg simply propagates shadow.
442 define float @FNeg(float %a) nounwind uwtable readnone sanitize_memory {
449 ; CHECK: %[[SA:.*]] = load i32,{{.*}}@__msan_param_tls
450 ; CHECK-ORIGINS: %[[SB:.*]] = load i32,{{.*}}@__msan_param_origin_tls
451 ; CHECK: = fneg float
452 ; CHECK: store i32 %[[SA]], i32* {{.*}}@__msan_retval_tls
453 ; CHECK-ORIGINS: store i32{{.*}}@__msan_retval_origin_tls
; NOTE(review): sign-bit comparisons (against 0 or -1, scalar or vector, and
; unsigned relational comparisons with constants) are instrumented with parallel
; icmp instructions on the shadow; no warning calls may be emitted for them.
456 ; Check that we propagate shadow for x<0, x>=0, etc (i.e. sign bit tests)
458 define zeroext i1 @ICmpSLTZero(i32 %x) nounwind uwtable readnone sanitize_memory {
459 %1 = icmp slt i32 %x, 0
463 ; CHECK-LABEL: @ICmpSLTZero
465 ; CHECK-NOT: call void @__msan_warning
467 ; CHECK-NOT: call void @__msan_warning
470 define zeroext i1 @ICmpSGEZero(i32 %x) nounwind uwtable readnone sanitize_memory {
471 %1 = icmp sge i32 %x, 0
475 ; CHECK-LABEL: @ICmpSGEZero
477 ; CHECK-NOT: call void @__msan_warning
479 ; CHECK-NOT: call void @__msan_warning
482 define zeroext i1 @ICmpSGTZero(i32 %x) nounwind uwtable readnone sanitize_memory {
483 %1 = icmp sgt i32 0, %x
487 ; CHECK-LABEL: @ICmpSGTZero
489 ; CHECK-NOT: call void @__msan_warning
491 ; CHECK-NOT: call void @__msan_warning
494 define zeroext i1 @ICmpSLEZero(i32 %x) nounwind uwtable readnone sanitize_memory {
495 %1 = icmp sle i32 0, %x
499 ; CHECK-LABEL: @ICmpSLEZero
501 ; CHECK-NOT: call void @__msan_warning
503 ; CHECK-NOT: call void @__msan_warning
507 ; Check that we propagate shadow for x<=-1, x>-1, etc (i.e. sign bit tests)
509 define zeroext i1 @ICmpSLTAllOnes(i32 %x) nounwind uwtable readnone sanitize_memory {
510 %1 = icmp slt i32 -1, %x
514 ; CHECK-LABEL: @ICmpSLTAllOnes
516 ; CHECK-NOT: call void @__msan_warning
518 ; CHECK-NOT: call void @__msan_warning
521 define zeroext i1 @ICmpSGEAllOnes(i32 %x) nounwind uwtable readnone sanitize_memory {
522 %1 = icmp sge i32 -1, %x
526 ; CHECK-LABEL: @ICmpSGEAllOnes
528 ; CHECK-NOT: call void @__msan_warning
530 ; CHECK-NOT: call void @__msan_warning
533 define zeroext i1 @ICmpSGTAllOnes(i32 %x) nounwind uwtable readnone sanitize_memory {
534 %1 = icmp sgt i32 %x, -1
538 ; CHECK-LABEL: @ICmpSGTAllOnes
540 ; CHECK-NOT: call void @__msan_warning
542 ; CHECK-NOT: call void @__msan_warning
545 define zeroext i1 @ICmpSLEAllOnes(i32 %x) nounwind uwtable readnone sanitize_memory {
546 %1 = icmp sle i32 %x, -1
550 ; CHECK-LABEL: @ICmpSLEAllOnes
552 ; CHECK-NOT: call void @__msan_warning
554 ; CHECK-NOT: call void @__msan_warning
558 ; Check that we propagate shadow for x<0, x>=0, etc (i.e. sign bit tests)
559 ; of the vector arguments.
561 define <2 x i1> @ICmpSLT_vector_Zero(<2 x i32*> %x) nounwind uwtable readnone sanitize_memory {
562 %1 = icmp slt <2 x i32*> %x, zeroinitializer
566 ; CHECK-LABEL: @ICmpSLT_vector_Zero
567 ; CHECK: icmp slt <2 x i64>
568 ; CHECK-NOT: call void @__msan_warning
569 ; CHECK: icmp slt <2 x i32*>
570 ; CHECK-NOT: call void @__msan_warning
571 ; CHECK: ret <2 x i1>
573 ; Check that we propagate shadow for x<=-1, x>0, etc (i.e. sign bit tests)
574 ; of the vector arguments.
576 define <2 x i1> @ICmpSLT_vector_AllOnes(<2 x i32> %x) nounwind uwtable readnone sanitize_memory {
577 %1 = icmp slt <2 x i32> <i32 -1, i32 -1>, %x
581 ; CHECK-LABEL: @ICmpSLT_vector_AllOnes
582 ; CHECK: icmp slt <2 x i32>
583 ; CHECK-NOT: call void @__msan_warning
584 ; CHECK: icmp slt <2 x i32>
585 ; CHECK-NOT: call void @__msan_warning
586 ; CHECK: ret <2 x i1>
589 ; Check that we propagate shadow for unsigned relational comparisons with
592 define zeroext i1 @ICmpUGTConst(i32 %x) nounwind uwtable readnone sanitize_memory {
594 %cmp = icmp ugt i32 %x, 7
598 ; CHECK-LABEL: @ICmpUGTConst
599 ; CHECK: icmp ugt i32
600 ; CHECK-NOT: call void @__msan_warning
601 ; CHECK: icmp ugt i32
602 ; CHECK-NOT: call void @__msan_warning
603 ; CHECK: icmp ugt i32
604 ; CHECK-NOT: call void @__msan_warning
; NOTE(review): shadow loads keep the application load's alignment; origin loads
; are bumped up to at least align 4 (origins are i32). Element-manipulation
; instructions mirror the operation on shadow; a non-constant %idx gets a shadow
; check (warning call), while shufflevector with a constant mask does not.
608 ; Check that loads of shadow have the same alignment as the original loads.
609 ; Check that loads of origin have the alignment of max(4, original alignment).
611 define i32 @ShadowLoadAlignmentLarge() nounwind uwtable sanitize_memory {
612 %y = alloca i32, align 64
613 %1 = load volatile i32, i32* %y, align 64
617 ; CHECK-LABEL: @ShadowLoadAlignmentLarge
618 ; CHECK: load volatile i32, i32* {{.*}} align 64
619 ; CHECK: load i32, i32* {{.*}} align 64
622 define i32 @ShadowLoadAlignmentSmall() nounwind uwtable sanitize_memory {
623 %y = alloca i32, align 2
624 %1 = load volatile i32, i32* %y, align 2
628 ; CHECK-LABEL: @ShadowLoadAlignmentSmall
629 ; CHECK: load volatile i32, i32* {{.*}} align 2
630 ; CHECK: load i32, i32* {{.*}} align 2
631 ; CHECK-ORIGINS: load i32, i32* {{.*}} align 4
635 ; Test vector manipulation instructions.
636 ; Check that the same bit manipulation is applied to the shadow values.
637 ; Check that there is a zero test of the shadow of %idx argument, where present.
639 define i32 @ExtractElement(<4 x i32> %vec, i32 %idx) sanitize_memory {
640 %x = extractelement <4 x i32> %vec, i32 %idx
644 ; CHECK-LABEL: @ExtractElement
645 ; CHECK: extractelement
646 ; CHECK: call void @__msan_warning
647 ; CHECK: extractelement
650 define <4 x i32> @InsertElement(<4 x i32> %vec, i32 %idx, i32 %x) sanitize_memory {
651 %vec1 = insertelement <4 x i32> %vec, i32 %x, i32 %idx
655 ; CHECK-LABEL: @InsertElement
656 ; CHECK: insertelement
657 ; CHECK: call void @__msan_warning
658 ; CHECK: insertelement
659 ; CHECK: ret <4 x i32>
661 define <4 x i32> @ShuffleVector(<4 x i32> %vec, <4 x i32> %vec1) sanitize_memory {
662 %vec2 = shufflevector <4 x i32> %vec, <4 x i32> %vec1,
663 <4 x i32> <i32 0, i32 4, i32 1, i32 5>
667 ; CHECK-LABEL: @ShuffleVector
668 ; CHECK: shufflevector
669 ; CHECK-NOT: call void @__msan_warning
670 ; CHECK: shufflevector
671 ; CHECK: ret <4 x i32>
; NOTE(review): bswap's shadow is computed with a second bswap on the shadow
; value — no checks. A vector-of-pointers load gets an integer-vector shadow
; (<8 x i64> here, matching the 64-bit pointer size from the datalayout).
; va_copy zeroes 24 bytes of shadow — the size of one x86_64 __va_list_tag.
674 ; Test bswap intrinsic instrumentation
675 define i32 @BSwap(i32 %x) nounwind uwtable readnone sanitize_memory {
676 %y = tail call i32 @llvm.bswap.i32(i32 %x)
680 declare i32 @llvm.bswap.i32(i32) nounwind readnone
682 ; CHECK-LABEL: @BSwap
683 ; CHECK-NOT: call void @__msan_warning
684 ; CHECK: @llvm.bswap.i32
685 ; CHECK-NOT: call void @__msan_warning
686 ; CHECK: @llvm.bswap.i32
687 ; CHECK-NOT: call void @__msan_warning
690 ; Test handling of vectors of pointers.
691 ; Check that shadow of such vector is a vector of integers.
693 define <8 x i8*> @VectorOfPointers(<8 x i8*>* %p) nounwind uwtable sanitize_memory {
694 %x = load <8 x i8*>, <8 x i8*>* %p
698 ; CHECK-LABEL: @VectorOfPointers
699 ; CHECK: load <8 x i8*>, <8 x i8*>*
700 ; CHECK: load <8 x i64>, <8 x i64>*
701 ; CHECK: store <8 x i64> {{.*}} @__msan_retval_tls
702 ; CHECK: ret <8 x i8*>
704 ; Test handling of va_copy.
706 declare void @llvm.va_copy(i8*, i8*) nounwind
708 define void @VACopy(i8* %p1, i8* %p2) nounwind uwtable sanitize_memory {
709 call void @llvm.va_copy(i8* %p1, i8* %p2) nounwind
713 ; CHECK-LABEL: @VACopy
714 ; CHECK: call void @llvm.memset.p0i8.i64({{.*}}, i8 0, i64 24, i1 false)
; NOTE(review): inside a varargs callee, va_start instrumentation must read the
; caller-populated TLS copy, not the __msan_va_arg_tls globals directly. Volatile
; stores must not get a value-shadow check. Functions without sanitize_memory
; still have their retval shadow zeroed so sanitized callers see them as clean.
718 ; Test that va_start instrumentation does not use va_arg_tls*.
719 ; It should work with a local stack copy instead.
721 %struct.__va_list_tag = type { i32, i32, i8*, i8* }
722 declare void @llvm.va_start(i8*) nounwind
724 ; Function Attrs: nounwind uwtable
725 define void @VAStart(i32 %x, ...) sanitize_memory {
727 %x.addr = alloca i32, align 4
728 %va = alloca [1 x %struct.__va_list_tag], align 16
729 store i32 %x, i32* %x.addr, align 4
730 %arraydecay = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %va, i32 0, i32 0
731 %arraydecay1 = bitcast %struct.__va_list_tag* %arraydecay to i8*
732 call void @llvm.va_start(i8* %arraydecay1)
736 ; CHECK-LABEL: @VAStart
737 ; CHECK: call void @llvm.va_start
738 ; CHECK-NOT: @__msan_va_arg_tls
739 ; CHECK-NOT: @__msan_va_arg_overflow_size_tls
743 ; Test handling of volatile stores.
744 ; Check that MemorySanitizer does not add a check of the value being stored.
746 define void @VolatileStore(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
748 store volatile i32 %x, i32* %p, align 4
752 ; CHECK-LABEL: @VolatileStore
753 ; CHECK-NOT: @__msan_warning
757 ; Test that checks are omitted and returned value is always initialized if
758 ; sanitize_memory attribute is missing.
760 define i32 @NoSanitizeMemory(i32 %x) uwtable {
762 %tobool = icmp eq i32 %x, 0
763 br i1 %tobool, label %if.end, label %if.then
765 if.then: ; preds = %entry
766 tail call void @bar()
769 if.end: ; preds = %entry, %if.then
775 ; CHECK-LABEL: @NoSanitizeMemory
776 ; CHECK-NOT: @__msan_warning
777 ; CHECK: store i32 0, {{.*}} @__msan_retval_tls
778 ; CHECK-NOT: @__msan_warning
; NOTE(review): in functions lacking sanitize_memory, shadow for locals, undefs,
; and outgoing arguments is forced to zero (so sanitized callees never see
; poison from unsanitized code), and constant-zero argument shadow means no
; origin TLS stores are emitted either.
782 ; Test that stack allocations are unpoisoned in functions missing
783 ; sanitize_memory attribute
785 define i32 @NoSanitizeMemoryAlloca() {
787 %p = alloca i32, align 4
788 %x = call i32 @NoSanitizeMemoryAllocaHelper(i32* %p)
792 declare i32 @NoSanitizeMemoryAllocaHelper(i32* %p)
794 ; CHECK-LABEL: @NoSanitizeMemoryAlloca
795 ; CHECK: call void @llvm.memset.p0i8.i64(i8* align 4 {{.*}}, i8 0, i64 4, i1 false)
796 ; CHECK: call i32 @NoSanitizeMemoryAllocaHelper(i32*
800 ; Test that undef is unpoisoned in functions missing
801 ; sanitize_memory attribute
803 define i32 @NoSanitizeMemoryUndef() {
805 %x = call i32 @NoSanitizeMemoryUndefHelper(i32 undef)
809 declare i32 @NoSanitizeMemoryUndefHelper(i32 %x)
811 ; CHECK-LABEL: @NoSanitizeMemoryUndef
812 ; CHECK: store i32 0, i32* {{.*}} @__msan_param_tls
813 ; CHECK: call i32 @NoSanitizeMemoryUndefHelper(i32 undef)
817 ; Test PHINode instrumentation in blacklisted functions
819 define i32 @NoSanitizeMemoryPHI(i32 %x) {
821 %tobool = icmp ne i32 %x, 0
822 br i1 %tobool, label %cond.true, label %cond.false
824 cond.true: ; preds = %entry
827 cond.false: ; preds = %entry
830 cond.end: ; preds = %cond.false, %cond.true
831 %cond = phi i32 [ undef, %cond.true ], [ undef, %cond.false ]
835 ; CHECK: [[A:%.*]] = phi i32 [ undef, %cond.true ], [ undef, %cond.false ]
836 ; CHECK: store i32 0, i32* bitcast {{.*}} @__msan_retval_tls
837 ; CHECK: ret i32 [[A]]
840 ; Test that there are no __msan_param_origin_tls stores when
841 ; argument shadow is a compile-time zero constant (which is always the case
842 ; in functions missing sanitize_memory attribute).
844 define i32 @NoSanitizeMemoryParamTLS(i32* nocapture readonly %x) {
846 %0 = load i32, i32* %x, align 4
847 %call = tail call i32 @NoSanitizeMemoryParamTLSHelper(i32 %0)
851 declare i32 @NoSanitizeMemoryParamTLSHelper(i32 %x)
853 ; CHECK-LABEL: define i32 @NoSanitizeMemoryParamTLS(
854 ; CHECK-NOT: __msan_param_origin_tls
; NOTE(review): variadic calls copy argument shadow into __msan_va_arg_tls at
; ABI-determined offsets: register-class args land below the register save area
; boundary (176 bytes with SSE, 48 without — hence the differing offsets checked
; for @VAArgStruct vs. @VAArgStructNoSSE), byval overflow args are memcpy'd past
; it, and the total overflow size goes to __msan_va_arg_overflow_size_tls.
858 ; Test argument shadow alignment
860 define <2 x i64> @ArgumentShadowAlignment(i64 %a, <2 x i64> %b) sanitize_memory {
865 ; CHECK-LABEL: @ArgumentShadowAlignment
866 ; CHECK: load <2 x i64>, <2 x i64>* {{.*}} @__msan_param_tls {{.*}}, align 8
867 ; CHECK: store <2 x i64> {{.*}} @__msan_retval_tls {{.*}}, align 8
868 ; CHECK: ret <2 x i64>
871 ; Test origin propagation for insertvalue
873 define { i64, i32 } @make_pair_64_32(i64 %x, i32 %y) sanitize_memory {
875 %a = insertvalue { i64, i32 } undef, i64 %x, 0
876 %b = insertvalue { i64, i32 } %a, i32 %y, 1
880 ; CHECK-ORIGINS: @make_pair_64_32
881 ; First element shadow
882 ; CHECK-ORIGINS: insertvalue { i64, i32 } { i64 -1, i32 -1 }, i64 {{.*}}, 0
883 ; First element origin
884 ; CHECK-ORIGINS: icmp ne i64
885 ; CHECK-ORIGINS: select i1
886 ; First element app value
887 ; CHECK-ORIGINS: insertvalue { i64, i32 } undef, i64 {{.*}}, 0
888 ; Second element shadow
889 ; CHECK-ORIGINS: insertvalue { i64, i32 } {{.*}}, i32 {{.*}}, 1
890 ; Second element origin
891 ; CHECK-ORIGINS: icmp ne i32
892 ; CHECK-ORIGINS: select i1
893 ; Second element app value
894 ; CHECK-ORIGINS: insertvalue { i64, i32 } {{.*}}, i32 {{.*}}, 1
895 ; CHECK-ORIGINS: ret { i64, i32 }
898 ; Test shadow propagation for aggregates passed through ellipsis.
900 %struct.StructByVal = type { i32, i32, i32, i32 }
902 declare void @VAArgStructFn(i32 %guard, ...)
904 define void @VAArgStruct(%struct.StructByVal* nocapture %s) sanitize_memory {
906 %agg.tmp2 = alloca %struct.StructByVal, align 8
907 %0 = bitcast %struct.StructByVal* %s to i8*
908 %agg.tmp.sroa.0.0..sroa_cast = bitcast %struct.StructByVal* %s to i64*
909 %agg.tmp.sroa.0.0.copyload = load i64, i64* %agg.tmp.sroa.0.0..sroa_cast, align 4
910 %agg.tmp.sroa.2.0..sroa_idx = getelementptr inbounds %struct.StructByVal, %struct.StructByVal* %s, i64 0, i32 2
911 %agg.tmp.sroa.2.0..sroa_cast = bitcast i32* %agg.tmp.sroa.2.0..sroa_idx to i64*
912 %agg.tmp.sroa.2.0.copyload = load i64, i64* %agg.tmp.sroa.2.0..sroa_cast, align 4
913 %1 = bitcast %struct.StructByVal* %agg.tmp2 to i8*
914 call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 %0, i64 16, i1 false)
915 call void (i32, ...) @VAArgStructFn(i32 undef, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, %struct.StructByVal* byval align 8 %agg.tmp2)
919 ; "undef" and the first 2 structs go to general purpose registers;
920 ; the third struct goes to the overflow area byval
922 ; CHECK-LABEL: @VAArgStruct
923 ; undef not stored to __msan_va_arg_tls - it's a fixed argument
924 ; first struct through general purpose registers
925 ; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 8){{.*}}, align 8
926 ; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 16){{.*}}, align 8
927 ; second struct through general purpose registers
928 ; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 24){{.*}}, align 8
929 ; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 32){{.*}}, align 8
930 ; third struct through the overflow area byval
931 ; CHECK: ptrtoint %struct.StructByVal* {{.*}} to i64
932 ; CHECK: bitcast { i32, i32, i32, i32 }* {{.*}}@__msan_va_arg_tls {{.*}}, i64 176
933 ; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64
934 ; CHECK: store i64 16, i64* @__msan_va_arg_overflow_size_tls
935 ; CHECK: call void (i32, ...) @VAArgStructFn
938 ; Same code compiled without SSE (see attributes below).
939 ; The register save area is only 48 bytes instead of 176.
940 define void @VAArgStructNoSSE(%struct.StructByVal* nocapture %s) sanitize_memory #0 {
942 %agg.tmp2 = alloca %struct.StructByVal, align 8
943 %0 = bitcast %struct.StructByVal* %s to i8*
944 %agg.tmp.sroa.0.0..sroa_cast = bitcast %struct.StructByVal* %s to i64*
945 %agg.tmp.sroa.0.0.copyload = load i64, i64* %agg.tmp.sroa.0.0..sroa_cast, align 4
946 %agg.tmp.sroa.2.0..sroa_idx = getelementptr inbounds %struct.StructByVal, %struct.StructByVal* %s, i64 0, i32 2
947 %agg.tmp.sroa.2.0..sroa_cast = bitcast i32* %agg.tmp.sroa.2.0..sroa_idx to i64*
948 %agg.tmp.sroa.2.0.copyload = load i64, i64* %agg.tmp.sroa.2.0..sroa_cast, align 4
949 %1 = bitcast %struct.StructByVal* %agg.tmp2 to i8*
950 call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 %0, i64 16, i1 false)
951 call void (i32, ...) @VAArgStructFn(i32 undef, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, %struct.StructByVal* byval align 8 %agg.tmp2)
955 attributes #0 = { "target-features"="+fxsr,+x87,-sse" }
957 ; CHECK: bitcast { i32, i32, i32, i32 }* {{.*}}@__msan_va_arg_tls {{.*}}, i64 48
; NOTE(review): tail/musttail calls must remain uninstrumented between the call
; and the ret — the callee's epilogue handles retval shadow — so the 'tail'
; marker is preserved and no shadow code is inserted after a musttail call.
; Finally, the pass-synthesized module ctor must call __msan_init.
959 declare i32 @InnerTailCall(i32 %a)
961 define void @MismatchedReturnTypeTailCall(i32 %a) sanitize_memory {
962 %b = tail call i32 @InnerTailCall(i32 %a)
966 ; We used to strip off the 'tail' modifier, but now that we unpoison return slot
967 ; shadow before the call, we don't need to anymore.
969 ; CHECK-LABEL: define void @MismatchedReturnTypeTailCall
970 ; CHECK: tail call i32 @InnerTailCall
974 declare i32 @MustTailCall(i32 %a)
976 define i32 @CallMustTailCall(i32 %a) sanitize_memory {
977 %b = musttail call i32 @MustTailCall(i32 %a)
981 ; For "musttail" calls we can not insert any shadow manipulating code between
982 ; call and the return instruction. And we don't need to, because everything is
983 ; taken care of in the callee.
985 ; CHECK-LABEL: define i32 @CallMustTailCall
986 ; CHECK: musttail call i32 @MustTailCall
987 ; No instrumentation between call and ret.
988 ; CHECK-NEXT: ret i32
990 declare i32* @MismatchingMustTailCall(i32 %a)
992 define i8* @MismatchingCallMustTailCall(i32 %a) sanitize_memory {
993 %b = musttail call i32* @MismatchingMustTailCall(i32 %a)
994 %c = bitcast i32* %b to i8*
998 ; For "musttail" calls we can not insert any shadow manipulating code between
999 ; call and the return instruction. And we don't need to, because everything is
1000 ; taken care of in the callee.
1002 ; CHECK-LABEL: define i8* @MismatchingCallMustTailCall
1003 ; CHECK: musttail call i32* @MismatchingMustTailCall
1004 ; No instrumentation between call and ret.
1005 ; CHECK-NEXT: bitcast i32* {{.*}} to i8*
1006 ; CHECK-NEXT: ret i8*
1009 ; CHECK-LABEL: define internal void @msan.module_ctor() {
1010 ; CHECK: call void @__msan_init()