1 ; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s
2 ; RUN: opt < %s -msan -S | FileCheck %s
4 target datalayout = "e-m:e-i64:64-n32:64"
5 target triple = "powerpc64le--linux"
7 define i32 @foo(i32 %guard, ...) {
8 %vl = alloca i8*, align 8
9 %1 = bitcast i8** %vl to i8*
10 call void @llvm.lifetime.start.p0i8(i64 32, i8* %1)
11 call void @llvm.va_start(i8* %1)
12 call void @llvm.va_end(i8* %1)
13 call void @llvm.lifetime.end.p0i8(i64 32, i8* %1)
17 ; First, check allocation of the save area.
20 ; CHECK: [[A:%.*]] = load {{.*}} @__msan_va_arg_overflow_size_tls
21 ; CHECK: [[B:%.*]] = add i64 0, [[A]]
22 ; CHECK: [[C:%.*]] = alloca {{.*}} [[B]]
24 ; CHECK: [[STACK:%.*]] = bitcast {{.*}} @__msan_va_arg_tls to i8*
25 ; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[C]], i8* align 8 [[STACK]], i64 [[B]], i1 false)
; Intrinsic declarations used by @foo's va_list handling: lifetime markers for
; the alloca'd va_list slot and the va_start/va_end pair that MSan instruments.
27 declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
28 declare void @llvm.va_start(i8*) #2
29 declare void @llvm.va_end(i8*) #2
30 declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
33 %1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
37 ; Save the incoming shadow value from the arguments in the __msan_va_arg_tls
40 ; CHECK: store i32 0, i32* bitcast ([100 x i64]* @__msan_va_arg_tls to i32*), align 8
41 ; CHECK: store i64 0, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 8) to i64*), align 8
42 ; CHECK: store i64 0, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 16) to i64*), align 8
43 ; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls
45 ; Check vector argument.
47 %1 = call i32 (i32, ...) @foo(i32 0, <2 x i64> <i64 1, i64 2>)
51 ; The vector is at offset 16 of parameter save area, but __msan_va_arg_tls
52 ; corresponds to offset 8+ of parameter save area - so the offset from
53 ; __msan_va_arg_tls is actually misaligned.
55 ; CHECK: store <2 x i64> zeroinitializer, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 8) to <2 x i64>*), align 8
56 ; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls
60 %1 = call i32 (i32, ...) @foo(i32 0, [2 x i64] [i64 1, i64 2])
65 ; CHECK: store [2 x i64] zeroinitializer, [2 x i64]* bitcast ([100 x i64]* @__msan_va_arg_tls to [2 x i64]*), align 8
66 ; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls
70 %1 = call i32 (i32, ...) @foo(i32 0, [2 x i128] [i128 1, i128 2])
75 ; CHECK: store [2 x i128] zeroinitializer, [2 x i128]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 8) to [2 x i128]*), align 8
76 ; CHECK: store {{.*}} 40, {{.*}} @__msan_va_arg_overflow_size_tls
78 ; Check 8-aligned byval.
79 define i32 @bar6([2 x i64]* %arg) {
80 %1 = call i32 (i32, ...) @foo(i32 0, [2 x i64]* byval align 8 %arg)
85 ; CHECK: [[SHADOW:%[0-9]+]] = bitcast [2 x i64]* bitcast ([100 x i64]* @__msan_va_arg_tls to [2 x i64]*) to i8*
86 ; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[SHADOW]], i8* align 8 {{.*}}, i64 16, i1 false)
87 ; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls
89 ; Check 16-aligned byval.
90 define i32 @bar7([4 x i64]* %arg) {
91 %1 = call i32 (i32, ...) @foo(i32 0, [4 x i64]* byval align 16 %arg)
96 ; CHECK: [[SHADOW:%[0-9]+]] = bitcast [4 x i64]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 8) to [4 x i64]*)
97 ; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[SHADOW]], i8* align 8 {{.*}}, i64 32, i1 false)
98 ; CHECK: store {{.*}} 40, {{.*}} @__msan_va_arg_overflow_size_tls
100 ; Test that MSan doesn't generate code overflowing __msan_va_arg_tls when too many arguments are
101 ; passed to a variadic function.
102 define dso_local i64 @many_args() {
104 %ret = call i64 (i64, ...) @sum(i64 120,
105 i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
106 i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
107 i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
108 i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
109 i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
110 i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
111 i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
112 i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
113 i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
114 i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
115 i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
116 i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1
121 ; If the size of __msan_va_arg_tls changes the second argument of `add` must also be changed.
122 ; CHECK-LABEL: @many_args
123 ; CHECK: i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 792)
124 ; CHECK-NOT: i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 800)
; External variadic callee for the @many_args test above; only its shadow
; stores at the call site are checked, so no definition is needed.
125 declare i64 @sum(i64 %n, ...)