1 ; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s
2 ; RUN: opt < %s -msan -S | FileCheck %s
4 target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
5 target triple = "aarch64-unknown-linux-gnu"
7 ; AArch64 va_list layout: { __stack, __gp_top, __vr_top, __gp_off, __vr_off }
7 ; -- field roles inferred from the CHECK comments in this file; presumably
7 ; matches the AAPCS64 variadic ABI layout (TODO confirm against the spec).
7 %struct.__va_list = type { i8*, i8*, i8*, i32, i32 }
9 define i32 @foo(i32 %guard, ...) {
10 %vl = alloca %struct.__va_list, align 8
11 %1 = bitcast %struct.__va_list* %vl to i8*
12 call void @llvm.lifetime.start.p0i8(i64 32, i8* %1)
13 call void @llvm.va_start(i8* %1)
14 call void @llvm.va_end(i8* %1)
15 call void @llvm.lifetime.end.p0i8(i64 32, i8* %1)
19 ; First check if the variadic shadow values are saved in stack with correct
20 ; size (192 is total of general purpose registers size, 64, plus total of
21 ; floating-point registers size, 128).
24 ; CHECK: [[A:%.*]] = load {{.*}} @__msan_va_arg_overflow_size_tls
25 ; CHECK: [[B:%.*]] = add i64 192, [[A]]
26 ; CHECK: alloca {{.*}} [[B]]
28 ; We expect three memcpy operations: one for the general purpose registers,
29 ; one for floating-point/SIMD ones, and one for the remaining arguments.
31 ; Propagate the GR shadow values on for the va_list::__gp_top, adjust the
32 ; offset in the __msan_va_arg_tls based on va_list::__gp_off, and finally
34 ; CHECK: [[GRP:%.*]] = getelementptr inbounds i8, i8* {{%.*}}, i64 {{%.*}}
35 ; CHECK: [[GRSIZE:%.*]] = sub i64 64, {{%.*}}
36 ; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 {{%.*}}, i8* align 8 [[GRP]], i64 [[GRSIZE]], i1 false)
38 ; Propagate the VR shadow values on for the va_list::__vr_top, adjust the
39 ; offset in the __msan_va_arg_tls based on va_list::__vr_off, and finally
41 ; CHECK: [[VRP:%.*]] = getelementptr inbounds i8, i8* {{%.*}}, i64 {{%.*}}
42 ; CHECK: [[VRSIZE:%.*]] = sub i64 128, {{%.*}}
43 ; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 {{%.*}}, i8* align 8 [[VRP]], i64 [[VRSIZE]], i1 false)
45 ; Copy the remaining shadow values on the va_list::__stack position (it is
46 ; on the constant offset of 192 from __msan_va_arg_tls).
47 ; CHECK: [[STACK:%.*]] = getelementptr inbounds i8, i8* {{%.*}}, i32 192
48 ; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 {{%.*}}, i8* align 16 [[STACK]], i64 {{%.*}}, i1 false)
50 ; Intrinsics referenced by @foo: lifetime markers bracketing the va_list
50 ; alloca, and the va_start/va_end pair whose lowering MSan instruments
50 ; with the shadow memcpy sequence checked above.
50 declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
51 declare void @llvm.va_start(i8*) #2
52 declare void @llvm.va_end(i8*) #2
53 declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
56 %1 = call i32 (i32, ...) @foo(i32 0, i32 1, i32 2, double 3.000000e+00,
57 double 4.000000e+00, i32 5, i32 6,
58 double 7.000000e+00, i32 8, i32 9, i32 10, i32 11)
62 ; Save the incoming shadow value from the arguments in the __msan_va_arg_tls
63 ; array. General purpose registers are saved at positions from 0 to 64, floating
64 ; point and SIMD are saved from 64 to 192, and the remaining from 192.
66 ; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 8
67 ; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 16
68 ; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 64
69 ; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 80
70 ; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 24
71 ; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 32
72 ; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 96
73 ; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 40
74 ; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 48
75 ; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 56
76 ; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 192
77 ; CHECK: store {{.*}} 8, {{.*}} @__msan_va_arg_overflow_size_tls
79 ; Test that MSan doesn't generate code overflowing __msan_va_arg_tls when too many arguments are
80 ; passed to a variadic function.
82 define dso_local i64 @many_args() {
84 %ret = call i64 (i64, ...) @sum(i64 120,
85 i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
86 i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
87 i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
88 i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
89 i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
90 i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
91 i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
92 i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
93 i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
94 i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
95 i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
96 i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1
101 ; If the size of __msan_va_arg_tls changes the second argument of `add` must also be changed.
102 ; CHECK: i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 792)
103 ; CHECK-NOT: i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 800)
104 declare i64 @sum(i64 %n, ...)