// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// RUN: %clang_cc1 -triple riscv64 -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-abi lp64f -emit-llvm %s -o - \
// RUN:   | FileCheck %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +d -target-feature +f -target-abi lp64d -emit-llvm %s -o - \
// RUN:   | FileCheck %s
// RUN: %clang_cc1 -triple riscv64 -target-abi lp64e -emit-llvm %s -o - \
// RUN:   | FileCheck %s
// 2*xlen size with 2*xlen alignment: coerced to a single i128 and passed in
// an "aligned" (even-numbered) register pair. Member type reconstructed from
// the CHECK lines (store i128 ..., align 16) — TODO confirm against upstream.
struct small_aligned {
  __int128_t a;
};
// Ensure that ABI lowering happens as expected for vararg calls.
// Specifically, ensure that signext is emitted for varargs that will be
// passed in registers but not on the stack. Ensure this takes into account
// the use of "aligned" register pairs for varargs with 2*xlen alignment.
int f_va_callee(int, ...);
// CHECK-LABEL: define dso_local void @f_va_caller
// CHECK-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTCOMPOUNDLITERAL:%.*]] = alloca [[STRUCT_TINY:%.*]], align 2
// CHECK-NEXT: [[DOTCOMPOUNDLITERAL1:%.*]] = alloca [[STRUCT_SMALL:%.*]], align 8
// CHECK-NEXT: [[DOTCOMPOUNDLITERAL4:%.*]] = alloca [[STRUCT_SMALL_ALIGNED:%.*]], align 16
// CHECK-NEXT: [[DOTCOMPOUNDLITERAL6:%.*]] = alloca [[STRUCT_LARGE:%.*]], align 8
// CHECK-NEXT: [[BYVAL_TEMP:%.*]] = alloca [[STRUCT_LARGE]], align 8
// CHECK-NEXT: [[DOTCOMPOUNDLITERAL12:%.*]] = alloca [[STRUCT_SMALL_ALIGNED]], align 16
// CHECK-NEXT: [[DOTCOMPOUNDLITERAL16:%.*]] = alloca [[STRUCT_SMALL]], align 8
// CHECK-NEXT: [[DOTCOMPOUNDLITERAL21:%.*]] = alloca [[STRUCT_SMALL_ALIGNED]], align 16
// CHECK-NEXT: [[DOTCOMPOUNDLITERAL25:%.*]] = alloca [[STRUCT_SMALL]], align 8
// CHECK-NEXT: [[DOTCOMPOUNDLITERAL30:%.*]] = alloca [[STRUCT_SMALL_ALIGNED]], align 16
// CHECK-NEXT: [[DOTCOMPOUNDLITERAL34:%.*]] = alloca [[STRUCT_SMALL]], align 8
// CHECK-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_TINY]], ptr [[DOTCOMPOUNDLITERAL]], i32 0, i32 0
// CHECK-NEXT: store i16 6, ptr [[A]], align 2
// CHECK-NEXT: [[B:%.*]] = getelementptr inbounds nuw [[STRUCT_TINY]], ptr [[DOTCOMPOUNDLITERAL]], i32 0, i32 1
// CHECK-NEXT: store i16 7, ptr [[B]], align 2
// CHECK-NEXT: [[C:%.*]] = getelementptr inbounds nuw [[STRUCT_TINY]], ptr [[DOTCOMPOUNDLITERAL]], i32 0, i32 2
// CHECK-NEXT: store i16 8, ptr [[C]], align 2
// CHECK-NEXT: [[D:%.*]] = getelementptr inbounds nuw [[STRUCT_TINY]], ptr [[DOTCOMPOUNDLITERAL]], i32 0, i32 3
// CHECK-NEXT: store i16 9, ptr [[D]], align 2
// CHECK-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_SMALL]], ptr [[DOTCOMPOUNDLITERAL1]], i32 0, i32 0
// CHECK-NEXT: store i64 10, ptr [[A2]], align 8
// CHECK-NEXT: [[B3:%.*]] = getelementptr inbounds nuw [[STRUCT_SMALL]], ptr [[DOTCOMPOUNDLITERAL1]], i32 0, i32 1
// CHECK-NEXT: store ptr null, ptr [[B3]], align 8
// CHECK-NEXT: [[A5:%.*]] = getelementptr inbounds nuw [[STRUCT_SMALL_ALIGNED]], ptr [[DOTCOMPOUNDLITERAL4]], i32 0, i32 0
// CHECK-NEXT: store i128 11, ptr [[A5]], align 16
// CHECK-NEXT: [[A7:%.*]] = getelementptr inbounds nuw [[STRUCT_LARGE]], ptr [[DOTCOMPOUNDLITERAL6]], i32 0, i32 0
// CHECK-NEXT: store i64 12, ptr [[A7]], align 8
// CHECK-NEXT: [[B8:%.*]] = getelementptr inbounds nuw [[STRUCT_LARGE]], ptr [[DOTCOMPOUNDLITERAL6]], i32 0, i32 1
// CHECK-NEXT: store i64 13, ptr [[B8]], align 8
// CHECK-NEXT: [[C9:%.*]] = getelementptr inbounds nuw [[STRUCT_LARGE]], ptr [[DOTCOMPOUNDLITERAL6]], i32 0, i32 2
// CHECK-NEXT: store i64 14, ptr [[C9]], align 8
// CHECK-NEXT: [[D10:%.*]] = getelementptr inbounds nuw [[STRUCT_LARGE]], ptr [[DOTCOMPOUNDLITERAL6]], i32 0, i32 3
// CHECK-NEXT: store i64 15, ptr [[D10]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTCOMPOUNDLITERAL]], align 2
// CHECK-NEXT: [[TMP1:%.*]] = load [2 x i64], ptr [[DOTCOMPOUNDLITERAL1]], align 8
// CHECK-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SMALL_ALIGNED]], ptr [[DOTCOMPOUNDLITERAL4]], i32 0, i32 0
// CHECK-NEXT: [[TMP2:%.*]] = load i128, ptr [[COERCE_DIVE]], align 16
// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[BYVAL_TEMP]], ptr align 8 [[DOTCOMPOUNDLITERAL6]], i64 32, i1 false)
// CHECK-NEXT: [[CALL:%.*]] = call signext i32 (i32, ...) @f_va_callee(i32 noundef signext 1, i32 noundef signext 2, i64 noundef 3, double noundef 4.000000e+00, double noundef 5.000000e+00, i64 [[TMP0]], [2 x i64] [[TMP1]], i128 [[TMP2]], ptr noundef [[BYVAL_TEMP]])
// CHECK-NEXT: [[CALL11:%.*]] = call signext i32 (i32, ...) @f_va_callee(i32 noundef signext 1, i32 noundef signext 2, i32 noundef signext 3, i32 noundef signext 4, fp128 noundef 0xL00000000000000004001400000000000, i32 noundef signext 6, i32 noundef signext 7, i32 noundef signext 8, i32 noundef signext 9)
// CHECK-NEXT: [[A13:%.*]] = getelementptr inbounds nuw [[STRUCT_SMALL_ALIGNED]], ptr [[DOTCOMPOUNDLITERAL12]], i32 0, i32 0
// CHECK-NEXT: store i128 5, ptr [[A13]], align 16
// CHECK-NEXT: [[COERCE_DIVE14:%.*]] = getelementptr inbounds nuw [[STRUCT_SMALL_ALIGNED]], ptr [[DOTCOMPOUNDLITERAL12]], i32 0, i32 0
// CHECK-NEXT: [[TMP3:%.*]] = load i128, ptr [[COERCE_DIVE14]], align 16
// CHECK-NEXT: [[CALL15:%.*]] = call signext i32 (i32, ...) @f_va_callee(i32 noundef signext 1, i32 noundef signext 2, i32 noundef signext 3, i32 noundef signext 4, i128 [[TMP3]], i32 noundef signext 6, i32 noundef signext 7, i32 noundef signext 8, i32 noundef signext 9)
// CHECK-NEXT: [[A17:%.*]] = getelementptr inbounds nuw [[STRUCT_SMALL]], ptr [[DOTCOMPOUNDLITERAL16]], i32 0, i32 0
// CHECK-NEXT: store i64 5, ptr [[A17]], align 8
// CHECK-NEXT: [[B18:%.*]] = getelementptr inbounds nuw [[STRUCT_SMALL]], ptr [[DOTCOMPOUNDLITERAL16]], i32 0, i32 1
// CHECK-NEXT: store ptr null, ptr [[B18]], align 8
// CHECK-NEXT: [[TMP4:%.*]] = load [2 x i64], ptr [[DOTCOMPOUNDLITERAL16]], align 8
// CHECK-NEXT: [[CALL19:%.*]] = call signext i32 (i32, ...) @f_va_callee(i32 noundef signext 1, i32 noundef signext 2, i32 noundef signext 3, i32 noundef signext 4, [2 x i64] [[TMP4]], i32 noundef signext 6, i32 noundef signext 7, i32 noundef signext 8, i32 noundef signext 9)
// CHECK-NEXT: [[CALL20:%.*]] = call signext i32 (i32, ...) @f_va_callee(i32 noundef signext 1, i32 noundef signext 2, i32 noundef signext 3, i32 noundef signext 4, i32 noundef signext 5, fp128 noundef 0xL00000000000000004001800000000000, i32 noundef signext 7, i32 noundef signext 8, i32 noundef signext 9)
// CHECK-NEXT: [[A22:%.*]] = getelementptr inbounds nuw [[STRUCT_SMALL_ALIGNED]], ptr [[DOTCOMPOUNDLITERAL21]], i32 0, i32 0
// CHECK-NEXT: store i128 6, ptr [[A22]], align 16
// CHECK-NEXT: [[COERCE_DIVE23:%.*]] = getelementptr inbounds nuw [[STRUCT_SMALL_ALIGNED]], ptr [[DOTCOMPOUNDLITERAL21]], i32 0, i32 0
// CHECK-NEXT: [[TMP5:%.*]] = load i128, ptr [[COERCE_DIVE23]], align 16
// CHECK-NEXT: [[CALL24:%.*]] = call signext i32 (i32, ...) @f_va_callee(i32 noundef signext 1, i32 noundef signext 2, i32 noundef signext 3, i32 noundef signext 4, i32 noundef signext 5, i128 [[TMP5]], i32 noundef signext 7, i32 noundef signext 8, i32 noundef signext 9)
// CHECK-NEXT: [[A26:%.*]] = getelementptr inbounds nuw [[STRUCT_SMALL]], ptr [[DOTCOMPOUNDLITERAL25]], i32 0, i32 0
// CHECK-NEXT: store i64 6, ptr [[A26]], align 8
// CHECK-NEXT: [[B27:%.*]] = getelementptr inbounds nuw [[STRUCT_SMALL]], ptr [[DOTCOMPOUNDLITERAL25]], i32 0, i32 1
// CHECK-NEXT: store ptr null, ptr [[B27]], align 8
// CHECK-NEXT: [[TMP6:%.*]] = load [2 x i64], ptr [[DOTCOMPOUNDLITERAL25]], align 8
// CHECK-NEXT: [[CALL28:%.*]] = call signext i32 (i32, ...) @f_va_callee(i32 noundef signext 1, i32 noundef signext 2, i32 noundef signext 3, i32 noundef signext 4, i32 noundef signext 5, [2 x i64] [[TMP6]], i32 noundef signext 7, i32 noundef signext 8, i32 noundef signext 9)
// CHECK-NEXT: [[CALL29:%.*]] = call signext i32 (i32, ...) @f_va_callee(i32 noundef signext 1, i32 noundef signext 2, i32 noundef signext 3, i32 noundef signext 4, i32 noundef signext 5, i32 noundef signext 6, fp128 noundef 0xL00000000000000004001C00000000000, i32 noundef signext 8, i32 noundef signext 9)
// CHECK-NEXT: [[A31:%.*]] = getelementptr inbounds nuw [[STRUCT_SMALL_ALIGNED]], ptr [[DOTCOMPOUNDLITERAL30]], i32 0, i32 0
// CHECK-NEXT: store i128 7, ptr [[A31]], align 16
// CHECK-NEXT: [[COERCE_DIVE32:%.*]] = getelementptr inbounds nuw [[STRUCT_SMALL_ALIGNED]], ptr [[DOTCOMPOUNDLITERAL30]], i32 0, i32 0
// CHECK-NEXT: [[TMP7:%.*]] = load i128, ptr [[COERCE_DIVE32]], align 16
// CHECK-NEXT: [[CALL33:%.*]] = call signext i32 (i32, ...) @f_va_callee(i32 noundef signext 1, i32 noundef signext 2, i32 noundef signext 3, i32 noundef signext 4, i32 noundef signext 5, i32 noundef signext 6, i128 [[TMP7]], i32 noundef signext 8, i32 noundef signext 9)
// CHECK-NEXT: [[A35:%.*]] = getelementptr inbounds nuw [[STRUCT_SMALL]], ptr [[DOTCOMPOUNDLITERAL34]], i32 0, i32 0
// CHECK-NEXT: store i64 7, ptr [[A35]], align 8
// CHECK-NEXT: [[B36:%.*]] = getelementptr inbounds nuw [[STRUCT_SMALL]], ptr [[DOTCOMPOUNDLITERAL34]], i32 0, i32 1
// CHECK-NEXT: store ptr null, ptr [[B36]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = load [2 x i64], ptr [[DOTCOMPOUNDLITERAL34]], align 8
// CHECK-NEXT: [[CALL37:%.*]] = call signext i32 (i32, ...) @f_va_callee(i32 noundef signext 1, i32 noundef signext 2, i32 noundef signext 3, i32 noundef signext 4, i32 noundef signext 5, i32 noundef signext 6, [2 x i64] [[TMP8]], i32 noundef signext 8, i32 noundef signext 9)
// CHECK-NEXT: ret void
116 void f_va_caller(void) {
117 f_va_callee(1, 2, 3LL, 4.0f
, 5.0, (struct tiny
){6, 7, 8, 9},
118 (struct small
){10, NULL
}, (struct small_aligned
){11},
119 (struct large
){12, 13, 14, 15});
120 f_va_callee(1, 2, 3, 4, 5.0L, 6, 7, 8, 9);
121 f_va_callee(1, 2, 3, 4, (struct small_aligned
){5}, 6, 7, 8, 9);
122 f_va_callee(1, 2, 3, 4, (struct small
){5, NULL
}, 6, 7, 8, 9);
123 f_va_callee(1, 2, 3, 4, 5, 6.0L, 7, 8, 9);
124 f_va_callee(1, 2, 3, 4, 5, (struct small_aligned
){6}, 7, 8, 9);
125 f_va_callee(1, 2, 3, 4, 5, (struct small
){6, NULL
}, 7, 8, 9);
126 f_va_callee(1, 2, 3, 4, 5, 6, 7.0L, 8, 9);
127 f_va_callee(1, 2, 3, 4, 5, 6, (struct small_aligned
){7}, 8, 9);
128 f_va_callee(1, 2, 3, 4, 5, 6, (struct small
){7, NULL
}, 8, 9);
// CHECK-LABEL: define dso_local signext i32 @f_va_1
// CHECK-SAME: (ptr noundef [[FMT:%.*]], ...) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[FMT_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VA:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[V:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[FMT]], ptr [[FMT_ADDR]], align 8
// CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 8
// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i64 8
// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGP_CUR]], align 8
// CHECK-NEXT: store i32 [[TMP0]], ptr [[V]], align 4
// CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VA]])
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[V]], align 4
// CHECK-NEXT: ret i32 [[TMP1]]
// Pull a single int out of the va_list; it occupies one 8-byte stack slot.
// (Trailing `return v;` restored — matches the CHECK `ret i32` above.)
int f_va_1(char *fmt, ...) {
  __builtin_va_list va;

  __builtin_va_start(va, fmt);
  int v = __builtin_va_arg(va, int);
  __builtin_va_end(va);

  return v;
}
// An "aligned" register pair (where the first register is even-numbered) is
// used to pass varargs with 2x xlen alignment and 2x xlen size. Ensure the
// correct offsets are used.
// CHECK-LABEL: define dso_local fp128 @f_va_2
// CHECK-SAME: (ptr noundef [[FMT:%.*]], ...) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[FMT_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VA:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[V:%.*]] = alloca fp128, align 16
// CHECK-NEXT: store ptr [[FMT]], ptr [[FMT_ADDR]], align 8
// CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 15
// CHECK-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[TMP0]], i64 -16)
// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR_ALIGNED]], i64 16
// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load fp128, ptr [[ARGP_CUR_ALIGNED]], align 16
// CHECK-NEXT: store fp128 [[TMP1]], ptr [[V]], align 16
// CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VA]])
// CHECK-NEXT: [[TMP2:%.*]] = load fp128, ptr [[V]], align 16
// CHECK-NEXT: ret fp128 [[TMP2]]
// A single 16-byte/16-aligned long double vararg: the va_list pointer is
// rounded up to 16 before the load (see the ptrmask in the CHECK lines).
// (Trailing `return v;` restored — matches the CHECK `ret fp128` above.)
long double f_va_2(char *fmt, ...) {
  __builtin_va_list va;

  __builtin_va_start(va, fmt);
  long double v = __builtin_va_arg(va, long double);
  __builtin_va_end(va);

  return v;
}
// Two "aligned" register pairs.
// CHECK-LABEL: define dso_local fp128 @f_va_3
// CHECK-SAME: (ptr noundef [[FMT:%.*]], ...) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[FMT_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VA:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[V:%.*]] = alloca fp128, align 16
// CHECK-NEXT: [[W:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[X:%.*]] = alloca fp128, align 16
// CHECK-NEXT: store ptr [[FMT]], ptr [[FMT_ADDR]], align 8
// CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 15
// CHECK-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[TMP0]], i64 -16)
// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR_ALIGNED]], i64 16
// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load fp128, ptr [[ARGP_CUR_ALIGNED]], align 16
// CHECK-NEXT: store fp128 [[TMP1]], ptr [[V]], align 16
// CHECK-NEXT: [[ARGP_CUR1:%.*]] = load ptr, ptr [[VA]], align 8
// CHECK-NEXT: [[ARGP_NEXT2:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR1]], i64 8
// CHECK-NEXT: store ptr [[ARGP_NEXT2]], ptr [[VA]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARGP_CUR1]], align 8
// CHECK-NEXT: store i32 [[TMP2]], ptr [[W]], align 4
// CHECK-NEXT: [[ARGP_CUR3:%.*]] = load ptr, ptr [[VA]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR3]], i32 15
// CHECK-NEXT: [[ARGP_CUR3_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[TMP3]], i64 -16)
// CHECK-NEXT: [[ARGP_NEXT4:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR3_ALIGNED]], i64 16
// CHECK-NEXT: store ptr [[ARGP_NEXT4]], ptr [[VA]], align 8
// CHECK-NEXT: [[TMP4:%.*]] = load fp128, ptr [[ARGP_CUR3_ALIGNED]], align 16
// CHECK-NEXT: store fp128 [[TMP4]], ptr [[X]], align 16
// CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VA]])
// CHECK-NEXT: [[TMP5:%.*]] = load fp128, ptr [[V]], align 16
// CHECK-NEXT: [[TMP6:%.*]] = load fp128, ptr [[X]], align 16
// CHECK-NEXT: [[ADD:%.*]] = fadd fp128 [[TMP5]], [[TMP6]]
// CHECK-NEXT: ret fp128 [[ADD]]
// Two 16-aligned long doubles separated by an int: each fp128 fetch re-aligns
// the va_list pointer to 16, the int in between advances it by only 8.
// (Trailing `return v + x;` restored — matches the CHECK fadd/ret above.)
long double f_va_3(char *fmt, ...) {
  __builtin_va_list va;

  __builtin_va_start(va, fmt);
  long double v = __builtin_va_arg(va, long double);
  int w = __builtin_va_arg(va, int);
  long double x = __builtin_va_arg(va, long double);
  __builtin_va_end(va);

  return v + x;
}
// CHECK-LABEL: define dso_local signext i32 @f_va_4
// CHECK-SAME: (ptr noundef [[FMT:%.*]], ...) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[FMT_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VA:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[V:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[TS:%.*]] = alloca [[STRUCT_TINY:%.*]], align 2
// CHECK-NEXT: [[SS:%.*]] = alloca [[STRUCT_SMALL:%.*]], align 8
// CHECK-NEXT: [[LS:%.*]] = alloca [[STRUCT_LARGE:%.*]], align 8
// CHECK-NEXT: [[RET:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[FMT]], ptr [[FMT_ADDR]], align 8
// CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 8
// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i64 8
// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGP_CUR]], align 8
// CHECK-NEXT: store i32 [[TMP0]], ptr [[V]], align 4
// CHECK-NEXT: [[ARGP_CUR1:%.*]] = load ptr, ptr [[VA]], align 8
// CHECK-NEXT: [[ARGP_NEXT2:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR1]], i64 8
// CHECK-NEXT: store ptr [[ARGP_NEXT2]], ptr [[VA]], align 8
// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 2 [[TS]], ptr align 8 [[ARGP_CUR1]], i64 8, i1 false)
// CHECK-NEXT: [[ARGP_CUR3:%.*]] = load ptr, ptr [[VA]], align 8
// CHECK-NEXT: [[ARGP_NEXT4:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR3]], i64 16
// CHECK-NEXT: store ptr [[ARGP_NEXT4]], ptr [[VA]], align 8
// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[SS]], ptr align 8 [[ARGP_CUR3]], i64 16, i1 false)
// CHECK-NEXT: [[ARGP_CUR5:%.*]] = load ptr, ptr [[VA]], align 8
// CHECK-NEXT: [[ARGP_NEXT6:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR5]], i64 8
// CHECK-NEXT: store ptr [[ARGP_NEXT6]], ptr [[VA]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGP_CUR5]], align 8
// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[LS]], ptr align 8 [[TMP1]], i64 32, i1 false)
// CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VA]])
// CHECK-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_TINY]], ptr [[TS]], i32 0, i32 0
// CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr [[A]], align 2
// CHECK-NEXT: [[CONV:%.*]] = zext i16 [[TMP2]] to i64
// CHECK-NEXT: [[A7:%.*]] = getelementptr inbounds nuw [[STRUCT_SMALL]], ptr [[SS]], i32 0, i32 0
// CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr [[A7]], align 8
// CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[CONV]], [[TMP3]]
// CHECK-NEXT: [[C:%.*]] = getelementptr inbounds nuw [[STRUCT_LARGE]], ptr [[LS]], i32 0, i32 2
// CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr [[C]], align 8
// CHECK-NEXT: [[ADD8:%.*]] = add nsw i64 [[ADD]], [[TMP4]]
// CHECK-NEXT: [[CONV9:%.*]] = trunc i64 [[ADD8]] to i32
// CHECK-NEXT: store i32 [[CONV9]], ptr [[RET]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[RET]], align 4
// CHECK-NEXT: ret i32 [[TMP5]]
285 int f_va_4(char *fmt
, ...) {
286 __builtin_va_list va
;
288 __builtin_va_start(va
, fmt
);
289 int v
= __builtin_va_arg(va
, int);
290 struct tiny ts
= __builtin_va_arg(va
, struct tiny
);
291 struct small ss
= __builtin_va_arg(va
, struct small
);
292 struct large ls
= __builtin_va_arg(va
, struct large
);
293 __builtin_va_end(va
);
295 int ret
= ts
.a
+ ss
.a
+ ls
.c
;