// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// RUN: %clang_cc1 -triple riscv32 -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-abi ilp32f -emit-llvm %s -o - \
// RUN:   | FileCheck %s
// RUN: %clang_cc1 -triple riscv32 -target-feature +d -target-feature +f -target-abi ilp32d -emit-llvm %s -o - \
// RUN:   | FileCheck %s

#include <stddef.h>
#include <stdint.h>

struct tiny {
  uint8_t a, b, c, d;
};

struct small {
  int32_t a, *b;
};

struct small_aligned {
  int64_t a;
};

struct large {
  int32_t a, b, c, d;
};

// Ensure that ABI lowering happens as expected for vararg calls. For RV32
// with the base integer calling convention there will be no observable
// differences in the lowered IR for a call with varargs vs without.

int f_va_callee(int, ...);
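
// Illustrative sketch of the point above (not exercised by the checks): a
// hypothetical fixed-argument twin of f_va_callee, such as the declaration
// below, would see its arguments lowered to the same IR-level types under
// the base RV32 integer calling convention. The name is made up for
// illustration and the function is never called in this test.
int f_fixed_callee_sketch(int, int, int64_t, double, double, struct tiny,
                          struct small, struct small_aligned, struct large);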

// CHECK-LABEL: define dso_local void @f_va_caller
// CHECK-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL:%.*]] = alloca [[STRUCT_TINY:%.*]], align 1
// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL1:%.*]] = alloca [[STRUCT_SMALL:%.*]], align 4
// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL4:%.*]] = alloca [[STRUCT_SMALL_ALIGNED:%.*]], align 8
// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL6:%.*]] = alloca [[STRUCT_LARGE:%.*]], align 4
// CHECK-NEXT:    [[BYVAL_TEMP:%.*]] = alloca [[STRUCT_LARGE]], align 4
// CHECK-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_TINY]], ptr [[DOTCOMPOUNDLITERAL]], i32 0, i32 0
// CHECK-NEXT:    store i8 6, ptr [[A]], align 1
// CHECK-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_TINY]], ptr [[DOTCOMPOUNDLITERAL]], i32 0, i32 1
// CHECK-NEXT:    store i8 7, ptr [[B]], align 1
// CHECK-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_TINY]], ptr [[DOTCOMPOUNDLITERAL]], i32 0, i32 2
// CHECK-NEXT:    store i8 8, ptr [[C]], align 1
// CHECK-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_TINY]], ptr [[DOTCOMPOUNDLITERAL]], i32 0, i32 3
// CHECK-NEXT:    store i8 9, ptr [[D]], align 1
// CHECK-NEXT:    [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMALL]], ptr [[DOTCOMPOUNDLITERAL1]], i32 0, i32 0
// CHECK-NEXT:    store i32 10, ptr [[A2]], align 4
// CHECK-NEXT:    [[B3:%.*]] = getelementptr inbounds [[STRUCT_SMALL]], ptr [[DOTCOMPOUNDLITERAL1]], i32 0, i32 1
// CHECK-NEXT:    store ptr null, ptr [[B3]], align 4
// CHECK-NEXT:    [[A5:%.*]] = getelementptr inbounds [[STRUCT_SMALL_ALIGNED]], ptr [[DOTCOMPOUNDLITERAL4]], i32 0, i32 0
// CHECK-NEXT:    store i64 11, ptr [[A5]], align 8
// CHECK-NEXT:    [[A7:%.*]] = getelementptr inbounds [[STRUCT_LARGE]], ptr [[DOTCOMPOUNDLITERAL6]], i32 0, i32 0
// CHECK-NEXT:    store i32 12, ptr [[A7]], align 4
// CHECK-NEXT:    [[B8:%.*]] = getelementptr inbounds [[STRUCT_LARGE]], ptr [[DOTCOMPOUNDLITERAL6]], i32 0, i32 1
// CHECK-NEXT:    store i32 13, ptr [[B8]], align 4
// CHECK-NEXT:    [[C9:%.*]] = getelementptr inbounds [[STRUCT_LARGE]], ptr [[DOTCOMPOUNDLITERAL6]], i32 0, i32 2
// CHECK-NEXT:    store i32 14, ptr [[C9]], align 4
// CHECK-NEXT:    [[D10:%.*]] = getelementptr inbounds [[STRUCT_LARGE]], ptr [[DOTCOMPOUNDLITERAL6]], i32 0, i32 3
// CHECK-NEXT:    store i32 15, ptr [[D10]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[DOTCOMPOUNDLITERAL]], align 1
// CHECK-NEXT:    [[TMP1:%.*]] = load [2 x i32], ptr [[DOTCOMPOUNDLITERAL1]], align 4
// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_SMALL_ALIGNED]], ptr [[DOTCOMPOUNDLITERAL4]], i32 0, i32 0
// CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr [[COERCE_DIVE]], align 8
// CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[BYVAL_TEMP]], ptr align 4 [[DOTCOMPOUNDLITERAL6]], i32 16, i1 false)
// CHECK-NEXT:    [[CALL:%.*]] = call i32 (i32, ...) @f_va_callee(i32 noundef 1, i32 noundef 2, i64 noundef 3, double noundef 4.000000e+00, double noundef 5.000000e+00, i32 [[TMP0]], [2 x i32] [[TMP1]], i64 [[TMP2]], ptr noundef [[BYVAL_TEMP]])
// CHECK-NEXT:    ret void
//
void f_va_caller(void) {
  f_va_callee(1, 2, 3LL, 4.0f, 5.0, (struct tiny){6, 7, 8, 9},
              (struct small){10, NULL}, (struct small_aligned){11},
              (struct large){12, 13, 14, 15});
}

// CHECK-LABEL: define dso_local i32 @f_va_1
// CHECK-SAME: (ptr noundef [[FMT:%.*]], ...) #[[ATTR0]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[FMT_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT:    [[VA:%.*]] = alloca ptr, align 4
// CHECK-NEXT:    [[V:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store ptr [[FMT]], ptr [[FMT_ADDR]], align 4
// CHECK-NEXT:    call void @llvm.va_start(ptr [[VA]])
// CHECK-NEXT:    [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-NEXT:    [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 4
// CHECK-NEXT:    store ptr [[ARGP_NEXT]], ptr [[VA]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[ARGP_CUR]], align 4
// CHECK-NEXT:    store i32 [[TMP0]], ptr [[V]], align 4
// CHECK-NEXT:    call void @llvm.va_end(ptr [[VA]])
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[V]], align 4
// CHECK-NEXT:    ret i32 [[TMP1]]
//
int f_va_1(char *fmt, ...) {
  __builtin_va_list va;

  __builtin_va_start(va, fmt);
  int v = __builtin_va_arg(va, int);
  __builtin_va_end(va);

  return v;
}

// An "aligned" register pair (where the first register is even-numbered) is
// used to pass varargs with 2x xlen alignment and 2x xlen size. Ensure the
// correct offsets are used.

// CHECK-LABEL: define dso_local double @f_va_2
// CHECK-SAME: (ptr noundef [[FMT:%.*]], ...) #[[ATTR0]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[FMT_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT:    [[VA:%.*]] = alloca ptr, align 4
// CHECK-NEXT:    [[V:%.*]] = alloca double, align 8
// CHECK-NEXT:    store ptr [[FMT]], ptr [[FMT_ADDR]], align 4
// CHECK-NEXT:    call void @llvm.va_start(ptr [[VA]])
// CHECK-NEXT:    [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 7
// CHECK-NEXT:    [[ARGP_CUR_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i32(ptr [[TMP0]], i32 -8)
// CHECK-NEXT:    [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR_ALIGNED]], i32 8
// CHECK-NEXT:    store ptr [[ARGP_NEXT]], ptr [[VA]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load double, ptr [[ARGP_CUR_ALIGNED]], align 8
// CHECK-NEXT:    store double [[TMP1]], ptr [[V]], align 8
// CHECK-NEXT:    call void @llvm.va_end(ptr [[VA]])
// CHECK-NEXT:    [[TMP2:%.*]] = load double, ptr [[V]], align 8
// CHECK-NEXT:    ret double [[TMP2]]
//
double f_va_2(char *fmt, ...) {
  __builtin_va_list va;

  __builtin_va_start(va, fmt);
  double v = __builtin_va_arg(va, double);
  __builtin_va_end(va);

  return v;
}
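
// A sketch of the realignment the f_va_2 checks verify (illustrative only,
// never called by this test; the helper name is made up). The
// getelementptr-by-7 plus ptrmask pair in the IR is the usual
// round-up-to-8-bytes idiom, after which the va_list pointer is advanced
// past the 2x xlen slot.
static inline double load_aligned_vararg_sketch(char **argp) {
  uintptr_t p = (uintptr_t)*argp;
  p = (p + 7) & ~(uintptr_t)7; /* gep +7 then mask low bits: align up to 8 */
  *argp = (char *)p + 8;       /* advance past the 8-byte slot */
  return *(double *)p;         /* load the vararg from the aligned slot */
}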

// Two "aligned" register pairs.

// CHECK-LABEL: define dso_local double @f_va_3
// CHECK-SAME: (ptr noundef [[FMT:%.*]], ...) #[[ATTR0]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[FMT_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT:    [[VA:%.*]] = alloca ptr, align 4
// CHECK-NEXT:    [[V:%.*]] = alloca double, align 8
// CHECK-NEXT:    [[W:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[X:%.*]] = alloca double, align 8
// CHECK-NEXT:    store ptr [[FMT]], ptr [[FMT_ADDR]], align 4
// CHECK-NEXT:    call void @llvm.va_start(ptr [[VA]])
// CHECK-NEXT:    [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 7
// CHECK-NEXT:    [[ARGP_CUR_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i32(ptr [[TMP0]], i32 -8)
// CHECK-NEXT:    [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR_ALIGNED]], i32 8
// CHECK-NEXT:    store ptr [[ARGP_NEXT]], ptr [[VA]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load double, ptr [[ARGP_CUR_ALIGNED]], align 8
// CHECK-NEXT:    store double [[TMP1]], ptr [[V]], align 8
// CHECK-NEXT:    [[ARGP_CUR1:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-NEXT:    [[ARGP_NEXT2:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR1]], i32 4
// CHECK-NEXT:    store ptr [[ARGP_NEXT2]], ptr [[VA]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[ARGP_CUR1]], align 4
// CHECK-NEXT:    store i32 [[TMP2]], ptr [[W]], align 4
// CHECK-NEXT:    [[ARGP_CUR3:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR3]], i32 7
// CHECK-NEXT:    [[ARGP_CUR3_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i32(ptr [[TMP3]], i32 -8)
// CHECK-NEXT:    [[ARGP_NEXT4:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR3_ALIGNED]], i32 8
// CHECK-NEXT:    store ptr [[ARGP_NEXT4]], ptr [[VA]], align 4
// CHECK-NEXT:    [[TMP4:%.*]] = load double, ptr [[ARGP_CUR3_ALIGNED]], align 8
// CHECK-NEXT:    store double [[TMP4]], ptr [[X]], align 8
// CHECK-NEXT:    call void @llvm.va_end(ptr [[VA]])
// CHECK-NEXT:    [[TMP5:%.*]] = load double, ptr [[V]], align 8
// CHECK-NEXT:    [[TMP6:%.*]] = load double, ptr [[X]], align 8
// CHECK-NEXT:    [[ADD:%.*]] = fadd double [[TMP5]], [[TMP6]]
// CHECK-NEXT:    ret double [[ADD]]
//
double f_va_3(char *fmt, ...) {
  __builtin_va_list va;

  __builtin_va_start(va, fmt);
  double v = __builtin_va_arg(va, double);
  int w = __builtin_va_arg(va, int);
  double x = __builtin_va_arg(va, double);
  __builtin_va_end(va);

  return v + x;
}
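
// Illustrative offsets for the f_va_3 checks (a sketch assuming all three
// varargs spill to consecutive stack slots starting at offset 0; the enum
// names are made up): each double must sit in an 8-byte-aligned slot, so the
// int in between forces a second round-up.
enum {
  VA3_OFF_V = 0,                        /* first double: bytes 0..7        */
  VA3_OFF_W = VA3_OFF_V + 8,            /* int: next 4-byte slot, 8..11    */
  VA3_OFF_X = (VA3_OFF_W + 4 + 7) & ~7  /* second double: 12 rounds to 16  */
};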

// CHECK-LABEL: define dso_local i32 @f_va_4
// CHECK-SAME: (ptr noundef [[FMT:%.*]], ...) #[[ATTR0]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[FMT_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT:    [[VA:%.*]] = alloca ptr, align 4
// CHECK-NEXT:    [[V:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[LD:%.*]] = alloca fp128, align 16
// CHECK-NEXT:    [[TS:%.*]] = alloca [[STRUCT_TINY:%.*]], align 1
// CHECK-NEXT:    [[SS:%.*]] = alloca [[STRUCT_SMALL:%.*]], align 4
// CHECK-NEXT:    [[LS:%.*]] = alloca [[STRUCT_LARGE:%.*]], align 4
// CHECK-NEXT:    [[RET:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store ptr [[FMT]], ptr [[FMT_ADDR]], align 4
// CHECK-NEXT:    call void @llvm.va_start(ptr [[VA]])
// CHECK-NEXT:    [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-NEXT:    [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 4
// CHECK-NEXT:    store ptr [[ARGP_NEXT]], ptr [[VA]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[ARGP_CUR]], align 4
// CHECK-NEXT:    store i32 [[TMP0]], ptr [[V]], align 4
// CHECK-NEXT:    [[ARGP_CUR1:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-NEXT:    [[ARGP_NEXT2:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR1]], i32 4
// CHECK-NEXT:    store ptr [[ARGP_NEXT2]], ptr [[VA]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[ARGP_CUR1]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = load fp128, ptr [[TMP1]], align 16
// CHECK-NEXT:    store fp128 [[TMP2]], ptr [[LD]], align 16
// CHECK-NEXT:    [[ARGP_CUR3:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-NEXT:    [[ARGP_NEXT4:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR3]], i32 4
// CHECK-NEXT:    store ptr [[ARGP_NEXT4]], ptr [[VA]], align 4
// CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[TS]], ptr align 4 [[ARGP_CUR3]], i32 4, i1 false)
// CHECK-NEXT:    [[ARGP_CUR5:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-NEXT:    [[ARGP_NEXT6:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR5]], i32 8
// CHECK-NEXT:    store ptr [[ARGP_NEXT6]], ptr [[VA]], align 4
// CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[SS]], ptr align 4 [[ARGP_CUR5]], i32 8, i1 false)
// CHECK-NEXT:    [[ARGP_CUR7:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-NEXT:    [[ARGP_NEXT8:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR7]], i32 4
// CHECK-NEXT:    store ptr [[ARGP_NEXT8]], ptr [[VA]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = load ptr, ptr [[ARGP_CUR7]], align 4
// CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[LS]], ptr align 4 [[TMP3]], i32 16, i1 false)
// CHECK-NEXT:    call void @llvm.va_end(ptr [[VA]])
// CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[V]], align 4
// CHECK-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP4]] to fp128
// CHECK-NEXT:    [[TMP5:%.*]] = load fp128, ptr [[LD]], align 16
// CHECK-NEXT:    [[ADD:%.*]] = fadd fp128 [[CONV]], [[TMP5]]
// CHECK-NEXT:    [[CONV9:%.*]] = fptosi fp128 [[ADD]] to i32
// CHECK-NEXT:    store i32 [[CONV9]], ptr [[RET]], align 4
// CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr [[RET]], align 4
// CHECK-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_TINY]], ptr [[TS]], i32 0, i32 0
// CHECK-NEXT:    [[TMP7:%.*]] = load i8, ptr [[A]], align 1
// CHECK-NEXT:    [[CONV10:%.*]] = zext i8 [[TMP7]] to i32
// CHECK-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP6]], [[CONV10]]
// CHECK-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_TINY]], ptr [[TS]], i32 0, i32 1
// CHECK-NEXT:    [[TMP8:%.*]] = load i8, ptr [[B]], align 1
// CHECK-NEXT:    [[CONV12:%.*]] = zext i8 [[TMP8]] to i32
// CHECK-NEXT:    [[ADD13:%.*]] = add nsw i32 [[ADD11]], [[CONV12]]
// CHECK-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_TINY]], ptr [[TS]], i32 0, i32 2
// CHECK-NEXT:    [[TMP9:%.*]] = load i8, ptr [[C]], align 1
// CHECK-NEXT:    [[CONV14:%.*]] = zext i8 [[TMP9]] to i32
// CHECK-NEXT:    [[ADD15:%.*]] = add nsw i32 [[ADD13]], [[CONV14]]
// CHECK-NEXT:    [[D:%.*]] = getelementptr inbounds [[STRUCT_TINY]], ptr [[TS]], i32 0, i32 3
// CHECK-NEXT:    [[TMP10:%.*]] = load i8, ptr [[D]], align 1
// CHECK-NEXT:    [[CONV16:%.*]] = zext i8 [[TMP10]] to i32
// CHECK-NEXT:    [[ADD17:%.*]] = add nsw i32 [[ADD15]], [[CONV16]]
// CHECK-NEXT:    store i32 [[ADD17]], ptr [[RET]], align 4
// CHECK-NEXT:    [[TMP11:%.*]] = load i32, ptr [[RET]], align 4
// CHECK-NEXT:    [[A18:%.*]] = getelementptr inbounds [[STRUCT_SMALL]], ptr [[SS]], i32 0, i32 0
// CHECK-NEXT:    [[TMP12:%.*]] = load i32, ptr [[A18]], align 4
// CHECK-NEXT:    [[ADD19:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
// CHECK-NEXT:    [[B20:%.*]] = getelementptr inbounds [[STRUCT_SMALL]], ptr [[SS]], i32 0, i32 1
// CHECK-NEXT:    [[TMP13:%.*]] = load ptr, ptr [[B20]], align 4
// CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i32
// CHECK-NEXT:    [[ADD21:%.*]] = add nsw i32 [[ADD19]], [[TMP14]]
// CHECK-NEXT:    store i32 [[ADD21]], ptr [[RET]], align 4
// CHECK-NEXT:    [[TMP15:%.*]] = load i32, ptr [[RET]], align 4
// CHECK-NEXT:    [[A22:%.*]] = getelementptr inbounds [[STRUCT_LARGE]], ptr [[LS]], i32 0, i32 0
// CHECK-NEXT:    [[TMP16:%.*]] = load i32, ptr [[A22]], align 4
// CHECK-NEXT:    [[ADD23:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
// CHECK-NEXT:    [[B24:%.*]] = getelementptr inbounds [[STRUCT_LARGE]], ptr [[LS]], i32 0, i32 1
// CHECK-NEXT:    [[TMP17:%.*]] = load i32, ptr [[B24]], align 4
// CHECK-NEXT:    [[ADD25:%.*]] = add nsw i32 [[ADD23]], [[TMP17]]
// CHECK-NEXT:    [[C26:%.*]] = getelementptr inbounds [[STRUCT_LARGE]], ptr [[LS]], i32 0, i32 2
// CHECK-NEXT:    [[TMP18:%.*]] = load i32, ptr [[C26]], align 4
// CHECK-NEXT:    [[ADD27:%.*]] = add nsw i32 [[ADD25]], [[TMP18]]
// CHECK-NEXT:    [[D28:%.*]] = getelementptr inbounds [[STRUCT_LARGE]], ptr [[LS]], i32 0, i32 3
// CHECK-NEXT:    [[TMP19:%.*]] = load i32, ptr [[D28]], align 4
// CHECK-NEXT:    [[ADD29:%.*]] = add nsw i32 [[ADD27]], [[TMP19]]
// CHECK-NEXT:    store i32 [[ADD29]], ptr [[RET]], align 4
// CHECK-NEXT:    [[TMP20:%.*]] = load i32, ptr [[RET]], align 4
// CHECK-NEXT:    ret i32 [[TMP20]]
//
int f_va_4(char *fmt, ...) {
  __builtin_va_list va;

  __builtin_va_start(va, fmt);
  int v = __builtin_va_arg(va, int);
  long double ld = __builtin_va_arg(va, long double);
  struct tiny ts = __builtin_va_arg(va, struct tiny);
  struct small ss = __builtin_va_arg(va, struct small);
  struct large ls = __builtin_va_arg(va, struct large);
  __builtin_va_end(va);

  int ret = (int)((long double)v + ld);
  ret = ret + ts.a + ts.b + ts.c + ts.d;
  ret = ret + ss.a + (int)ss.b;
  ret = ret + ls.a + ls.b + ls.c + ls.d;

  return ret;
}