// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// RUN: %clang_cc1 -triple riscv64 -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-abi lp64f -emit-llvm %s -o - \
// RUN:   | FileCheck %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +d -target-feature +f -target-abi lp64d -emit-llvm %s -o - \
// RUN:   | FileCheck %s
#include <stddef.h>
#include <stdint.h>

struct tiny {
  uint16_t a, b, c, d;
};

struct small {
  int64_t a, *b;
};

struct small_aligned {
  __int128_t a;
};

struct large {
  int64_t a, b, c, d;
};
// Ensure that ABI lowering happens as expected for vararg calls.
// Specifically, ensure that signext is emitted for varargs that will be
// passed in registers but not on the stack. Ensure this takes into account
// the use of "aligned" register pairs for varargs with 2*xlen alignment.

int f_va_callee(int, ...);
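// As the checks below show, struct tiny is coerced to a single i64, struct
// small to [2 x i64], struct small_aligned to an i128, and struct large
// (larger than 2*xlen bytes) is copied into a temporary and passed by pointer.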
// CHECK-LABEL: define dso_local void @f_va_caller
// CHECK-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTCOMPOUNDLITERAL:%.*]] = alloca [[STRUCT_TINY:%.*]], align 2
// CHECK-NEXT: [[DOTCOMPOUNDLITERAL1:%.*]] = alloca [[STRUCT_SMALL:%.*]], align 8
// CHECK-NEXT: [[DOTCOMPOUNDLITERAL4:%.*]] = alloca [[STRUCT_SMALL_ALIGNED:%.*]], align 16
// CHECK-NEXT: [[DOTCOMPOUNDLITERAL6:%.*]] = alloca [[STRUCT_LARGE:%.*]], align 8
// CHECK-NEXT: [[BYVAL_TEMP:%.*]] = alloca [[STRUCT_LARGE]], align 8
// CHECK-NEXT: [[DOTCOMPOUNDLITERAL12:%.*]] = alloca [[STRUCT_SMALL_ALIGNED]], align 16
// CHECK-NEXT: [[DOTCOMPOUNDLITERAL16:%.*]] = alloca [[STRUCT_SMALL]], align 8
// CHECK-NEXT: [[DOTCOMPOUNDLITERAL21:%.*]] = alloca [[STRUCT_SMALL_ALIGNED]], align 16
// CHECK-NEXT: [[DOTCOMPOUNDLITERAL25:%.*]] = alloca [[STRUCT_SMALL]], align 8
// CHECK-NEXT: [[DOTCOMPOUNDLITERAL30:%.*]] = alloca [[STRUCT_SMALL_ALIGNED]], align 16
// CHECK-NEXT: [[DOTCOMPOUNDLITERAL34:%.*]] = alloca [[STRUCT_SMALL]], align 8
// CHECK-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_TINY]], ptr [[DOTCOMPOUNDLITERAL]], i32 0, i32 0
// CHECK-NEXT: store i16 6, ptr [[A]], align 2
// CHECK-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_TINY]], ptr [[DOTCOMPOUNDLITERAL]], i32 0, i32 1
// CHECK-NEXT: store i16 7, ptr [[B]], align 2
// CHECK-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_TINY]], ptr [[DOTCOMPOUNDLITERAL]], i32 0, i32 2
// CHECK-NEXT: store i16 8, ptr [[C]], align 2
// CHECK-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_TINY]], ptr [[DOTCOMPOUNDLITERAL]], i32 0, i32 3
// CHECK-NEXT: store i16 9, ptr [[D]], align 2
// CHECK-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMALL]], ptr [[DOTCOMPOUNDLITERAL1]], i32 0, i32 0
// CHECK-NEXT: store i64 10, ptr [[A2]], align 8
// CHECK-NEXT: [[B3:%.*]] = getelementptr inbounds [[STRUCT_SMALL]], ptr [[DOTCOMPOUNDLITERAL1]], i32 0, i32 1
// CHECK-NEXT: store ptr null, ptr [[B3]], align 8
// CHECK-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_SMALL_ALIGNED]], ptr [[DOTCOMPOUNDLITERAL4]], i32 0, i32 0
// CHECK-NEXT: store i128 11, ptr [[A5]], align 16
// CHECK-NEXT: [[A7:%.*]] = getelementptr inbounds [[STRUCT_LARGE]], ptr [[DOTCOMPOUNDLITERAL6]], i32 0, i32 0
// CHECK-NEXT: store i64 12, ptr [[A7]], align 8
// CHECK-NEXT: [[B8:%.*]] = getelementptr inbounds [[STRUCT_LARGE]], ptr [[DOTCOMPOUNDLITERAL6]], i32 0, i32 1
// CHECK-NEXT: store i64 13, ptr [[B8]], align 8
// CHECK-NEXT: [[C9:%.*]] = getelementptr inbounds [[STRUCT_LARGE]], ptr [[DOTCOMPOUNDLITERAL6]], i32 0, i32 2
// CHECK-NEXT: store i64 14, ptr [[C9]], align 8
// CHECK-NEXT: [[D10:%.*]] = getelementptr inbounds [[STRUCT_LARGE]], ptr [[DOTCOMPOUNDLITERAL6]], i32 0, i32 3
// CHECK-NEXT: store i64 15, ptr [[D10]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTCOMPOUNDLITERAL]], align 2
// CHECK-NEXT: [[TMP1:%.*]] = load [2 x i64], ptr [[DOTCOMPOUNDLITERAL1]], align 8
// CHECK-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_SMALL_ALIGNED]], ptr [[DOTCOMPOUNDLITERAL4]], i32 0, i32 0
// CHECK-NEXT: [[TMP2:%.*]] = load i128, ptr [[COERCE_DIVE]], align 16
// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[BYVAL_TEMP]], ptr align 8 [[DOTCOMPOUNDLITERAL6]], i64 32, i1 false)
// CHECK-NEXT: [[CALL:%.*]] = call signext i32 (i32, ...) @f_va_callee(i32 noundef signext 1, i32 noundef signext 2, i64 noundef 3, double noundef 4.000000e+00, double noundef 5.000000e+00, i64 [[TMP0]], [2 x i64] [[TMP1]], i128 [[TMP2]], ptr noundef [[BYVAL_TEMP]])
// CHECK-NEXT: [[CALL11:%.*]] = call signext i32 (i32, ...) @f_va_callee(i32 noundef signext 1, i32 noundef signext 2, i32 noundef signext 3, i32 noundef signext 4, fp128 noundef 0xL00000000000000004001400000000000, i32 noundef signext 6, i32 noundef signext 7, i32 noundef signext 8, i32 noundef signext 9)
// CHECK-NEXT: [[A13:%.*]] = getelementptr inbounds [[STRUCT_SMALL_ALIGNED]], ptr [[DOTCOMPOUNDLITERAL12]], i32 0, i32 0
// CHECK-NEXT: store i128 5, ptr [[A13]], align 16
// CHECK-NEXT: [[COERCE_DIVE14:%.*]] = getelementptr inbounds [[STRUCT_SMALL_ALIGNED]], ptr [[DOTCOMPOUNDLITERAL12]], i32 0, i32 0
// CHECK-NEXT: [[TMP3:%.*]] = load i128, ptr [[COERCE_DIVE14]], align 16
// CHECK-NEXT: [[CALL15:%.*]] = call signext i32 (i32, ...) @f_va_callee(i32 noundef signext 1, i32 noundef signext 2, i32 noundef signext 3, i32 noundef signext 4, i128 [[TMP3]], i32 noundef signext 6, i32 noundef signext 7, i32 noundef signext 8, i32 noundef signext 9)
// CHECK-NEXT: [[A17:%.*]] = getelementptr inbounds [[STRUCT_SMALL]], ptr [[DOTCOMPOUNDLITERAL16]], i32 0, i32 0
// CHECK-NEXT: store i64 5, ptr [[A17]], align 8
// CHECK-NEXT: [[B18:%.*]] = getelementptr inbounds [[STRUCT_SMALL]], ptr [[DOTCOMPOUNDLITERAL16]], i32 0, i32 1
// CHECK-NEXT: store ptr null, ptr [[B18]], align 8
// CHECK-NEXT: [[TMP4:%.*]] = load [2 x i64], ptr [[DOTCOMPOUNDLITERAL16]], align 8
// CHECK-NEXT: [[CALL19:%.*]] = call signext i32 (i32, ...) @f_va_callee(i32 noundef signext 1, i32 noundef signext 2, i32 noundef signext 3, i32 noundef signext 4, [2 x i64] [[TMP4]], i32 noundef signext 6, i32 noundef signext 7, i32 noundef signext 8, i32 noundef signext 9)
// CHECK-NEXT: [[CALL20:%.*]] = call signext i32 (i32, ...) @f_va_callee(i32 noundef signext 1, i32 noundef signext 2, i32 noundef signext 3, i32 noundef signext 4, i32 noundef signext 5, fp128 noundef 0xL00000000000000004001800000000000, i32 noundef signext 7, i32 noundef signext 8, i32 noundef signext 9)
// CHECK-NEXT: [[A22:%.*]] = getelementptr inbounds [[STRUCT_SMALL_ALIGNED]], ptr [[DOTCOMPOUNDLITERAL21]], i32 0, i32 0
// CHECK-NEXT: store i128 6, ptr [[A22]], align 16
// CHECK-NEXT: [[COERCE_DIVE23:%.*]] = getelementptr inbounds [[STRUCT_SMALL_ALIGNED]], ptr [[DOTCOMPOUNDLITERAL21]], i32 0, i32 0
// CHECK-NEXT: [[TMP5:%.*]] = load i128, ptr [[COERCE_DIVE23]], align 16
// CHECK-NEXT: [[CALL24:%.*]] = call signext i32 (i32, ...) @f_va_callee(i32 noundef signext 1, i32 noundef signext 2, i32 noundef signext 3, i32 noundef signext 4, i32 noundef signext 5, i128 [[TMP5]], i32 noundef signext 7, i32 noundef signext 8, i32 noundef signext 9)
// CHECK-NEXT: [[A26:%.*]] = getelementptr inbounds [[STRUCT_SMALL]], ptr [[DOTCOMPOUNDLITERAL25]], i32 0, i32 0
// CHECK-NEXT: store i64 6, ptr [[A26]], align 8
// CHECK-NEXT: [[B27:%.*]] = getelementptr inbounds [[STRUCT_SMALL]], ptr [[DOTCOMPOUNDLITERAL25]], i32 0, i32 1
// CHECK-NEXT: store ptr null, ptr [[B27]], align 8
// CHECK-NEXT: [[TMP6:%.*]] = load [2 x i64], ptr [[DOTCOMPOUNDLITERAL25]], align 8
// CHECK-NEXT: [[CALL28:%.*]] = call signext i32 (i32, ...) @f_va_callee(i32 noundef signext 1, i32 noundef signext 2, i32 noundef signext 3, i32 noundef signext 4, i32 noundef signext 5, [2 x i64] [[TMP6]], i32 noundef signext 7, i32 noundef signext 8, i32 noundef signext 9)
// CHECK-NEXT: [[CALL29:%.*]] = call signext i32 (i32, ...) @f_va_callee(i32 noundef signext 1, i32 noundef signext 2, i32 noundef signext 3, i32 noundef signext 4, i32 noundef signext 5, i32 noundef signext 6, fp128 noundef 0xL00000000000000004001C00000000000, i32 noundef signext 8, i32 noundef signext 9)
// CHECK-NEXT: [[A31:%.*]] = getelementptr inbounds [[STRUCT_SMALL_ALIGNED]], ptr [[DOTCOMPOUNDLITERAL30]], i32 0, i32 0
// CHECK-NEXT: store i128 7, ptr [[A31]], align 16
// CHECK-NEXT: [[COERCE_DIVE32:%.*]] = getelementptr inbounds [[STRUCT_SMALL_ALIGNED]], ptr [[DOTCOMPOUNDLITERAL30]], i32 0, i32 0
// CHECK-NEXT: [[TMP7:%.*]] = load i128, ptr [[COERCE_DIVE32]], align 16
// CHECK-NEXT: [[CALL33:%.*]] = call signext i32 (i32, ...) @f_va_callee(i32 noundef signext 1, i32 noundef signext 2, i32 noundef signext 3, i32 noundef signext 4, i32 noundef signext 5, i32 noundef signext 6, i128 [[TMP7]], i32 noundef signext 8, i32 noundef signext 9)
// CHECK-NEXT: [[A35:%.*]] = getelementptr inbounds [[STRUCT_SMALL]], ptr [[DOTCOMPOUNDLITERAL34]], i32 0, i32 0
// CHECK-NEXT: store i64 7, ptr [[A35]], align 8
// CHECK-NEXT: [[B36:%.*]] = getelementptr inbounds [[STRUCT_SMALL]], ptr [[DOTCOMPOUNDLITERAL34]], i32 0, i32 1
// CHECK-NEXT: store ptr null, ptr [[B36]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = load [2 x i64], ptr [[DOTCOMPOUNDLITERAL34]], align 8
// CHECK-NEXT: [[CALL37:%.*]] = call signext i32 (i32, ...) @f_va_callee(i32 noundef signext 1, i32 noundef signext 2, i32 noundef signext 3, i32 noundef signext 4, i32 noundef signext 5, i32 noundef signext 6, [2 x i64] [[TMP8]], i32 noundef signext 8, i32 noundef signext 9)
// CHECK-NEXT: ret void
void f_va_caller(void) {
  f_va_callee(1, 2, 3LL, 4.0f, 5.0, (struct tiny){6, 7, 8, 9},
              (struct small){10, NULL}, (struct small_aligned){11},
              (struct large){12, 13, 14, 15});
  f_va_callee(1, 2, 3, 4, 5.0L, 6, 7, 8, 9);
  f_va_callee(1, 2, 3, 4, (struct small_aligned){5}, 6, 7, 8, 9);
  f_va_callee(1, 2, 3, 4, (struct small){5, NULL}, 6, 7, 8, 9);
  f_va_callee(1, 2, 3, 4, 5, 6.0L, 7, 8, 9);
  f_va_callee(1, 2, 3, 4, 5, (struct small_aligned){6}, 7, 8, 9);
  f_va_callee(1, 2, 3, 4, 5, (struct small){6, NULL}, 7, 8, 9);
  f_va_callee(1, 2, 3, 4, 5, 6, 7.0L, 8, 9);
  f_va_callee(1, 2, 3, 4, 5, 6, (struct small_aligned){7}, 8, 9);
  f_va_callee(1, 2, 3, 4, 5, 6, (struct small){7, NULL}, 8, 9);
}
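// f_va_1 reads a single int vararg. As the checks below show, each vararg
// occupies one xlen-sized (8-byte) slot: the saved pointer advances by 8 and
// only the low 32 bits of the slot are loaded for the int.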
// CHECK-LABEL: define dso_local signext i32 @f_va_1
// CHECK-SAME: (ptr noundef [[FMT:%.*]], ...) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[FMT_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VA:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[V:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[FMT]], ptr [[FMT_ADDR]], align 8
// CHECK-NEXT: call void @llvm.va_start(ptr [[VA]])
// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 8
// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i64 8
// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGP_CUR]], align 8
// CHECK-NEXT: store i32 [[TMP0]], ptr [[V]], align 4
// CHECK-NEXT: call void @llvm.va_end(ptr [[VA]])
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[V]], align 4
// CHECK-NEXT: ret i32 [[TMP1]]
int f_va_1(char *fmt, ...) {
  __builtin_va_list va;

  __builtin_va_start(va, fmt);
  int v = __builtin_va_arg(va, int);
  __builtin_va_end(va);

  return v;
}
// An "aligned" register pair (where the first register is even-numbered) is
// used to pass varargs with 2x xlen alignment and 2x xlen size. Ensure the
// correct offsets are used.
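// As the checks below show, va_arg for the 16-byte-aligned fp128 first rounds
// the saved pointer up to a 16-byte boundary (add 15, then mask with -16 via
// llvm.ptrmask, i.e. (p + 15) & ~15) before loading, and then advances it by
// 16 bytes.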
// CHECK-LABEL: define dso_local fp128 @f_va_2
// CHECK-SAME: (ptr noundef [[FMT:%.*]], ...) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[FMT_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VA:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[V:%.*]] = alloca fp128, align 16
// CHECK-NEXT: store ptr [[FMT]], ptr [[FMT_ADDR]], align 8
// CHECK-NEXT: call void @llvm.va_start(ptr [[VA]])
// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 15
// CHECK-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[TMP0]], i64 -16)
// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR_ALIGNED]], i64 16
// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load fp128, ptr [[ARGP_CUR_ALIGNED]], align 16
// CHECK-NEXT: store fp128 [[TMP1]], ptr [[V]], align 16
// CHECK-NEXT: call void @llvm.va_end(ptr [[VA]])
// CHECK-NEXT: [[TMP2:%.*]] = load fp128, ptr [[V]], align 16
// CHECK-NEXT: ret fp128 [[TMP2]]
long double f_va_2(char *fmt, ...) {
  __builtin_va_list va;

  __builtin_va_start(va, fmt);
  long double v = __builtin_va_arg(va, long double);
  __builtin_va_end(va);

  return v;
}
// Two "aligned" register pairs.
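// The int read between the two long doubles confirms that realignment only
// happens for the 16-byte-aligned arguments; per the checks below, the pointer
// advances by a plain 8-byte slot for the int and is rounded back up to a
// 16-byte boundary for the second long double.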
// CHECK-LABEL: define dso_local fp128 @f_va_3
// CHECK-SAME: (ptr noundef [[FMT:%.*]], ...) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[FMT_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VA:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[V:%.*]] = alloca fp128, align 16
// CHECK-NEXT: [[W:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[X:%.*]] = alloca fp128, align 16
// CHECK-NEXT: store ptr [[FMT]], ptr [[FMT_ADDR]], align 8
// CHECK-NEXT: call void @llvm.va_start(ptr [[VA]])
// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 15
// CHECK-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[TMP0]], i64 -16)
// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR_ALIGNED]], i64 16
// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load fp128, ptr [[ARGP_CUR_ALIGNED]], align 16
// CHECK-NEXT: store fp128 [[TMP1]], ptr [[V]], align 16
// CHECK-NEXT: [[ARGP_CUR1:%.*]] = load ptr, ptr [[VA]], align 8
// CHECK-NEXT: [[ARGP_NEXT2:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR1]], i64 8
// CHECK-NEXT: store ptr [[ARGP_NEXT2]], ptr [[VA]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARGP_CUR1]], align 8
// CHECK-NEXT: store i32 [[TMP2]], ptr [[W]], align 4
// CHECK-NEXT: [[ARGP_CUR3:%.*]] = load ptr, ptr [[VA]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR3]], i32 15
// CHECK-NEXT: [[ARGP_CUR3_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[TMP3]], i64 -16)
// CHECK-NEXT: [[ARGP_NEXT4:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR3_ALIGNED]], i64 16
// CHECK-NEXT: store ptr [[ARGP_NEXT4]], ptr [[VA]], align 8
// CHECK-NEXT: [[TMP4:%.*]] = load fp128, ptr [[ARGP_CUR3_ALIGNED]], align 16
// CHECK-NEXT: store fp128 [[TMP4]], ptr [[X]], align 16
// CHECK-NEXT: call void @llvm.va_end(ptr [[VA]])
// CHECK-NEXT: [[TMP5:%.*]] = load fp128, ptr [[V]], align 16
// CHECK-NEXT: [[TMP6:%.*]] = load fp128, ptr [[X]], align 16
// CHECK-NEXT: [[ADD:%.*]] = fadd fp128 [[TMP5]], [[TMP6]]
// CHECK-NEXT: ret fp128 [[ADD]]
long double f_va_3(char *fmt, ...) {
  __builtin_va_list va;

  __builtin_va_start(va, fmt);
  long double v = __builtin_va_arg(va, long double);
  int w = __builtin_va_arg(va, int);
  long double x = __builtin_va_arg(va, long double);
  __builtin_va_end(va);

  return v + x;
}
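// f_va_4 fetches aggregates from the va area. As the checks below show, struct
// tiny and struct small (no larger than 2*xlen bytes) are copied directly out
// of their va slots (8- and 16-byte memcpys), while struct large is passed
// indirectly: a pointer is loaded from the slot and the 32-byte copy goes
// through it.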
// CHECK-LABEL: define dso_local signext i32 @f_va_4
// CHECK-SAME: (ptr noundef [[FMT:%.*]], ...) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[FMT_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VA:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[V:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[TS:%.*]] = alloca [[STRUCT_TINY:%.*]], align 2
// CHECK-NEXT: [[SS:%.*]] = alloca [[STRUCT_SMALL:%.*]], align 8
// CHECK-NEXT: [[LS:%.*]] = alloca [[STRUCT_LARGE:%.*]], align 8
// CHECK-NEXT: [[RET:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[FMT]], ptr [[FMT_ADDR]], align 8
// CHECK-NEXT: call void @llvm.va_start(ptr [[VA]])
// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 8
// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i64 8
// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGP_CUR]], align 8
// CHECK-NEXT: store i32 [[TMP0]], ptr [[V]], align 4
// CHECK-NEXT: [[ARGP_CUR1:%.*]] = load ptr, ptr [[VA]], align 8
// CHECK-NEXT: [[ARGP_NEXT2:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR1]], i64 8
// CHECK-NEXT: store ptr [[ARGP_NEXT2]], ptr [[VA]], align 8
// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 2 [[TS]], ptr align 8 [[ARGP_CUR1]], i64 8, i1 false)
// CHECK-NEXT: [[ARGP_CUR3:%.*]] = load ptr, ptr [[VA]], align 8
// CHECK-NEXT: [[ARGP_NEXT4:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR3]], i64 16
// CHECK-NEXT: store ptr [[ARGP_NEXT4]], ptr [[VA]], align 8
// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[SS]], ptr align 8 [[ARGP_CUR3]], i64 16, i1 false)
// CHECK-NEXT: [[ARGP_CUR5:%.*]] = load ptr, ptr [[VA]], align 8
// CHECK-NEXT: [[ARGP_NEXT6:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR5]], i64 8
// CHECK-NEXT: store ptr [[ARGP_NEXT6]], ptr [[VA]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGP_CUR5]], align 8
// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[LS]], ptr align 8 [[TMP1]], i64 32, i1 false)
// CHECK-NEXT: call void @llvm.va_end(ptr [[VA]])
// CHECK-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_TINY]], ptr [[TS]], i32 0, i32 0
// CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr [[A]], align 2
// CHECK-NEXT: [[CONV:%.*]] = zext i16 [[TMP2]] to i64
// CHECK-NEXT: [[A7:%.*]] = getelementptr inbounds [[STRUCT_SMALL]], ptr [[SS]], i32 0, i32 0
// CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr [[A7]], align 8
// CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[CONV]], [[TMP3]]
// CHECK-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_LARGE]], ptr [[LS]], i32 0, i32 2
// CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr [[C]], align 8
// CHECK-NEXT: [[ADD8:%.*]] = add nsw i64 [[ADD]], [[TMP4]]
// CHECK-NEXT: [[CONV9:%.*]] = trunc i64 [[ADD8]] to i32
// CHECK-NEXT: store i32 [[CONV9]], ptr [[RET]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[RET]], align 4
// CHECK-NEXT: ret i32 [[TMP5]]
int f_va_4(char *fmt, ...) {
  __builtin_va_list va;

  __builtin_va_start(va, fmt);
  int v = __builtin_va_arg(va, int);
  struct tiny ts = __builtin_va_arg(va, struct tiny);
  struct small ss = __builtin_va_arg(va, struct small);
  struct large ls = __builtin_va_arg(va, struct large);
  __builtin_va_end(va);
  int ret = ts.a + ss.a + ls.c;

  return ret;
}