1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
2 ; RUN: llc < %s -mtriple=nvptx64-- -march=nvptx64 -mcpu=sm_52 -mattr=+ptx64 | FileCheck %s --check-prefix=CHECK-PTX
3 ; RUN: %if ptxas %{ llc < %s -mtriple=nvptx64-- -march=nvptx64 -mcpu=sm_52 -mattr=+ptx64 | %ptxas-verify %}
5 %struct.S1 = type { i32, i8, i64 }
6 %struct.S2 = type { i64, i64 }
8 @__const.bar.s1 = private unnamed_addr constant %struct.S1 { i32 1, i8 1, i64 1 }, align 8
9 @__const.qux.s = private unnamed_addr constant %struct.S2 { i64 1, i64 1 }, align 8
; Test: variadic callee mixing i32, i64 and double arguments.
; The IR hand-expands va_arg: a 4-byte cursor bump for each i32, then a
; gep(+7)/llvm.ptrmask(-8) realignment before every 8-byte (i64/double)
; read.  The CHECK lines pin the NVPTX lowering, where that realignment
; appears as `add.s64`/`and.b64 ..., -8` on the incoming vararg pointer
; (%rd1) and the result is returned via st.param.b32.
; NOTE(review): the function's `entry:` label and closing `ret`/`}` lines
; are not visible in this excerpt of the file — confirm against the full
; test before regenerating checks.
11 define dso_local i32 @variadics1(i32 noundef %first, ...) {
12 ; CHECK-PTX-LABEL: variadics1(
14 ; CHECK-PTX-NEXT: .reg .b32 %r<11>;
15 ; CHECK-PTX-NEXT: .reg .b64 %rd<11>;
16 ; CHECK-PTX-NEXT: .reg .f64 %fd<7>;
18 ; CHECK-PTX-NEXT: // %bb.0: // %entry
19 ; CHECK-PTX-NEXT: ld.param.u32 %r1, [variadics1_param_0];
20 ; CHECK-PTX-NEXT: ld.param.u64 %rd1, [variadics1_param_1];
21 ; CHECK-PTX-NEXT: ld.u32 %r2, [%rd1];
22 ; CHECK-PTX-NEXT: add.s32 %r3, %r1, %r2;
23 ; CHECK-PTX-NEXT: ld.u32 %r4, [%rd1+4];
24 ; CHECK-PTX-NEXT: add.s32 %r5, %r3, %r4;
25 ; CHECK-PTX-NEXT: ld.u32 %r6, [%rd1+8];
26 ; CHECK-PTX-NEXT: add.s32 %r7, %r5, %r6;
27 ; CHECK-PTX-NEXT: add.s64 %rd2, %rd1, 19;
28 ; CHECK-PTX-NEXT: and.b64 %rd3, %rd2, -8;
29 ; CHECK-PTX-NEXT: ld.u64 %rd4, [%rd3];
30 ; CHECK-PTX-NEXT: cvt.u64.u32 %rd5, %r7;
31 ; CHECK-PTX-NEXT: add.s64 %rd6, %rd5, %rd4;
32 ; CHECK-PTX-NEXT: cvt.u32.u64 %r8, %rd6;
33 ; CHECK-PTX-NEXT: add.s64 %rd7, %rd3, 15;
34 ; CHECK-PTX-NEXT: and.b64 %rd8, %rd7, -8;
35 ; CHECK-PTX-NEXT: ld.f64 %fd1, [%rd8];
36 ; CHECK-PTX-NEXT: cvt.rn.f64.s32 %fd2, %r8;
37 ; CHECK-PTX-NEXT: add.rn.f64 %fd3, %fd2, %fd1;
38 ; CHECK-PTX-NEXT: cvt.rzi.s32.f64 %r9, %fd3;
39 ; CHECK-PTX-NEXT: add.s64 %rd9, %rd8, 15;
40 ; CHECK-PTX-NEXT: and.b64 %rd10, %rd9, -8;
41 ; CHECK-PTX-NEXT: ld.f64 %fd4, [%rd10];
42 ; CHECK-PTX-NEXT: cvt.rn.f64.s32 %fd5, %r9;
43 ; CHECK-PTX-NEXT: add.rn.f64 %fd6, %fd5, %fd4;
44 ; CHECK-PTX-NEXT: cvt.rzi.s32.f64 %r10, %fd6;
45 ; CHECK-PTX-NEXT: st.param.b32 [func_retval0+0], %r10;
46 ; CHECK-PTX-NEXT: ret;
; The va_list is a single pointer slot; va_start stores the cursor into it.
48 %vlist = alloca ptr, align 8
49 call void @llvm.va_start.p0(ptr %vlist)
; Three i32 varargs: each load is paired with a 4-byte cursor bump.
50 %argp.cur = load ptr, ptr %vlist, align 8
51 %argp.next = getelementptr inbounds i8, ptr %argp.cur, i64 4
52 store ptr %argp.next, ptr %vlist, align 8
53 %0 = load i32, ptr %argp.cur, align 4
54 %add = add nsw i32 %first, %0
55 %argp.cur1 = load ptr, ptr %vlist, align 8
56 %argp.next2 = getelementptr inbounds i8, ptr %argp.cur1, i64 4
57 store ptr %argp.next2, ptr %vlist, align 8
58 %1 = load i32, ptr %argp.cur1, align 4
59 %add3 = add nsw i32 %add, %1
60 %argp.cur4 = load ptr, ptr %vlist, align 8
61 %argp.next5 = getelementptr inbounds i8, ptr %argp.cur4, i64 4
62 store ptr %argp.next5, ptr %vlist, align 8
63 %2 = load i32, ptr %argp.cur4, align 4
64 %add6 = add nsw i32 %add3, %2
; i64 vararg: round the cursor up to 8 bytes (gep +7, ptrmask -8) first.
65 %argp.cur7 = load ptr, ptr %vlist, align 8
66 %3 = getelementptr inbounds i8, ptr %argp.cur7, i32 7
67 %argp.cur7.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %3, i64 -8)
68 %argp.next8 = getelementptr inbounds i8, ptr %argp.cur7.aligned, i64 8
69 store ptr %argp.next8, ptr %vlist, align 8
70 %4 = load i64, ptr %argp.cur7.aligned, align 8
71 %conv = sext i32 %add6 to i64
72 %add9 = add nsw i64 %conv, %4
73 %conv10 = trunc i64 %add9 to i32
; First double vararg: same 8-byte realignment pattern.
74 %argp.cur11 = load ptr, ptr %vlist, align 8
75 %5 = getelementptr inbounds i8, ptr %argp.cur11, i32 7
76 %argp.cur11.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %5, i64 -8)
77 %argp.next12 = getelementptr inbounds i8, ptr %argp.cur11.aligned, i64 8
78 store ptr %argp.next12, ptr %vlist, align 8
79 %6 = load double, ptr %argp.cur11.aligned, align 8
80 %conv13 = sitofp i32 %conv10 to double
81 %add14 = fadd double %conv13, %6
82 %conv15 = fptosi double %add14 to i32
; Second double vararg.
83 %argp.cur16 = load ptr, ptr %vlist, align 8
84 %7 = getelementptr inbounds i8, ptr %argp.cur16, i32 7
85 %argp.cur16.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %7, i64 -8)
86 %argp.next17 = getelementptr inbounds i8, ptr %argp.cur16.aligned, i64 8
87 store ptr %argp.next17, ptr %vlist, align 8
88 %8 = load double, ptr %argp.cur16.aligned, align 8
89 %conv18 = sitofp i32 %conv15 to double
90 %add19 = fadd double %conv18, %8
91 %conv20 = fptosi double %add19 to i32
92 call void @llvm.va_end.p0(ptr %vlist)
96 declare void @llvm.va_start.p0(ptr)
98 declare ptr @llvm.ptrmask.p0.i64(ptr, i64)
100 declare void @llvm.va_end.p0(ptr)
; Caller side for @variadics1.  The front end has already applied the C
; default argument promotions (i8/i16 -> i32 via sext, float -> double via
; fpext, visible below), so the CHECK lines verify that the variadic
; arguments are materialized into the local depot (__local_depot1) and the
; depot's address is passed as the single vararg pointer (param1).
; NOTE(review): the function's closing `ret`/`}` lines are not visible in
; this excerpt of the file.
102 define dso_local i32 @foo() {
103 ; CHECK-PTX-LABEL: foo(
105 ; CHECK-PTX-NEXT: .local .align 8 .b8 __local_depot1[40];
106 ; CHECK-PTX-NEXT: .reg .b64 %SP;
107 ; CHECK-PTX-NEXT: .reg .b64 %SPL;
108 ; CHECK-PTX-NEXT: .reg .b32 %r<4>;
109 ; CHECK-PTX-NEXT: .reg .b64 %rd<5>;
111 ; CHECK-PTX-NEXT: // %bb.0: // %entry
112 ; CHECK-PTX-NEXT: mov.u64 %SPL, __local_depot1;
113 ; CHECK-PTX-NEXT: cvta.local.u64 %SP, %SPL;
114 ; CHECK-PTX-NEXT: mov.u64 %rd1, 4294967297;
115 ; CHECK-PTX-NEXT: st.u64 [%SP+0], %rd1;
116 ; CHECK-PTX-NEXT: mov.b32 %r1, 1;
117 ; CHECK-PTX-NEXT: st.u32 [%SP+8], %r1;
118 ; CHECK-PTX-NEXT: mov.u64 %rd2, 1;
119 ; CHECK-PTX-NEXT: st.u64 [%SP+16], %rd2;
120 ; CHECK-PTX-NEXT: mov.u64 %rd3, 4607182418800017408;
121 ; CHECK-PTX-NEXT: st.u64 [%SP+24], %rd3;
122 ; CHECK-PTX-NEXT: st.u64 [%SP+32], %rd3;
123 ; CHECK-PTX-NEXT: add.u64 %rd4, %SP, 0;
124 ; CHECK-PTX-NEXT: { // callseq 0, 0
125 ; CHECK-PTX-NEXT: .param .b32 param0;
126 ; CHECK-PTX-NEXT: st.param.b32 [param0+0], 1;
127 ; CHECK-PTX-NEXT: .param .b64 param1;
128 ; CHECK-PTX-NEXT: st.param.b64 [param1+0], %rd4;
129 ; CHECK-PTX-NEXT: .param .b32 retval0;
130 ; CHECK-PTX-NEXT: call.uni (retval0),
131 ; CHECK-PTX-NEXT: variadics1,
133 ; CHECK-PTX-NEXT: param0,
134 ; CHECK-PTX-NEXT: param1
136 ; CHECK-PTX-NEXT: ld.param.b32 %r2, [retval0+0];
137 ; CHECK-PTX-NEXT: } // callseq 0
138 ; CHECK-PTX-NEXT: st.param.b32 [func_retval0+0], %r2;
139 ; CHECK-PTX-NEXT: ret;
; Default argument promotions of the small constants (all fold to 1/1.0).
141 %conv = sext i8 1 to i32
142 %conv1 = sext i16 1 to i32
143 %conv2 = fpext float 1.000000e+00 to double
144 %call = call i32 (i32, ...) @variadics1(i32 noundef 1, i32 noundef %conv, i32 noundef %conv1, i32 noundef 1, i64 noundef 1, double noundef %conv2, double noundef 1.000000e+00)
; Test: a struct (%struct.S1 = { i32, i8, i64 }) passed by value through
; the varargs area.  SROA has split the struct into piecewise loads (i32
; field, i8 field, an 8-byte i64 field) plus a 3-byte memcpy of the
; padding tail into a local alloca.  The cursor is realigned to 8 bytes
; (gep +7 / ptrmask -8) and advanced by 16 (sizeof S1 with padding).
; NOTE(review): the function's `entry:` label and closing `ret`/`}` lines
; are not visible in this excerpt of the file.
148 define dso_local i32 @variadics2(i32 noundef %first, ...) {
149 ; CHECK-PTX-LABEL: variadics2(
151 ; CHECK-PTX-NEXT: .local .align 2 .b8 __local_depot2[4];
152 ; CHECK-PTX-NEXT: .reg .b64 %SP;
153 ; CHECK-PTX-NEXT: .reg .b64 %SPL;
154 ; CHECK-PTX-NEXT: .reg .b16 %rs<6>;
155 ; CHECK-PTX-NEXT: .reg .b32 %r<7>;
156 ; CHECK-PTX-NEXT: .reg .b64 %rd<11>;
158 ; CHECK-PTX-NEXT: // %bb.0: // %entry
159 ; CHECK-PTX-NEXT: mov.u64 %SPL, __local_depot2;
160 ; CHECK-PTX-NEXT: cvta.local.u64 %SP, %SPL;
161 ; CHECK-PTX-NEXT: ld.param.u32 %r1, [variadics2_param_0];
162 ; CHECK-PTX-NEXT: ld.param.u64 %rd1, [variadics2_param_1];
163 ; CHECK-PTX-NEXT: add.s64 %rd2, %rd1, 7;
164 ; CHECK-PTX-NEXT: and.b64 %rd3, %rd2, -8;
165 ; CHECK-PTX-NEXT: ld.u32 %r2, [%rd3];
166 ; CHECK-PTX-NEXT: or.b64 %rd4, %rd3, 4;
167 ; CHECK-PTX-NEXT: ld.s8 %r3, [%rd4];
168 ; CHECK-PTX-NEXT: or.b64 %rd5, %rd3, 5;
169 ; CHECK-PTX-NEXT: or.b64 %rd6, %rd3, 7;
170 ; CHECK-PTX-NEXT: ld.u8 %rs1, [%rd6];
171 ; CHECK-PTX-NEXT: st.u8 [%SP+2], %rs1;
172 ; CHECK-PTX-NEXT: ld.u8 %rs2, [%rd5];
173 ; CHECK-PTX-NEXT: or.b64 %rd7, %rd3, 6;
174 ; CHECK-PTX-NEXT: ld.u8 %rs3, [%rd7];
175 ; CHECK-PTX-NEXT: shl.b16 %rs4, %rs3, 8;
176 ; CHECK-PTX-NEXT: or.b16 %rs5, %rs4, %rs2;
177 ; CHECK-PTX-NEXT: st.u16 [%SP+0], %rs5;
178 ; CHECK-PTX-NEXT: ld.u64 %rd8, [%rd3+8];
179 ; CHECK-PTX-NEXT: add.s32 %r4, %r1, %r2;
180 ; CHECK-PTX-NEXT: add.s32 %r5, %r4, %r3;
181 ; CHECK-PTX-NEXT: cvt.u64.u32 %rd9, %r5;
182 ; CHECK-PTX-NEXT: add.s64 %rd10, %rd9, %rd8;
183 ; CHECK-PTX-NEXT: cvt.u32.u64 %r6, %rd10;
184 ; CHECK-PTX-NEXT: st.param.b32 [func_retval0+0], %r6;
185 ; CHECK-PTX-NEXT: ret;
187 %vlist = alloca ptr, align 8
; Local copy of the struct's 3 padding-tail bytes (SROA remnant).
188 %s1.sroa.3 = alloca [3 x i8], align 1
189 call void @llvm.va_start.p0(ptr %vlist)
; Align the cursor to 8 and advance past the whole 16-byte struct slot.
190 %argp.cur = load ptr, ptr %vlist, align 8
191 %0 = getelementptr inbounds i8, ptr %argp.cur, i32 7
192 %argp.cur.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %0, i64 -8)
193 %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i64 16
194 store ptr %argp.next, ptr %vlist, align 8
; Piecewise loads of the struct fields: i32 at +0, i8 at +4, i64 at +8.
195 %s1.sroa.0.0.copyload = load i32, ptr %argp.cur.aligned, align 8
196 %s1.sroa.2.0.argp.cur.aligned.sroa_idx = getelementptr inbounds i8, ptr %argp.cur.aligned, i64 4
197 %s1.sroa.2.0.copyload = load i8, ptr %s1.sroa.2.0.argp.cur.aligned.sroa_idx, align 4
198 %s1.sroa.3.0.argp.cur.aligned.sroa_idx = getelementptr inbounds i8, ptr %argp.cur.aligned, i64 5
199 call void @llvm.memcpy.p0.p0.i64(ptr align 1 %s1.sroa.3, ptr align 1 %s1.sroa.3.0.argp.cur.aligned.sroa_idx, i64 3, i1 false)
200 %s1.sroa.31.0.argp.cur.aligned.sroa_idx = getelementptr inbounds i8, ptr %argp.cur.aligned, i64 8
201 %s1.sroa.31.0.copyload = load i64, ptr %s1.sroa.31.0.argp.cur.aligned.sroa_idx, align 8
; Accumulate first + x + (sext)c + i64 field, truncated back to i32.
202 %add = add nsw i32 %first, %s1.sroa.0.0.copyload
203 %conv = sext i8 %s1.sroa.2.0.copyload to i32
204 %add1 = add nsw i32 %add, %conv
205 %conv2 = sext i32 %add1 to i64
206 %add3 = add nsw i64 %conv2, %s1.sroa.31.0.copyload
207 %conv4 = trunc i64 %add3 to i32
208 call void @llvm.va_end.p0(ptr %vlist)
212 declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
; Caller side for @variadics2.  The S1 value is copied field-by-field out
; of the constant global @__const.bar.s1 (note the PTX-mangled name
; __const_$_bar_$_s1 in the CHECK lines) and reassembled in the local
; depot before its pieces are passed as varargs.
; NOTE(review): the function's `entry:` label and closing `ret`/`}` lines
; are not visible in this excerpt of the file.
214 define dso_local i32 @bar() {
215 ; CHECK-PTX-LABEL: bar(
217 ; CHECK-PTX-NEXT: .local .align 8 .b8 __local_depot3[24];
218 ; CHECK-PTX-NEXT: .reg .b64 %SP;
219 ; CHECK-PTX-NEXT: .reg .b64 %SPL;
220 ; CHECK-PTX-NEXT: .reg .b16 %rs<10>;
221 ; CHECK-PTX-NEXT: .reg .b32 %r<4>;
222 ; CHECK-PTX-NEXT: .reg .b64 %rd<8>;
224 ; CHECK-PTX-NEXT: // %bb.0: // %entry
225 ; CHECK-PTX-NEXT: mov.u64 %SPL, __local_depot3;
226 ; CHECK-PTX-NEXT: cvta.local.u64 %SP, %SPL;
227 ; CHECK-PTX-NEXT: mov.u64 %rd1, __const_$_bar_$_s1;
228 ; CHECK-PTX-NEXT: add.s64 %rd2, %rd1, 7;
229 ; CHECK-PTX-NEXT: ld.global.nc.u8 %rs1, [%rd2];
230 ; CHECK-PTX-NEXT: cvt.u16.u8 %rs2, %rs1;
231 ; CHECK-PTX-NEXT: st.u8 [%SP+2], %rs2;
232 ; CHECK-PTX-NEXT: add.s64 %rd3, %rd1, 5;
233 ; CHECK-PTX-NEXT: ld.global.nc.u8 %rs3, [%rd3];
234 ; CHECK-PTX-NEXT: cvt.u16.u8 %rs4, %rs3;
235 ; CHECK-PTX-NEXT: add.s64 %rd4, %rd1, 6;
236 ; CHECK-PTX-NEXT: ld.global.nc.u8 %rs5, [%rd4];
237 ; CHECK-PTX-NEXT: cvt.u16.u8 %rs6, %rs5;
238 ; CHECK-PTX-NEXT: shl.b16 %rs7, %rs6, 8;
239 ; CHECK-PTX-NEXT: or.b16 %rs8, %rs7, %rs4;
240 ; CHECK-PTX-NEXT: st.u16 [%SP+0], %rs8;
241 ; CHECK-PTX-NEXT: mov.b32 %r1, 1;
242 ; CHECK-PTX-NEXT: st.u32 [%SP+8], %r1;
243 ; CHECK-PTX-NEXT: add.u64 %rd5, %SP, 8;
244 ; CHECK-PTX-NEXT: or.b64 %rd6, %rd5, 4;
245 ; CHECK-PTX-NEXT: mov.u16 %rs9, 1;
246 ; CHECK-PTX-NEXT: st.u8 [%rd6], %rs9;
247 ; CHECK-PTX-NEXT: mov.u64 %rd7, 1;
248 ; CHECK-PTX-NEXT: st.u64 [%SP+16], %rd7;
249 ; CHECK-PTX-NEXT: { // callseq 1, 0
250 ; CHECK-PTX-NEXT: .param .b32 param0;
251 ; CHECK-PTX-NEXT: st.param.b32 [param0+0], 1;
252 ; CHECK-PTX-NEXT: .param .b64 param1;
253 ; CHECK-PTX-NEXT: st.param.b64 [param1+0], %rd5;
254 ; CHECK-PTX-NEXT: .param .b32 retval0;
255 ; CHECK-PTX-NEXT: call.uni (retval0),
256 ; CHECK-PTX-NEXT: variadics2,
258 ; CHECK-PTX-NEXT: param0,
259 ; CHECK-PTX-NEXT: param1
261 ; CHECK-PTX-NEXT: ld.param.b32 %r2, [retval0+0];
262 ; CHECK-PTX-NEXT: } // callseq 1
263 ; CHECK-PTX-NEXT: st.param.b32 [func_retval0+0], %r2;
264 ; CHECK-PTX-NEXT: ret;
; SROA pieces of the constant S1: i32 at +0, i8 at +4, 3 tail bytes at +5,
; i64 at +8 (matching the split done in @variadics2).
266 %s1.sroa.3 = alloca [3 x i8], align 1
267 %s1.sroa.0.0.copyload = load i32, ptr @__const.bar.s1, align 8
268 %s1.sroa.2.0.copyload = load i8, ptr getelementptr inbounds (i8, ptr @__const.bar.s1, i64 4), align 4
269 call void @llvm.memcpy.p0.p0.i64(ptr align 1 %s1.sroa.3, ptr align 1 getelementptr inbounds (i8, ptr @__const.bar.s1, i64 5), i64 3, i1 false)
270 %s1.sroa.31.0.copyload = load i64, ptr getelementptr inbounds (i8, ptr @__const.bar.s1, i64 8), align 8
271 %call = call i32 (i32, ...) @variadics2(i32 noundef 1, i32 %s1.sroa.0.0.copyload, i8 %s1.sroa.2.0.copyload, i64 %s1.sroa.31.0.copyload)
; Test: a <4 x i32> vector vararg, which requires 16-byte alignment — the
; cursor is rounded up with gep(+15)/ptrmask(-16), and the CHECK lines
; verify the lowering uses `and.b64 ..., -16` and a single vectorized
; ld.v4.u32 for the load.  The four lanes are summed after va_end.
; NOTE(review): the function's `entry:` label and closing `ret`/`}` lines
; are not visible in this excerpt of the file.
275 define dso_local i32 @variadics3(i32 noundef %first, ...) {
276 ; CHECK-PTX-LABEL: variadics3(
278 ; CHECK-PTX-NEXT: .reg .b32 %r<8>;
279 ; CHECK-PTX-NEXT: .reg .b64 %rd<4>;
281 ; CHECK-PTX-NEXT: // %bb.0: // %entry
282 ; CHECK-PTX-NEXT: ld.param.u64 %rd1, [variadics3_param_1];
283 ; CHECK-PTX-NEXT: add.s64 %rd2, %rd1, 15;
284 ; CHECK-PTX-NEXT: and.b64 %rd3, %rd2, -16;
285 ; CHECK-PTX-NEXT: ld.v4.u32 {%r1, %r2, %r3, %r4}, [%rd3];
286 ; CHECK-PTX-NEXT: add.s32 %r5, %r1, %r2;
287 ; CHECK-PTX-NEXT: add.s32 %r6, %r5, %r3;
288 ; CHECK-PTX-NEXT: add.s32 %r7, %r6, %r4;
289 ; CHECK-PTX-NEXT: st.param.b32 [func_retval0+0], %r7;
290 ; CHECK-PTX-NEXT: ret;
292 %vlist = alloca ptr, align 8
293 call void @llvm.va_start.p0(ptr %vlist)
; 16-byte realignment for the vector slot, then a 16-byte cursor bump.
294 %argp.cur = load ptr, ptr %vlist, align 8
295 %0 = getelementptr inbounds i8, ptr %argp.cur, i32 15
296 %argp.cur.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %0, i64 -16)
297 %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i64 16
298 store ptr %argp.next, ptr %vlist, align 8
299 %1 = load <4 x i32>, ptr %argp.cur.aligned, align 16
300 call void @llvm.va_end.p0(ptr %vlist)
; Horizontal sum of the four lanes.
301 %2 = extractelement <4 x i32> %1, i64 0
302 %3 = extractelement <4 x i32> %1, i64 1
303 %add = add nsw i32 %2, %3
304 %4 = extractelement <4 x i32> %1, i64 2
305 %add1 = add nsw i32 %add, %4
306 %5 = extractelement <4 x i32> %1, i64 3
307 %add2 = add nsw i32 %add1, %5
; Caller side for @variadics3: the <4 x i32> splat-of-1 vararg is stored
; to a 16-byte-aligned local depot with a single st.v4.u32, and the depot
; address is passed as the vararg pointer (param1).
; NOTE(review): the function's `entry:` label and closing `ret`/`}` lines
; are not visible in this excerpt of the file.
311 define dso_local i32 @baz() {
312 ; CHECK-PTX-LABEL: baz(
314 ; CHECK-PTX-NEXT: .local .align 16 .b8 __local_depot5[16];
315 ; CHECK-PTX-NEXT: .reg .b64 %SP;
316 ; CHECK-PTX-NEXT: .reg .b64 %SPL;
317 ; CHECK-PTX-NEXT: .reg .b32 %r<4>;
318 ; CHECK-PTX-NEXT: .reg .b64 %rd<2>;
320 ; CHECK-PTX-NEXT: // %bb.0: // %entry
321 ; CHECK-PTX-NEXT: mov.u64 %SPL, __local_depot5;
322 ; CHECK-PTX-NEXT: cvta.local.u64 %SP, %SPL;
323 ; CHECK-PTX-NEXT: mov.b32 %r1, 1;
324 ; CHECK-PTX-NEXT: st.v4.u32 [%SP+0], {%r1, %r1, %r1, %r1};
325 ; CHECK-PTX-NEXT: add.u64 %rd1, %SP, 0;
326 ; CHECK-PTX-NEXT: { // callseq 2, 0
327 ; CHECK-PTX-NEXT: .param .b32 param0;
328 ; CHECK-PTX-NEXT: st.param.b32 [param0+0], 1;
329 ; CHECK-PTX-NEXT: .param .b64 param1;
330 ; CHECK-PTX-NEXT: st.param.b64 [param1+0], %rd1;
331 ; CHECK-PTX-NEXT: .param .b32 retval0;
332 ; CHECK-PTX-NEXT: call.uni (retval0),
333 ; CHECK-PTX-NEXT: variadics3,
335 ; CHECK-PTX-NEXT: param0,
336 ; CHECK-PTX-NEXT: param1
338 ; CHECK-PTX-NEXT: ld.param.b32 %r2, [retval0+0];
339 ; CHECK-PTX-NEXT: } // callseq 2
340 ; CHECK-PTX-NEXT: st.param.b32 [func_retval0+0], %r2;
341 ; CHECK-PTX-NEXT: ret;
343 %call = call i32 (i32, ...) @variadics3(i32 noundef 1, <4 x i32> noundef <i32 1, i32 1, i32 1, i32 1>)
; Test: a fixed byval struct parameter (%struct.S2 = { i64, i64 })
; combined with an i64 vararg.  The CHECK lines verify the byval struct
; is read directly from the param space ([variadics4_param_0] and +8)
; while the vararg still goes through the 8-byte-realigned cursor.
; NOTE(review): the function's `entry:` label and closing `ret`/`}` lines
; are not visible in this excerpt of the file.
347 define dso_local i32 @variadics4(ptr noundef byval(%struct.S2) align 8 %first, ...) {
348 ; CHECK-PTX-LABEL: variadics4(
350 ; CHECK-PTX-NEXT: .reg .b32 %r<2>;
351 ; CHECK-PTX-NEXT: .reg .b64 %rd<9>;
353 ; CHECK-PTX-NEXT: // %bb.0: // %entry
354 ; CHECK-PTX-NEXT: ld.param.u64 %rd1, [variadics4_param_1];
355 ; CHECK-PTX-NEXT: add.s64 %rd2, %rd1, 7;
356 ; CHECK-PTX-NEXT: and.b64 %rd3, %rd2, -8;
357 ; CHECK-PTX-NEXT: ld.u64 %rd4, [%rd3];
358 ; CHECK-PTX-NEXT: ld.param.u64 %rd5, [variadics4_param_0];
359 ; CHECK-PTX-NEXT: ld.param.u64 %rd6, [variadics4_param_0+8];
360 ; CHECK-PTX-NEXT: add.s64 %rd7, %rd5, %rd6;
361 ; CHECK-PTX-NEXT: add.s64 %rd8, %rd7, %rd4;
362 ; CHECK-PTX-NEXT: cvt.u32.u64 %r1, %rd8;
363 ; CHECK-PTX-NEXT: st.param.b32 [func_retval0+0], %r1;
364 ; CHECK-PTX-NEXT: ret;
366 %vlist = alloca ptr, align 8
367 call void @llvm.va_start.p0(ptr %vlist)
; i64 vararg: 8-byte realignment then an 8-byte cursor bump.
368 %argp.cur = load ptr, ptr %vlist, align 8
369 %0 = getelementptr inbounds i8, ptr %argp.cur, i32 7
370 %argp.cur.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %0, i64 -8)
371 %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i64 8
372 store ptr %argp.next, ptr %vlist, align 8
373 %1 = load i64, ptr %argp.cur.aligned, align 8
; Read both i64 fields of the byval struct.
374 %x1 = getelementptr inbounds %struct.S2, ptr %first, i32 0, i32 0
375 %2 = load i64, ptr %x1, align 8
376 %y = getelementptr inbounds %struct.S2, ptr %first, i32 0, i32 1
377 %3 = load i64, ptr %y, align 8
378 %add = add nsw i64 %2, %3
379 %add2 = add nsw i64 %add, %1
380 %conv = trunc i64 %add2 to i32
381 call void @llvm.va_end.p0(ptr %vlist)
; Caller side for @variadics4: copies the constant global @__const.qux.s
; (PTX-mangled to __const_$_qux_$_s) into a local S2, passes it byval as
; an aggregate .param (param0[16]), and stores the i64 vararg into the
; local depot whose address becomes param1.  Return value is discarded.
; NOTE(review): the function's `entry:` label and closing `ret`/`}` lines
; are not visible in this excerpt of the file.
385 define dso_local void @qux() {
386 ; CHECK-PTX-LABEL: qux(
388 ; CHECK-PTX-NEXT: .local .align 8 .b8 __local_depot7[24];
389 ; CHECK-PTX-NEXT: .reg .b64 %SP;
390 ; CHECK-PTX-NEXT: .reg .b64 %SPL;
391 ; CHECK-PTX-NEXT: .reg .b32 %r<3>;
392 ; CHECK-PTX-NEXT: .reg .b64 %rd<7>;
394 ; CHECK-PTX-NEXT: // %bb.0: // %entry
395 ; CHECK-PTX-NEXT: mov.u64 %SPL, __local_depot7;
396 ; CHECK-PTX-NEXT: cvta.local.u64 %SP, %SPL;
397 ; CHECK-PTX-NEXT: ld.global.nc.u64 %rd1, [__const_$_qux_$_s];
398 ; CHECK-PTX-NEXT: st.u64 [%SP+0], %rd1;
399 ; CHECK-PTX-NEXT: mov.u64 %rd2, __const_$_qux_$_s;
400 ; CHECK-PTX-NEXT: add.s64 %rd3, %rd2, 8;
401 ; CHECK-PTX-NEXT: ld.global.nc.u64 %rd4, [%rd3];
402 ; CHECK-PTX-NEXT: st.u64 [%SP+8], %rd4;
403 ; CHECK-PTX-NEXT: mov.u64 %rd5, 1;
404 ; CHECK-PTX-NEXT: st.u64 [%SP+16], %rd5;
405 ; CHECK-PTX-NEXT: add.u64 %rd6, %SP, 16;
406 ; CHECK-PTX-NEXT: { // callseq 3, 0
407 ; CHECK-PTX-NEXT: .param .align 8 .b8 param0[16];
408 ; CHECK-PTX-NEXT: st.param.b64 [param0+0], %rd1;
409 ; CHECK-PTX-NEXT: st.param.b64 [param0+8], %rd4;
410 ; CHECK-PTX-NEXT: .param .b64 param1;
411 ; CHECK-PTX-NEXT: st.param.b64 [param1+0], %rd6;
412 ; CHECK-PTX-NEXT: .param .b32 retval0;
413 ; CHECK-PTX-NEXT: call.uni (retval0),
414 ; CHECK-PTX-NEXT: variadics4,
416 ; CHECK-PTX-NEXT: param0,
417 ; CHECK-PTX-NEXT: param1
419 ; CHECK-PTX-NEXT: ld.param.b32 %r1, [retval0+0];
420 ; CHECK-PTX-NEXT: } // callseq 3
421 ; CHECK-PTX-NEXT: ret;
; Materialize the byval S2 argument from the constant initializer.
423 %s = alloca %struct.S2, align 8
424 call void @llvm.memcpy.p0.p0.i64(ptr align 8 %s, ptr align 8 @__const.qux.s, i64 16, i1 false)
425 %call = call i32 (ptr, ...) @variadics4(ptr noundef byval(%struct.S2) align 8 %s, i64 noundef 1)