1 ; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
3 ;;; Test vector load intrinsic instructions
6 ;;; We test VLD*rrl, VLD*irl, VLD*rrl_v, and VLD*irl_v instructions.
8 ; Function Attrs: nounwind
; VLD with scalar stride/base; inline-asm vst keeps %v0 live so isel is observable.
define void @vld_vssl(ptr %0, i64 %1) {
; CHECK-LABEL: vld_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
  ret void
}
24 ; Function Attrs: nounwind readonly
25 declare <256 x double> @llvm.ve.vl.vld.vssl(i64, ptr, i32)
27 ; Function Attrs: nounwind
; VLD with pass-through vector operand (vssvl form); second load reuses %v0.
define void @vld_vssvl(ptr %0, i64 %1, ptr %2) {
; CHECK-LABEL: vld_vssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s3, 256
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vld %v0, %s1, %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %2, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vld.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
  ret void
}
45 ; Function Attrs: nounwind readonly
46 declare <256 x double> @llvm.ve.vl.vld.vssvl(i64, ptr, <256 x double>, i32)
48 ; Function Attrs: nounwind
; VLD with immediate stride 8 (exercises the *irl pattern).
define void @vld_vssl_imm(ptr %0) {
; CHECK-LABEL: vld_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
  ret void
}
64 ; Function Attrs: nounwind
; VLD, immediate stride 8, with pass-through vector operand (*irl_v pattern).
define void @vld_vssvl_imm(ptr %0, ptr %1) {
; CHECK-LABEL: vld_vssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, 8, %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vld.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
  ret void
}
82 ; Function Attrs: nounwind
; VLD.NC (non-cacheable) with scalar stride/base.
define void @vldnc_vssl(ptr %0, i64 %1) {
; CHECK-LABEL: vldnc_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld.nc %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssl(i64 %1, ptr %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
  ret void
}
98 ; Function Attrs: nounwind readonly
99 declare <256 x double> @llvm.ve.vl.vldnc.vssl(i64, ptr, i32)
101 ; Function Attrs: nounwind
; VLD.NC with pass-through vector operand (vssvl form).
define void @vldnc_vssvl(ptr %0, i64 %1, ptr %2) {
; CHECK-LABEL: vldnc_vssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s3, 256
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vld.nc %v0, %s1, %s2
; CHECK-NEXT:    vld.nc %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssl(i64 %1, ptr %2, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
  ret void
}
119 ; Function Attrs: nounwind readonly
120 declare <256 x double> @llvm.ve.vl.vldnc.vssvl(i64, ptr, <256 x double>, i32)
122 ; Function Attrs: nounwind
; VLD.NC with immediate stride 8.
define void @vldnc_vssl_imm(ptr %0) {
; CHECK-LABEL: vldnc_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld.nc %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssl(i64 8, ptr %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
  ret void
}
138 ; Function Attrs: nounwind
; VLD.NC, immediate stride 8, pass-through vector operand.
define void @vldnc_vssvl_imm(ptr %0, ptr %1) {
; CHECK-LABEL: vldnc_vssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld.nc %v0, 8, %s1
; CHECK-NEXT:    vld.nc %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssl(i64 8, ptr %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
  ret void
}
156 ; Function Attrs: nounwind
; VLDU (upper 32-bit load) with scalar stride/base.
define void @vldu_vssl(ptr %0, i64 %1) {
; CHECK-LABEL: vldu_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldu %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssl(i64 %1, ptr %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
  ret void
}
172 ; Function Attrs: nounwind readonly
173 declare <256 x double> @llvm.ve.vl.vldu.vssl(i64, ptr, i32)
175 ; Function Attrs: nounwind
; VLDU with pass-through vector operand (vssvl form).
define void @vldu_vssvl(ptr %0, i64 %1, ptr %2) {
; CHECK-LABEL: vldu_vssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s3, 256
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vldu %v0, %s1, %s2
; CHECK-NEXT:    vldu %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssl(i64 %1, ptr %2, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
  ret void
}
193 ; Function Attrs: nounwind readonly
194 declare <256 x double> @llvm.ve.vl.vldu.vssvl(i64, ptr, <256 x double>, i32)
196 ; Function Attrs: nounwind
; VLDU with immediate stride 8.
define void @vldu_vssl_imm(ptr %0) {
; CHECK-LABEL: vldu_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vldu %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssl(i64 8, ptr %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
  ret void
}
212 ; Function Attrs: nounwind
; VLDU, immediate stride 8, pass-through vector operand.
define void @vldu_vssvl_imm(ptr %0, ptr %1) {
; CHECK-LABEL: vldu_vssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldu %v0, 8, %s1
; CHECK-NEXT:    vldu %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssl(i64 8, ptr %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
  ret void
}
230 ; Function Attrs: nounwind
; VLDU.NC with scalar stride/base.
define void @vldunc_vssl(ptr %0, i64 %1) {
; CHECK-LABEL: vldunc_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldu.nc %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssl(i64 %1, ptr %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
  ret void
}
246 ; Function Attrs: nounwind readonly
247 declare <256 x double> @llvm.ve.vl.vldunc.vssl(i64, ptr, i32)
249 ; Function Attrs: nounwind
; VLDU.NC with pass-through vector operand (vssvl form).
define void @vldunc_vssvl(ptr %0, i64 %1, ptr %2) {
; CHECK-LABEL: vldunc_vssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s3, 256
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vldu.nc %v0, %s1, %s2
; CHECK-NEXT:    vldu.nc %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssl(i64 %1, ptr %2, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
  ret void
}
267 ; Function Attrs: nounwind readonly
268 declare <256 x double> @llvm.ve.vl.vldunc.vssvl(i64, ptr, <256 x double>, i32)
270 ; Function Attrs: nounwind
; VLDU.NC with immediate stride 8.
define void @vldunc_vssl_imm(ptr %0) {
; CHECK-LABEL: vldunc_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vldu.nc %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssl(i64 8, ptr %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
  ret void
}
286 ; Function Attrs: nounwind
; VLDU.NC, immediate stride 8, pass-through vector operand.
define void @vldunc_vssvl_imm(ptr %0, ptr %1) {
; CHECK-LABEL: vldunc_vssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldu.nc %v0, 8, %s1
; CHECK-NEXT:    vldu.nc %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssl(i64 8, ptr %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
  ret void
}
304 ; Function Attrs: nounwind
; VLDL.SX (lower 32-bit, sign-extended) with scalar stride/base.
define void @vldlsx_vssl(ptr %0, i64 %1) {
; CHECK-LABEL: vldlsx_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldl.sx %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssl(i64 %1, ptr %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
  ret void
}
320 ; Function Attrs: nounwind readonly
321 declare <256 x double> @llvm.ve.vl.vldlsx.vssl(i64, ptr, i32)
323 ; Function Attrs: nounwind
; VLDL.SX with pass-through vector operand (vssvl form).
define void @vldlsx_vssvl(ptr %0, i64 %1, ptr %2) {
; CHECK-LABEL: vldlsx_vssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s3, 256
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vldl.sx %v0, %s1, %s2
; CHECK-NEXT:    vldl.sx %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssl(i64 %1, ptr %2, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
  ret void
}
341 ; Function Attrs: nounwind readonly
342 declare <256 x double> @llvm.ve.vl.vldlsx.vssvl(i64, ptr, <256 x double>, i32)
344 ; Function Attrs: nounwind
; VLDL.SX with immediate stride 8.
define void @vldlsx_vssl_imm(ptr %0) {
; CHECK-LABEL: vldlsx_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vldl.sx %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssl(i64 8, ptr %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
  ret void
}
360 ; Function Attrs: nounwind
; VLDL.SX, immediate stride 8, pass-through vector operand.
define void @vldlsx_vssvl_imm(ptr %0, ptr %1) {
; CHECK-LABEL: vldlsx_vssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldl.sx %v0, 8, %s1
; CHECK-NEXT:    vldl.sx %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssl(i64 8, ptr %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
  ret void
}
378 ; Function Attrs: nounwind
; VLDL.SX.NC with scalar stride/base.
define void @vldlsxnc_vssl(ptr %0, i64 %1) {
; CHECK-LABEL: vldlsxnc_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldl.sx.nc %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssl(i64 %1, ptr %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
  ret void
}
394 ; Function Attrs: nounwind readonly
395 declare <256 x double> @llvm.ve.vl.vldlsxnc.vssl(i64, ptr, i32)
397 ; Function Attrs: nounwind
; VLDL.SX.NC with pass-through vector operand (vssvl form).
define void @vldlsxnc_vssvl(ptr %0, i64 %1, ptr %2) {
; CHECK-LABEL: vldlsxnc_vssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s3, 256
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vldl.sx.nc %v0, %s1, %s2
; CHECK-NEXT:    vldl.sx.nc %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssl(i64 %1, ptr %2, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
  ret void
}
415 ; Function Attrs: nounwind readonly
416 declare <256 x double> @llvm.ve.vl.vldlsxnc.vssvl(i64, ptr, <256 x double>, i32)
418 ; Function Attrs: nounwind
; VLDL.SX.NC with immediate stride 8.
define void @vldlsxnc_vssl_imm(ptr %0) {
; CHECK-LABEL: vldlsxnc_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vldl.sx.nc %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssl(i64 8, ptr %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
  ret void
}
434 ; Function Attrs: nounwind
; VLDL.SX.NC, immediate stride 8, pass-through vector operand.
define void @vldlsxnc_vssvl_imm(ptr %0, ptr %1) {
; CHECK-LABEL: vldlsxnc_vssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldl.sx.nc %v0, 8, %s1
; CHECK-NEXT:    vldl.sx.nc %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssl(i64 8, ptr %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
  ret void
}
452 ; Function Attrs: nounwind
; VLDL.ZX (lower 32-bit, zero-extended) with scalar stride/base.
define void @vldlzx_vssl(ptr %0, i64 %1) {
; CHECK-LABEL: vldlzx_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldl.zx %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssl(i64 %1, ptr %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
  ret void
}
468 ; Function Attrs: nounwind readonly
469 declare <256 x double> @llvm.ve.vl.vldlzx.vssl(i64, ptr, i32)
471 ; Function Attrs: nounwind
; VLDL.ZX with pass-through vector operand (vssvl form).
define void @vldlzx_vssvl(ptr %0, i64 %1, ptr %2) {
; CHECK-LABEL: vldlzx_vssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s3, 256
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vldl.zx %v0, %s1, %s2
; CHECK-NEXT:    vldl.zx %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssl(i64 %1, ptr %2, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
  ret void
}
489 ; Function Attrs: nounwind readonly
490 declare <256 x double> @llvm.ve.vl.vldlzx.vssvl(i64, ptr, <256 x double>, i32)
492 ; Function Attrs: nounwind
; VLDL.ZX with immediate stride 8.
define void @vldlzx_vssl_imm(ptr %0) {
; CHECK-LABEL: vldlzx_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vldl.zx %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssl(i64 8, ptr %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
  ret void
}
508 ; Function Attrs: nounwind
; VLDL.ZX, immediate stride 8, pass-through vector operand.
define void @vldlzx_vssvl_imm(ptr %0, ptr %1) {
; CHECK-LABEL: vldlzx_vssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldl.zx %v0, 8, %s1
; CHECK-NEXT:    vldl.zx %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssl(i64 8, ptr %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
  ret void
}
526 ; Function Attrs: nounwind
; VLDL.ZX.NC with scalar stride/base.
define void @vldlzxnc_vssl(ptr %0, i64 %1) {
; CHECK-LABEL: vldlzxnc_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldl.zx.nc %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssl(i64 %1, ptr %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
  ret void
}
542 ; Function Attrs: nounwind readonly
543 declare <256 x double> @llvm.ve.vl.vldlzxnc.vssl(i64, ptr, i32)
545 ; Function Attrs: nounwind
; VLDL.ZX.NC with pass-through vector operand (vssvl form).
define void @vldlzxnc_vssvl(ptr %0, i64 %1, ptr %2) {
; CHECK-LABEL: vldlzxnc_vssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s3, 256
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vldl.zx.nc %v0, %s1, %s2
; CHECK-NEXT:    vldl.zx.nc %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssl(i64 %1, ptr %2, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
  ret void
}
563 ; Function Attrs: nounwind readonly
564 declare <256 x double> @llvm.ve.vl.vldlzxnc.vssvl(i64, ptr, <256 x double>, i32)
566 ; Function Attrs: nounwind
; VLDL.ZX.NC with immediate stride 8.
define void @vldlzxnc_vssl_imm(ptr %0) {
; CHECK-LABEL: vldlzxnc_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vldl.zx.nc %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssl(i64 8, ptr %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
  ret void
}
582 ; Function Attrs: nounwind
; VLDL.ZX.NC, immediate stride 8, pass-through vector operand.
define void @vldlzxnc_vssvl_imm(ptr %0, ptr %1) {
; CHECK-LABEL: vldlzxnc_vssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldl.zx.nc %v0, 8, %s1
; CHECK-NEXT:    vldl.zx.nc %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssl(i64 8, ptr %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
  ret void
}
600 ; Function Attrs: nounwind
; VLD2D (2-dimensional load) with scalar stride/base.
define void @vld2d_vssl(ptr %0, i64 %1) {
; CHECK-LABEL: vld2d_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld2d %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssl(i64 %1, ptr %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
  ret void
}
616 ; Function Attrs: nounwind readonly
617 declare <256 x double> @llvm.ve.vl.vld2d.vssl(i64, ptr, i32)
619 ; Function Attrs: nounwind
; VLD2D with pass-through vector operand (vssvl form).
define void @vld2d_vssvl(ptr %0, i64 %1, ptr %2) {
; CHECK-LABEL: vld2d_vssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s3, 256
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vld2d %v0, %s1, %s2
; CHECK-NEXT:    vld2d %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssl(i64 %1, ptr %2, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
  ret void
}
637 ; Function Attrs: nounwind readonly
638 declare <256 x double> @llvm.ve.vl.vld2d.vssvl(i64, ptr, <256 x double>, i32)
640 ; Function Attrs: nounwind
; VLD2D with immediate stride 8.
define void @vld2d_vssl_imm(ptr %0) {
; CHECK-LABEL: vld2d_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld2d %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssl(i64 8, ptr %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
  ret void
}
656 ; Function Attrs: nounwind
; VLD2D, immediate stride 8, pass-through vector operand.
define void @vld2d_vssvl_imm(ptr %0, ptr %1) {
; CHECK-LABEL: vld2d_vssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld2d %v0, 8, %s1
; CHECK-NEXT:    vld2d %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssl(i64 8, ptr %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
  ret void
}
674 ; Function Attrs: nounwind
; VLD2D.NC with scalar stride/base.
define void @vld2dnc_vssl(ptr %0, i64 %1) {
; CHECK-LABEL: vld2dnc_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld2d.nc %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssl(i64 %1, ptr %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
  ret void
}
690 ; Function Attrs: nounwind readonly
691 declare <256 x double> @llvm.ve.vl.vld2dnc.vssl(i64, ptr, i32)
693 ; Function Attrs: nounwind
; VLD2D.NC with pass-through vector operand (vssvl form).
define void @vld2dnc_vssvl(ptr %0, i64 %1, ptr %2) {
; CHECK-LABEL: vld2dnc_vssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s3, 256
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vld2d.nc %v0, %s1, %s2
; CHECK-NEXT:    vld2d.nc %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssl(i64 %1, ptr %2, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
  ret void
}
711 ; Function Attrs: nounwind readonly
712 declare <256 x double> @llvm.ve.vl.vld2dnc.vssvl(i64, ptr, <256 x double>, i32)
714 ; Function Attrs: nounwind
; VLD2D.NC with immediate stride 8.
define void @vld2dnc_vssl_imm(ptr %0) {
; CHECK-LABEL: vld2dnc_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld2d.nc %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssl(i64 8, ptr %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
  ret void
}
730 ; Function Attrs: nounwind
; VLD2D.NC, immediate stride 8, pass-through vector operand.
define void @vld2dnc_vssvl_imm(ptr %0, ptr %1) {
; CHECK-LABEL: vld2dnc_vssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld2d.nc %v0, 8, %s1
; CHECK-NEXT:    vld2d.nc %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssl(i64 8, ptr %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
  ret void
}
748 ; Function Attrs: nounwind
; VLDU2D with scalar stride/base.
define void @vldu2d_vssl(ptr %0, i64 %1) {
; CHECK-LABEL: vldu2d_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldu2d %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssl(i64 %1, ptr %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
  ret void
}
764 ; Function Attrs: nounwind readonly
765 declare <256 x double> @llvm.ve.vl.vldu2d.vssl(i64, ptr, i32)
767 ; Function Attrs: nounwind
; VLDU2D with pass-through vector operand (vssvl form).
define void @vldu2d_vssvl(ptr %0, i64 %1, ptr %2) {
; CHECK-LABEL: vldu2d_vssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s3, 256
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vldu2d %v0, %s1, %s2
; CHECK-NEXT:    vldu2d %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssl(i64 %1, ptr %2, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
  ret void
}
785 ; Function Attrs: nounwind readonly
786 declare <256 x double> @llvm.ve.vl.vldu2d.vssvl(i64, ptr, <256 x double>, i32)
788 ; Function Attrs: nounwind
; VLDU2D with immediate stride 8.
define void @vldu2d_vssl_imm(ptr %0) {
; CHECK-LABEL: vldu2d_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vldu2d %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssl(i64 8, ptr %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
  ret void
}
804 ; Function Attrs: nounwind
; VLDU2D, immediate stride 8, pass-through vector operand.
define void @vldu2d_vssvl_imm(ptr %0, ptr %1) {
; CHECK-LABEL: vldu2d_vssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldu2d %v0, 8, %s1
; CHECK-NEXT:    vldu2d %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssl(i64 8, ptr %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
  ret void
}
822 ; Function Attrs: nounwind
; VLDU2D.NC with scalar stride/base.
define void @vldu2dnc_vssl(ptr %0, i64 %1) {
; CHECK-LABEL: vldu2dnc_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldu2d.nc %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssl(i64 %1, ptr %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
  ret void
}
838 ; Function Attrs: nounwind readonly
839 declare <256 x double> @llvm.ve.vl.vldu2dnc.vssl(i64, ptr, i32)
841 ; Function Attrs: nounwind
; VLDU2D.NC with pass-through vector operand (vssvl form).
define void @vldu2dnc_vssvl(ptr %0, i64 %1, ptr %2) {
; CHECK-LABEL: vldu2dnc_vssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s3, 256
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vldu2d.nc %v0, %s1, %s2
; CHECK-NEXT:    vldu2d.nc %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssl(i64 %1, ptr %2, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
  ret void
}
859 ; Function Attrs: nounwind readonly
860 declare <256 x double> @llvm.ve.vl.vldu2dnc.vssvl(i64, ptr, <256 x double>, i32)
862 ; Function Attrs: nounwind
; VLDU2D.NC with immediate stride 8.
define void @vldu2dnc_vssl_imm(ptr %0) {
; CHECK-LABEL: vldu2dnc_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vldu2d.nc %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssl(i64 8, ptr %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
  ret void
}
878 ; Function Attrs: nounwind
; VLDU2D.NC, immediate stride 8, pass-through vector operand.
define void @vldu2dnc_vssvl_imm(ptr %0, ptr %1) {
; CHECK-LABEL: vldu2dnc_vssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldu2d.nc %v0, 8, %s1
; CHECK-NEXT:    vldu2d.nc %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssl(i64 8, ptr %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
  ret void
}
896 ; Function Attrs: nounwind
; VLDL2D.SX with scalar stride/base.
define void @vldl2dsx_vssl(ptr %0, i64 %1) {
; CHECK-LABEL: vldl2dsx_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldl2d.sx %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssl(i64 %1, ptr %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
  ret void
}
912 ; Function Attrs: nounwind readonly
913 declare <256 x double> @llvm.ve.vl.vldl2dsx.vssl(i64, ptr, i32)
915 ; Function Attrs: nounwind
; VLDL2D.SX with pass-through vector operand (vssvl form).
define void @vldl2dsx_vssvl(ptr %0, i64 %1, ptr %2) {
; CHECK-LABEL: vldl2dsx_vssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s3, 256
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vldl2d.sx %v0, %s1, %s2
; CHECK-NEXT:    vldl2d.sx %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssl(i64 %1, ptr %2, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
  ret void
}
933 ; Function Attrs: nounwind readonly
934 declare <256 x double> @llvm.ve.vl.vldl2dsx.vssvl(i64, ptr, <256 x double>, i32)
936 ; Function Attrs: nounwind
; VLDL2D.SX with immediate stride 8.
define void @vldl2dsx_vssl_imm(ptr %0) {
; CHECK-LABEL: vldl2dsx_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vldl2d.sx %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssl(i64 8, ptr %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
  ret void
}
952 ; Function Attrs: nounwind
; VLDL2D.SX, immediate stride 8, pass-through vector operand.
define void @vldl2dsx_vssvl_imm(ptr %0, ptr %1) {
; CHECK-LABEL: vldl2dsx_vssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldl2d.sx %v0, 8, %s1
; CHECK-NEXT:    vldl2d.sx %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssl(i64 8, ptr %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
  ret void
}
970 ; Function Attrs: nounwind
; VLDL2D.SX.NC with scalar stride/base.
define void @vldl2dsxnc_vssl(ptr %0, i64 %1) {
; CHECK-LABEL: vldl2dsxnc_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldl2d.sx.nc %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssl(i64 %1, ptr %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
  ret void
}
986 ; Function Attrs: nounwind readonly
987 declare <256 x double> @llvm.ve.vl.vldl2dsxnc.vssl(i64, ptr, i32)
989 ; Function Attrs: nounwind
;; vldl2d.sx.nc with register stride and passthru: the vssl load from %2
;; yields passthru %4 for the chained vssvl load from %0.
;; NOTE(review): excerpt drops "# %bb.0:"/"#APP" and "ret void"/"}".
990 define void @vldl2dsxnc_vssvl(ptr %0, i64 %1, ptr %2) {
991 ; CHECK-LABEL: vldl2dsxnc_vssvl:
993 ; CHECK-NEXT: lea %s3, 256
994 ; CHECK-NEXT: lvl %s3
995 ; CHECK-NEXT: vldl2d.sx.nc %v0, %s1, %s2
996 ; CHECK-NEXT: vldl2d.sx.nc %v0, %s1, %s0
998 ; CHECK-NEXT: vst %v0, %s1, %s0
999 ; CHECK-NEXT: #NO_APP
1000 ; CHECK-NEXT: b.l.t (, %s10)
1001 %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssl(i64 %1, ptr %2, i32 256)
1002 %5 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
1003 tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
1007 ; Function Attrs: nounwind readonly
;; vssvl form adds the <256 x double> passthru operand before the VL argument.
1008 declare <256 x double> @llvm.ve.vl.vldl2dsxnc.vssvl(i64, ptr, <256 x double>, i32)
1010 ; Function Attrs: nounwind
;; vldl2d.sx.nc with immediate stride 8 (VLD*irl form).
;; NOTE(review): excerpt drops "# %bb.0:"/"#APP" and "ret void"/"}".
1011 define void @vldl2dsxnc_vssl_imm(ptr %0) {
1012 ; CHECK-LABEL: vldl2dsxnc_vssl_imm:
1014 ; CHECK-NEXT: lea %s1, 256
1015 ; CHECK-NEXT: lvl %s1
1016 ; CHECK-NEXT: vldl2d.sx.nc %v0, 8, %s0
1018 ; CHECK-NEXT: vst %v0, 8, %s0
1019 ; CHECK-NEXT: #NO_APP
1020 ; CHECK-NEXT: b.l.t (, %s10)
1021 %2 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssl(i64 8, ptr %0, i32 256)
1022 tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
1026 ; Function Attrs: nounwind
;; vldl2d.sx.nc with immediate stride 8 and passthru (VLD*irl_v form).
;; NOTE(review): excerpt drops "# %bb.0:"/"#APP" and "ret void"/"}".
1027 define void @vldl2dsxnc_vssvl_imm(ptr %0, ptr %1) {
1028 ; CHECK-LABEL: vldl2dsxnc_vssvl_imm:
1030 ; CHECK-NEXT: lea %s2, 256
1031 ; CHECK-NEXT: lvl %s2
1032 ; CHECK-NEXT: vldl2d.sx.nc %v0, 8, %s1
1033 ; CHECK-NEXT: vldl2d.sx.nc %v0, 8, %s0
1035 ; CHECK-NEXT: vst %v0, 8, %s0
1036 ; CHECK-NEXT: #NO_APP
1037 ; CHECK-NEXT: b.l.t (, %s10)
1038 %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssl(i64 8, ptr %1, i32 256)
1039 %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
1040 tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
1044 ; Function Attrs: nounwind
;; vldl2d.zx (zero-extending counterpart of the .sx cases above), register
;; stride %1, base %0, VL=256 via lea/lvl.
;; NOTE(review): excerpt drops "# %bb.0:"/"#APP" and "ret void"/"}".
1045 define void @vldl2dzx_vssl(ptr %0, i64 %1) {
1046 ; CHECK-LABEL: vldl2dzx_vssl:
1048 ; CHECK-NEXT: lea %s2, 256
1049 ; CHECK-NEXT: lvl %s2
1050 ; CHECK-NEXT: vldl2d.zx %v0, %s1, %s0
1052 ; CHECK-NEXT: vst %v0, %s1, %s0
1053 ; CHECK-NEXT: #NO_APP
1054 ; CHECK-NEXT: b.l.t (, %s10)
1055 %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssl(i64 %1, ptr %0, i32 256)
1056 tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
1060 ; Function Attrs: nounwind readonly
;; vssl form: (i64 stride, ptr base, i32 vector-length).
1061 declare <256 x double> @llvm.ve.vl.vldl2dzx.vssl(i64, ptr, i32)
1063 ; Function Attrs: nounwind
;; vldl2d.zx with register stride and passthru: vssl load from %2 seeds
;; the passthru of the chained vssvl load from %0.
;; NOTE(review): excerpt drops "# %bb.0:"/"#APP" and "ret void"/"}".
1064 define void @vldl2dzx_vssvl(ptr %0, i64 %1, ptr %2) {
1065 ; CHECK-LABEL: vldl2dzx_vssvl:
1067 ; CHECK-NEXT: lea %s3, 256
1068 ; CHECK-NEXT: lvl %s3
1069 ; CHECK-NEXT: vldl2d.zx %v0, %s1, %s2
1070 ; CHECK-NEXT: vldl2d.zx %v0, %s1, %s0
1072 ; CHECK-NEXT: vst %v0, %s1, %s0
1073 ; CHECK-NEXT: #NO_APP
1074 ; CHECK-NEXT: b.l.t (, %s10)
1075 %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssl(i64 %1, ptr %2, i32 256)
1076 %5 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
1077 tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
1081 ; Function Attrs: nounwind readonly
;; vssvl form adds the <256 x double> passthru operand before the VL argument.
1082 declare <256 x double> @llvm.ve.vl.vldl2dzx.vssvl(i64, ptr, <256 x double>, i32)
1084 ; Function Attrs: nounwind
;; vldl2d.zx with immediate stride 8 (VLD*irl form).
;; NOTE(review): excerpt drops "# %bb.0:"/"#APP" and "ret void"/"}".
1085 define void @vldl2dzx_vssl_imm(ptr %0) {
1086 ; CHECK-LABEL: vldl2dzx_vssl_imm:
1088 ; CHECK-NEXT: lea %s1, 256
1089 ; CHECK-NEXT: lvl %s1
1090 ; CHECK-NEXT: vldl2d.zx %v0, 8, %s0
1092 ; CHECK-NEXT: vst %v0, 8, %s0
1093 ; CHECK-NEXT: #NO_APP
1094 ; CHECK-NEXT: b.l.t (, %s10)
1095 %2 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssl(i64 8, ptr %0, i32 256)
1096 tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
1100 ; Function Attrs: nounwind
;; vldl2d.zx with immediate stride 8 and passthru (VLD*irl_v form).
;; NOTE(review): excerpt drops "# %bb.0:"/"#APP" and "ret void"/"}".
1101 define void @vldl2dzx_vssvl_imm(ptr %0, ptr %1) {
1102 ; CHECK-LABEL: vldl2dzx_vssvl_imm:
1104 ; CHECK-NEXT: lea %s2, 256
1105 ; CHECK-NEXT: lvl %s2
1106 ; CHECK-NEXT: vldl2d.zx %v0, 8, %s1
1107 ; CHECK-NEXT: vldl2d.zx %v0, 8, %s0
1109 ; CHECK-NEXT: vst %v0, 8, %s0
1110 ; CHECK-NEXT: #NO_APP
1111 ; CHECK-NEXT: b.l.t (, %s10)
1112 %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssl(i64 8, ptr %1, i32 256)
1113 %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
1114 tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
1118 ; Function Attrs: nounwind
;; vldl2d.zx.nc ("nc" suffix — presumably the non-cacheable variant; confirm
;; against the VE ISA manual), register stride %1, base %0, VL=256.
;; NOTE(review): excerpt drops "# %bb.0:"/"#APP" and "ret void"/"}".
1119 define void @vldl2dzxnc_vssl(ptr %0, i64 %1) {
1120 ; CHECK-LABEL: vldl2dzxnc_vssl:
1122 ; CHECK-NEXT: lea %s2, 256
1123 ; CHECK-NEXT: lvl %s2
1124 ; CHECK-NEXT: vldl2d.zx.nc %v0, %s1, %s0
1126 ; CHECK-NEXT: vst %v0, %s1, %s0
1127 ; CHECK-NEXT: #NO_APP
1128 ; CHECK-NEXT: b.l.t (, %s10)
1129 %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssl(i64 %1, ptr %0, i32 256)
1130 tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
1134 ; Function Attrs: nounwind readonly
;; vssl form: (i64 stride, ptr base, i32 vector-length).
1135 declare <256 x double> @llvm.ve.vl.vldl2dzxnc.vssl(i64, ptr, i32)
1137 ; Function Attrs: nounwind
;; vldl2d.zx.nc with register stride and passthru: vssl load from %2 seeds
;; the passthru of the chained vssvl load from %0.
;; NOTE(review): excerpt drops "# %bb.0:"/"#APP" and "ret void"/"}".
1138 define void @vldl2dzxnc_vssvl(ptr %0, i64 %1, ptr %2) {
1139 ; CHECK-LABEL: vldl2dzxnc_vssvl:
1141 ; CHECK-NEXT: lea %s3, 256
1142 ; CHECK-NEXT: lvl %s3
1143 ; CHECK-NEXT: vldl2d.zx.nc %v0, %s1, %s2
1144 ; CHECK-NEXT: vldl2d.zx.nc %v0, %s1, %s0
1146 ; CHECK-NEXT: vst %v0, %s1, %s0
1147 ; CHECK-NEXT: #NO_APP
1148 ; CHECK-NEXT: b.l.t (, %s10)
1149 %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssl(i64 %1, ptr %2, i32 256)
1150 %5 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
1151 tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
1155 ; Function Attrs: nounwind readonly
;; vssvl form adds the <256 x double> passthru operand before the VL argument.
1156 declare <256 x double> @llvm.ve.vl.vldl2dzxnc.vssvl(i64, ptr, <256 x double>, i32)
1158 ; Function Attrs: nounwind
;; vldl2d.zx.nc with immediate stride 8 (VLD*irl form).
;; NOTE(review): excerpt drops "# %bb.0:"/"#APP" and "ret void"/"}".
1159 define void @vldl2dzxnc_vssl_imm(ptr %0) {
1160 ; CHECK-LABEL: vldl2dzxnc_vssl_imm:
1162 ; CHECK-NEXT: lea %s1, 256
1163 ; CHECK-NEXT: lvl %s1
1164 ; CHECK-NEXT: vldl2d.zx.nc %v0, 8, %s0
1166 ; CHECK-NEXT: vst %v0, 8, %s0
1167 ; CHECK-NEXT: #NO_APP
1168 ; CHECK-NEXT: b.l.t (, %s10)
1169 %2 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssl(i64 8, ptr %0, i32 256)
1170 tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
1174 ; Function Attrs: nounwind
;; vldl2d.zx.nc with immediate stride 8 and passthru (VLD*irl_v form).
;; NOTE(review): the "# %bb.0:"/"#APP" CHECK lines are missing here, and the
;; function's "ret void"/"}" lie beyond the end of this excerpt.
1175 define void @vldl2dzxnc_vssvl_imm(ptr %0, ptr %1) {
1176 ; CHECK-LABEL: vldl2dzxnc_vssvl_imm:
1178 ; CHECK-NEXT: lea %s2, 256
1179 ; CHECK-NEXT: lvl %s2
1180 ; CHECK-NEXT: vldl2d.zx.nc %v0, 8, %s1
1181 ; CHECK-NEXT: vldl2d.zx.nc %v0, 8, %s0
1183 ; CHECK-NEXT: vst %v0, 8, %s0
1184 ; CHECK-NEXT: #NO_APP
1185 ; CHECK-NEXT: b.l.t (, %s10)
1186 %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssl(i64 8, ptr %1, i32 256)
1187 %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
1188 tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)