; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s

;;; Test vector store intrinsic instructions
;;;
;;; Note:
;;;   We test VST*rrvl, VST*rrvml, VST*irvl, and VST*irvml instructions.
; Function Attrs: nounwind
define void @vst_vssl(ptr %0, i64 %1) {
; CHECK-LABEL: vst_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vld.vssl(i64, ptr, i32)

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vst.vssl(<256 x double>, i64, ptr, i32)

; Function Attrs: nounwind
define void @vst_vssml(ptr %0, i64 %1) {
; CHECK-LABEL: vst_vssml:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    vst %v0, %s1, %s0, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vst.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
  ret void
}

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vst.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)

; Function Attrs: nounwind
define void @vst_vssl_imm(ptr %0) {
; CHECK-LABEL: vst_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind
define void @vst_vssml_imm(ptr %0) {
; CHECK-LABEL: vst_vssml_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vst %v0, 8, %s0, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vst.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
  ret void
}
; Function Attrs: nounwind
define void @vstnc_vssl(ptr %0, i64 %1) {
; CHECK-LABEL: vstnc_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    vst.nc %v0, %s1, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstnc.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vstnc.vssl(<256 x double>, i64, ptr, i32)

; Function Attrs: nounwind
define void @vstnc_vssml(ptr %0, i64 %1) {
; CHECK-LABEL: vstnc_vssml:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    vst.nc %v0, %s1, %s0, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstnc.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
  ret void
}

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vstnc.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)

; Function Attrs: nounwind
define void @vstnc_vssl_imm(ptr %0) {
; CHECK-LABEL: vstnc_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vst.nc %v0, 8, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstnc.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind
define void @vstnc_vssml_imm(ptr %0) {
; CHECK-LABEL: vstnc_vssml_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vst.nc %v0, 8, %s0, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstnc.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
  ret void
}
; Function Attrs: nounwind
define void @vstot_vssl(ptr %0, i64 %1) {
; CHECK-LABEL: vstot_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    vst.ot %v0, %s1, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstot.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vstot.vssl(<256 x double>, i64, ptr, i32)

; Function Attrs: nounwind
define void @vstot_vssml(ptr %0, i64 %1) {
; CHECK-LABEL: vstot_vssml:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    vst.ot %v0, %s1, %s0, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstot.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
  ret void
}

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vstot.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)

; Function Attrs: nounwind
define void @vstot_vssl_imm(ptr %0) {
; CHECK-LABEL: vstot_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vst.ot %v0, 8, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstot.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind
define void @vstot_vssml_imm(ptr %0) {
; CHECK-LABEL: vstot_vssml_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vst.ot %v0, 8, %s0, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstot.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
  ret void
}
; Function Attrs: nounwind
define void @vstncot_vssl(ptr %0, i64 %1) {
; CHECK-LABEL: vstncot_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    vst.nc.ot %v0, %s1, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstncot.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vstncot.vssl(<256 x double>, i64, ptr, i32)

; Function Attrs: nounwind
define void @vstncot_vssml(ptr %0, i64 %1) {
; CHECK-LABEL: vstncot_vssml:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    vst.nc.ot %v0, %s1, %s0, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstncot.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
  ret void
}

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vstncot.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)

; Function Attrs: nounwind
define void @vstncot_vssl_imm(ptr %0) {
; CHECK-LABEL: vstncot_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vst.nc.ot %v0, 8, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstncot.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind
define void @vstncot_vssml_imm(ptr %0) {
; CHECK-LABEL: vstncot_vssml_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vst.nc.ot %v0, 8, %s0, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstncot.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
  ret void
}
; Function Attrs: nounwind
define void @vstu_vssl(ptr %0, i64 %1) {
; CHECK-LABEL: vstu_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    vstu %v0, %s1, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstu.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vstu.vssl(<256 x double>, i64, ptr, i32)

; Function Attrs: nounwind
define void @vstu_vssml(ptr %0, i64 %1) {
; CHECK-LABEL: vstu_vssml:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    vstu %v0, %s1, %s0, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstu.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
  ret void
}

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vstu.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)

; Function Attrs: nounwind
define void @vstu_vssl_imm(ptr %0) {
; CHECK-LABEL: vstu_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vstu %v0, 8, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstu.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind
define void @vstu_vssml_imm(ptr %0) {
; CHECK-LABEL: vstu_vssml_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vstu %v0, 8, %s0, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstu.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
  ret void
}
; Function Attrs: nounwind
define void @vstunc_vssl(ptr %0, i64 %1) {
; CHECK-LABEL: vstunc_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    vstu.nc %v0, %s1, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstunc.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vstunc.vssl(<256 x double>, i64, ptr, i32)

; Function Attrs: nounwind
define void @vstunc_vssml(ptr %0, i64 %1) {
; CHECK-LABEL: vstunc_vssml:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    vstu.nc %v0, %s1, %s0, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstunc.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
  ret void
}

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vstunc.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)

; Function Attrs: nounwind
define void @vstunc_vssl_imm(ptr %0) {
; CHECK-LABEL: vstunc_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vstu.nc %v0, 8, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstunc.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind
define void @vstunc_vssml_imm(ptr %0) {
; CHECK-LABEL: vstunc_vssml_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vstu.nc %v0, 8, %s0, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstunc.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
  ret void
}
; Function Attrs: nounwind
define void @vstuot_vssl(ptr %0, i64 %1) {
; CHECK-LABEL: vstuot_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    vstu.ot %v0, %s1, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstuot.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vstuot.vssl(<256 x double>, i64, ptr, i32)

; Function Attrs: nounwind
define void @vstuot_vssml(ptr %0, i64 %1) {
; CHECK-LABEL: vstuot_vssml:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    vstu.ot %v0, %s1, %s0, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstuot.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
  ret void
}

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vstuot.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)

; Function Attrs: nounwind
define void @vstuot_vssl_imm(ptr %0) {
; CHECK-LABEL: vstuot_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vstu.ot %v0, 8, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstuot.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind
define void @vstuot_vssml_imm(ptr %0) {
; CHECK-LABEL: vstuot_vssml_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vstu.ot %v0, 8, %s0, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstuot.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
  ret void
}
; Function Attrs: nounwind
define void @vstuncot_vssl(ptr %0, i64 %1) {
; CHECK-LABEL: vstuncot_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    vstu.nc.ot %v0, %s1, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstuncot.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vstuncot.vssl(<256 x double>, i64, ptr, i32)

; Function Attrs: nounwind
define void @vstuncot_vssml(ptr %0, i64 %1) {
; CHECK-LABEL: vstuncot_vssml:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    vstu.nc.ot %v0, %s1, %s0, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstuncot.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
  ret void
}

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vstuncot.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)

; Function Attrs: nounwind
define void @vstuncot_vssl_imm(ptr %0) {
; CHECK-LABEL: vstuncot_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vstu.nc.ot %v0, 8, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstuncot.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind
define void @vstuncot_vssml_imm(ptr %0) {
; CHECK-LABEL: vstuncot_vssml_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vstu.nc.ot %v0, 8, %s0, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstuncot.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
  ret void
}
; Function Attrs: nounwind
define void @vstl_vssl(ptr %0, i64 %1) {
; CHECK-LABEL: vstl_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    vstl %v0, %s1, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstl.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vstl.vssl(<256 x double>, i64, ptr, i32)

; Function Attrs: nounwind
define void @vstl_vssml(ptr %0, i64 %1) {
; CHECK-LABEL: vstl_vssml:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    vstl %v0, %s1, %s0, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstl.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
  ret void
}

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vstl.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)

; Function Attrs: nounwind
define void @vstl_vssl_imm(ptr %0) {
; CHECK-LABEL: vstl_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vstl %v0, 8, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstl.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind
define void @vstl_vssml_imm(ptr %0) {
; CHECK-LABEL: vstl_vssml_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vstl %v0, 8, %s0, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstl.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
  ret void
}
; Function Attrs: nounwind
define void @vstlnc_vssl(ptr %0, i64 %1) {
; CHECK-LABEL: vstlnc_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    vstl.nc %v0, %s1, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstlnc.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vstlnc.vssl(<256 x double>, i64, ptr, i32)

; Function Attrs: nounwind
define void @vstlnc_vssml(ptr %0, i64 %1) {
; CHECK-LABEL: vstlnc_vssml:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    vstl.nc %v0, %s1, %s0, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstlnc.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
  ret void
}

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vstlnc.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)

; Function Attrs: nounwind
define void @vstlnc_vssl_imm(ptr %0) {
; CHECK-LABEL: vstlnc_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vstl.nc %v0, 8, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstlnc.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind
define void @vstlnc_vssml_imm(ptr %0) {
; CHECK-LABEL: vstlnc_vssml_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vstl.nc %v0, 8, %s0, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstlnc.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
  ret void
}
; Function Attrs: nounwind
define void @vstlot_vssl(ptr %0, i64 %1) {
; CHECK-LABEL: vstlot_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    vstl.ot %v0, %s1, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstlot.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vstlot.vssl(<256 x double>, i64, ptr, i32)

; Function Attrs: nounwind
define void @vstlot_vssml(ptr %0, i64 %1) {
; CHECK-LABEL: vstlot_vssml:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    vstl.ot %v0, %s1, %s0, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstlot.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
  ret void
}

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vstlot.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)

; Function Attrs: nounwind
define void @vstlot_vssl_imm(ptr %0) {
; CHECK-LABEL: vstlot_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vstl.ot %v0, 8, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstlot.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind
define void @vstlot_vssml_imm(ptr %0) {
; CHECK-LABEL: vstlot_vssml_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vstl.ot %v0, 8, %s0, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstlot.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
  ret void
}
; Function Attrs: nounwind
define void @vstlncot_vssl(ptr %0, i64 %1) {
; CHECK-LABEL: vstlncot_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    vstl.nc.ot %v0, %s1, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstlncot.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vstlncot.vssl(<256 x double>, i64, ptr, i32)

; Function Attrs: nounwind
define void @vstlncot_vssml(ptr %0, i64 %1) {
; CHECK-LABEL: vstlncot_vssml:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    vstl.nc.ot %v0, %s1, %s0, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstlncot.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
  ret void
}

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vstlncot.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)

; Function Attrs: nounwind
define void @vstlncot_vssl_imm(ptr %0) {
; CHECK-LABEL: vstlncot_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vstl.nc.ot %v0, 8, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstlncot.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind
define void @vstlncot_vssml_imm(ptr %0) {
; CHECK-LABEL: vstlncot_vssml_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vstl.nc.ot %v0, 8, %s0, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vstlncot.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
  ret void
}
; Function Attrs: nounwind
define void @vst2d_vssl(ptr %0, i64 %1) {
; CHECK-LABEL: vst2d_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    vst2d %v0, %s1, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vst2d.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vst2d.vssl(<256 x double>, i64, ptr, i32)

; Function Attrs: nounwind
define void @vst2d_vssml(ptr %0, i64 %1) {
; CHECK-LABEL: vst2d_vssml:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    vst2d %v0, %s1, %s0, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vst2d.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
  ret void
}

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vst2d.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)

; Function Attrs: nounwind
define void @vst2d_vssl_imm(ptr %0) {
; CHECK-LABEL: vst2d_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vst2d %v0, 8, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vst2d.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind
define void @vst2d_vssml_imm(ptr %0) {
; CHECK-LABEL: vst2d_vssml_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vst2d %v0, 8, %s0, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vst2d.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
  ret void
}
; Function Attrs: nounwind
define void @vst2dnc_vssl(ptr %0, i64 %1) {
; CHECK-LABEL: vst2dnc_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    vst2d.nc %v0, %s1, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vst2dnc.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vst2dnc.vssl(<256 x double>, i64, ptr, i32)

; Function Attrs: nounwind
define void @vst2dnc_vssml(ptr %0, i64 %1) {
; CHECK-LABEL: vst2dnc_vssml:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    vst2d.nc %v0, %s1, %s0, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vst2dnc.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
  ret void
}

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vst2dnc.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)

; Function Attrs: nounwind
define void @vst2dnc_vssl_imm(ptr %0) {
; CHECK-LABEL: vst2dnc_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vst2d.nc %v0, 8, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vst2dnc.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind
define void @vst2dnc_vssml_imm(ptr %0) {
; CHECK-LABEL: vst2dnc_vssml_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vst2d.nc %v0, 8, %s0, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vst2dnc.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
  ret void
}
; Function Attrs: nounwind
define void @vst2dot_vssl(ptr %0, i64 %1) {
; CHECK-LABEL: vst2dot_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    vst2d.ot %v0, %s1, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vst2dot.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vst2dot.vssl(<256 x double>, i64, ptr, i32)

; Function Attrs: nounwind
define void @vst2dot_vssml(ptr %0, i64 %1) {
; CHECK-LABEL: vst2dot_vssml:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    vst2d.ot %v0, %s1, %s0, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vst2dot.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
  ret void
}

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vst2dot.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)

; Function Attrs: nounwind
define void @vst2dot_vssl_imm(ptr %0) {
; CHECK-LABEL: vst2dot_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vst2d.ot %v0, 8, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vst2dot.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind
define void @vst2dot_vssml_imm(ptr %0) {
; CHECK-LABEL: vst2dot_vssml_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vst2d.ot %v0, 8, %s0, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  tail call void @llvm.ve.vl.vst2dot.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
  ret void
}
941 ; Function Attrs: nounwind
942 define void @vst2dncot_vssl(ptr %0, i64 %1) {
943 ; CHECK-LABEL: vst2dncot_vssl:
945 ; CHECK-NEXT: lea %s2, 256
946 ; CHECK-NEXT: lvl %s2
947 ; CHECK-NEXT: vld %v0, %s1, %s0
948 ; CHECK-NEXT: vst2d.nc.ot %v0, %s1, %s0
949 ; CHECK-NEXT: b.l.t (, %s10)
950 %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
951 tail call void @llvm.ve.vl.vst2dncot.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
955 ; Function Attrs: nounwind writeonly
956 declare void @llvm.ve.vl.vst2dncot.vssl(<256 x double>, i64, ptr, i32)
958 ; Function Attrs: nounwind
; vst2dncot: register stride %1 with mask %vm1, VL=256 (VST*rrvml form per file header).
959 define void @vst2dncot_vssml(ptr %0, i64 %1) {
960 ; CHECK-LABEL: vst2dncot_vssml:
962 ; CHECK-NEXT: lea %s2, 256
963 ; CHECK-NEXT: lvl %s2
964 ; CHECK-NEXT: vld %v0, %s1, %s0
965 ; CHECK-NEXT: vst2d.nc.ot %v0, %s1, %s0, %vm1
966 ; CHECK-NEXT: b.l.t (, %s10)
967 %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
968 tail call void @llvm.ve.vl.vst2dncot.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
972 ; Function Attrs: nounwind writeonly
973 declare void @llvm.ve.vl.vst2dncot.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
975 ; Function Attrs: nounwind
; vst2dncot: immediate stride 8, unmasked, VL=256 (VST*irvl form per file header).
976 define void @vst2dncot_vssl_imm(ptr %0) {
977 ; CHECK-LABEL: vst2dncot_vssl_imm:
979 ; CHECK-NEXT: lea %s1, 256
980 ; CHECK-NEXT: lvl %s1
981 ; CHECK-NEXT: vld %v0, 8, %s0
982 ; CHECK-NEXT: vst2d.nc.ot %v0, 8, %s0
983 ; CHECK-NEXT: b.l.t (, %s10)
984 %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
985 tail call void @llvm.ve.vl.vst2dncot.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
989 ; Function Attrs: nounwind
; vst2dncot: immediate stride 8 with mask %vm1, VL=256 (VST*irvml form per file header).
990 define void @vst2dncot_vssml_imm(ptr %0) {
991 ; CHECK-LABEL: vst2dncot_vssml_imm:
993 ; CHECK-NEXT: lea %s1, 256
994 ; CHECK-NEXT: lvl %s1
995 ; CHECK-NEXT: vld %v0, 8, %s0
996 ; CHECK-NEXT: vst2d.nc.ot %v0, 8, %s0, %vm1
997 ; CHECK-NEXT: b.l.t (, %s10)
998 %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
999 tail call void @llvm.ve.vl.vst2dncot.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
1003 ; Function Attrs: nounwind
; vstu2d: register stride %1, unmasked, VL=256 (VST*rrvl form per file header).
1004 define void @vstu2d_vssl(ptr %0, i64 %1) {
1005 ; CHECK-LABEL: vstu2d_vssl:
1007 ; CHECK-NEXT: lea %s2, 256
1008 ; CHECK-NEXT: lvl %s2
1009 ; CHECK-NEXT: vld %v0, %s1, %s0
1010 ; CHECK-NEXT: vstu2d %v0, %s1, %s0
1011 ; CHECK-NEXT: b.l.t (, %s10)
1012 %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
1013 tail call void @llvm.ve.vl.vstu2d.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
1017 ; Function Attrs: nounwind writeonly
1018 declare void @llvm.ve.vl.vstu2d.vssl(<256 x double>, i64, ptr, i32)
1020 ; Function Attrs: nounwind
; vstu2d: register stride %1 with mask %vm1, VL=256 (VST*rrvml form per file header).
1021 define void @vstu2d_vssml(ptr %0, i64 %1) {
1022 ; CHECK-LABEL: vstu2d_vssml:
1024 ; CHECK-NEXT: lea %s2, 256
1025 ; CHECK-NEXT: lvl %s2
1026 ; CHECK-NEXT: vld %v0, %s1, %s0
1027 ; CHECK-NEXT: vstu2d %v0, %s1, %s0, %vm1
1028 ; CHECK-NEXT: b.l.t (, %s10)
1029 %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
1030 tail call void @llvm.ve.vl.vstu2d.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
1034 ; Function Attrs: nounwind writeonly
1035 declare void @llvm.ve.vl.vstu2d.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
1037 ; Function Attrs: nounwind
; vstu2d: immediate stride 8, unmasked, VL=256 (VST*irvl form per file header).
1038 define void @vstu2d_vssl_imm(ptr %0) {
1039 ; CHECK-LABEL: vstu2d_vssl_imm:
1041 ; CHECK-NEXT: lea %s1, 256
1042 ; CHECK-NEXT: lvl %s1
1043 ; CHECK-NEXT: vld %v0, 8, %s0
1044 ; CHECK-NEXT: vstu2d %v0, 8, %s0
1045 ; CHECK-NEXT: b.l.t (, %s10)
1046 %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
1047 tail call void @llvm.ve.vl.vstu2d.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
1051 ; Function Attrs: nounwind
; vstu2d: immediate stride 8 with mask %vm1, VL=256 (VST*irvml form per file header).
1052 define void @vstu2d_vssml_imm(ptr %0) {
1053 ; CHECK-LABEL: vstu2d_vssml_imm:
1055 ; CHECK-NEXT: lea %s1, 256
1056 ; CHECK-NEXT: lvl %s1
1057 ; CHECK-NEXT: vld %v0, 8, %s0
1058 ; CHECK-NEXT: vstu2d %v0, 8, %s0, %vm1
1059 ; CHECK-NEXT: b.l.t (, %s10)
1060 %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
1061 tail call void @llvm.ve.vl.vstu2d.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
1065 ; Function Attrs: nounwind
; vstu2dnc: register stride %1, unmasked, VL=256 (VST*rrvl form per file header).
1066 define void @vstu2dnc_vssl(ptr %0, i64 %1) {
1067 ; CHECK-LABEL: vstu2dnc_vssl:
1069 ; CHECK-NEXT: lea %s2, 256
1070 ; CHECK-NEXT: lvl %s2
1071 ; CHECK-NEXT: vld %v0, %s1, %s0
1072 ; CHECK-NEXT: vstu2d.nc %v0, %s1, %s0
1073 ; CHECK-NEXT: b.l.t (, %s10)
1074 %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
1075 tail call void @llvm.ve.vl.vstu2dnc.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
1079 ; Function Attrs: nounwind writeonly
1080 declare void @llvm.ve.vl.vstu2dnc.vssl(<256 x double>, i64, ptr, i32)
1082 ; Function Attrs: nounwind
; vstu2dnc: register stride %1 with mask %vm1, VL=256 (VST*rrvml form per file header).
1083 define void @vstu2dnc_vssml(ptr %0, i64 %1) {
1084 ; CHECK-LABEL: vstu2dnc_vssml:
1086 ; CHECK-NEXT: lea %s2, 256
1087 ; CHECK-NEXT: lvl %s2
1088 ; CHECK-NEXT: vld %v0, %s1, %s0
1089 ; CHECK-NEXT: vstu2d.nc %v0, %s1, %s0, %vm1
1090 ; CHECK-NEXT: b.l.t (, %s10)
1091 %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
1092 tail call void @llvm.ve.vl.vstu2dnc.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
1096 ; Function Attrs: nounwind writeonly
1097 declare void @llvm.ve.vl.vstu2dnc.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
1099 ; Function Attrs: nounwind
; vstu2dnc: immediate stride 8, unmasked, VL=256 (VST*irvl form per file header).
1100 define void @vstu2dnc_vssl_imm(ptr %0) {
1101 ; CHECK-LABEL: vstu2dnc_vssl_imm:
1103 ; CHECK-NEXT: lea %s1, 256
1104 ; CHECK-NEXT: lvl %s1
1105 ; CHECK-NEXT: vld %v0, 8, %s0
1106 ; CHECK-NEXT: vstu2d.nc %v0, 8, %s0
1107 ; CHECK-NEXT: b.l.t (, %s10)
1108 %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
1109 tail call void @llvm.ve.vl.vstu2dnc.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
1113 ; Function Attrs: nounwind
; vstu2dnc: immediate stride 8 with mask %vm1, VL=256 (VST*irvml form per file header).
1114 define void @vstu2dnc_vssml_imm(ptr %0) {
1115 ; CHECK-LABEL: vstu2dnc_vssml_imm:
1117 ; CHECK-NEXT: lea %s1, 256
1118 ; CHECK-NEXT: lvl %s1
1119 ; CHECK-NEXT: vld %v0, 8, %s0
1120 ; CHECK-NEXT: vstu2d.nc %v0, 8, %s0, %vm1
1121 ; CHECK-NEXT: b.l.t (, %s10)
1122 %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
1123 tail call void @llvm.ve.vl.vstu2dnc.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
1127 ; Function Attrs: nounwind
; vstu2dot: register stride %1, unmasked, VL=256 (VST*rrvl form per file header).
1128 define void @vstu2dot_vssl(ptr %0, i64 %1) {
1129 ; CHECK-LABEL: vstu2dot_vssl:
1131 ; CHECK-NEXT: lea %s2, 256
1132 ; CHECK-NEXT: lvl %s2
1133 ; CHECK-NEXT: vld %v0, %s1, %s0
1134 ; CHECK-NEXT: vstu2d.ot %v0, %s1, %s0
1135 ; CHECK-NEXT: b.l.t (, %s10)
1136 %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
1137 tail call void @llvm.ve.vl.vstu2dot.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
1141 ; Function Attrs: nounwind writeonly
1142 declare void @llvm.ve.vl.vstu2dot.vssl(<256 x double>, i64, ptr, i32)
1144 ; Function Attrs: nounwind
; vstu2dot: register stride %1 with mask %vm1, VL=256 (VST*rrvml form per file header).
1145 define void @vstu2dot_vssml(ptr %0, i64 %1) {
1146 ; CHECK-LABEL: vstu2dot_vssml:
1148 ; CHECK-NEXT: lea %s2, 256
1149 ; CHECK-NEXT: lvl %s2
1150 ; CHECK-NEXT: vld %v0, %s1, %s0
1151 ; CHECK-NEXT: vstu2d.ot %v0, %s1, %s0, %vm1
1152 ; CHECK-NEXT: b.l.t (, %s10)
1153 %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
1154 tail call void @llvm.ve.vl.vstu2dot.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
1158 ; Function Attrs: nounwind writeonly
1159 declare void @llvm.ve.vl.vstu2dot.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
1161 ; Function Attrs: nounwind
; vstu2dot: immediate stride 8, unmasked, VL=256 (VST*irvl form per file header).
1162 define void @vstu2dot_vssl_imm(ptr %0) {
1163 ; CHECK-LABEL: vstu2dot_vssl_imm:
1165 ; CHECK-NEXT: lea %s1, 256
1166 ; CHECK-NEXT: lvl %s1
1167 ; CHECK-NEXT: vld %v0, 8, %s0
1168 ; CHECK-NEXT: vstu2d.ot %v0, 8, %s0
1169 ; CHECK-NEXT: b.l.t (, %s10)
1170 %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
1171 tail call void @llvm.ve.vl.vstu2dot.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
1175 ; Function Attrs: nounwind
; vstu2dot: immediate stride 8 with mask %vm1, VL=256 (VST*irvml form per file header).
1176 define void @vstu2dot_vssml_imm(ptr %0) {
1177 ; CHECK-LABEL: vstu2dot_vssml_imm:
1179 ; CHECK-NEXT: lea %s1, 256
1180 ; CHECK-NEXT: lvl %s1
1181 ; CHECK-NEXT: vld %v0, 8, %s0
1182 ; CHECK-NEXT: vstu2d.ot %v0, 8, %s0, %vm1
1183 ; CHECK-NEXT: b.l.t (, %s10)
1184 %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
1185 tail call void @llvm.ve.vl.vstu2dot.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
1189 ; Function Attrs: nounwind
; vstu2dncot: register stride %1, unmasked, VL=256 (VST*rrvl form per file header).
1190 define void @vstu2dncot_vssl(ptr %0, i64 %1) {
1191 ; CHECK-LABEL: vstu2dncot_vssl:
1193 ; CHECK-NEXT: lea %s2, 256
1194 ; CHECK-NEXT: lvl %s2
1195 ; CHECK-NEXT: vld %v0, %s1, %s0
1196 ; CHECK-NEXT: vstu2d.nc.ot %v0, %s1, %s0
1197 ; CHECK-NEXT: b.l.t (, %s10)
1198 %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
1199 tail call void @llvm.ve.vl.vstu2dncot.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
1203 ; Function Attrs: nounwind writeonly
1204 declare void @llvm.ve.vl.vstu2dncot.vssl(<256 x double>, i64, ptr, i32)
1206 ; Function Attrs: nounwind
; vstu2dncot: register stride %1 with mask %vm1, VL=256 (VST*rrvml form per file header).
1207 define void @vstu2dncot_vssml(ptr %0, i64 %1) {
1208 ; CHECK-LABEL: vstu2dncot_vssml:
1210 ; CHECK-NEXT: lea %s2, 256
1211 ; CHECK-NEXT: lvl %s2
1212 ; CHECK-NEXT: vld %v0, %s1, %s0
1213 ; CHECK-NEXT: vstu2d.nc.ot %v0, %s1, %s0, %vm1
1214 ; CHECK-NEXT: b.l.t (, %s10)
1215 %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
1216 tail call void @llvm.ve.vl.vstu2dncot.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
1220 ; Function Attrs: nounwind writeonly
1221 declare void @llvm.ve.vl.vstu2dncot.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
1223 ; Function Attrs: nounwind
; vstu2dncot: immediate stride 8, unmasked, VL=256 (VST*irvl form per file header).
1224 define void @vstu2dncot_vssl_imm(ptr %0) {
1225 ; CHECK-LABEL: vstu2dncot_vssl_imm:
1227 ; CHECK-NEXT: lea %s1, 256
1228 ; CHECK-NEXT: lvl %s1
1229 ; CHECK-NEXT: vld %v0, 8, %s0
1230 ; CHECK-NEXT: vstu2d.nc.ot %v0, 8, %s0
1231 ; CHECK-NEXT: b.l.t (, %s10)
1232 %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
1233 tail call void @llvm.ve.vl.vstu2dncot.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
1237 ; Function Attrs: nounwind
; vstu2dncot: immediate stride 8 with mask %vm1, VL=256 (VST*irvml form per file header).
1238 define void @vstu2dncot_vssml_imm(ptr %0) {
1239 ; CHECK-LABEL: vstu2dncot_vssml_imm:
1241 ; CHECK-NEXT: lea %s1, 256
1242 ; CHECK-NEXT: lvl %s1
1243 ; CHECK-NEXT: vld %v0, 8, %s0
1244 ; CHECK-NEXT: vstu2d.nc.ot %v0, 8, %s0, %vm1
1245 ; CHECK-NEXT: b.l.t (, %s10)
1246 %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
1247 tail call void @llvm.ve.vl.vstu2dncot.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
1251 ; Function Attrs: nounwind
; vstl2d: register stride %1, unmasked, VL=256 (VST*rrvl form per file header).
1252 define void @vstl2d_vssl(ptr %0, i64 %1) {
1253 ; CHECK-LABEL: vstl2d_vssl:
1255 ; CHECK-NEXT: lea %s2, 256
1256 ; CHECK-NEXT: lvl %s2
1257 ; CHECK-NEXT: vld %v0, %s1, %s0
1258 ; CHECK-NEXT: vstl2d %v0, %s1, %s0
1259 ; CHECK-NEXT: b.l.t (, %s10)
1260 %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
1261 tail call void @llvm.ve.vl.vstl2d.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
1265 ; Function Attrs: nounwind writeonly
1266 declare void @llvm.ve.vl.vstl2d.vssl(<256 x double>, i64, ptr, i32)
1268 ; Function Attrs: nounwind
; vstl2d: register stride %1 with mask %vm1, VL=256 (VST*rrvml form per file header).
1269 define void @vstl2d_vssml(ptr %0, i64 %1) {
1270 ; CHECK-LABEL: vstl2d_vssml:
1272 ; CHECK-NEXT: lea %s2, 256
1273 ; CHECK-NEXT: lvl %s2
1274 ; CHECK-NEXT: vld %v0, %s1, %s0
1275 ; CHECK-NEXT: vstl2d %v0, %s1, %s0, %vm1
1276 ; CHECK-NEXT: b.l.t (, %s10)
1277 %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
1278 tail call void @llvm.ve.vl.vstl2d.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
1282 ; Function Attrs: nounwind writeonly
1283 declare void @llvm.ve.vl.vstl2d.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
1285 ; Function Attrs: nounwind
; vstl2d: immediate stride 8, unmasked, VL=256 (VST*irvl form per file header).
1286 define void @vstl2d_vssl_imm(ptr %0) {
1287 ; CHECK-LABEL: vstl2d_vssl_imm:
1289 ; CHECK-NEXT: lea %s1, 256
1290 ; CHECK-NEXT: lvl %s1
1291 ; CHECK-NEXT: vld %v0, 8, %s0
1292 ; CHECK-NEXT: vstl2d %v0, 8, %s0
1293 ; CHECK-NEXT: b.l.t (, %s10)
1294 %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
1295 tail call void @llvm.ve.vl.vstl2d.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
1299 ; Function Attrs: nounwind
; vstl2d: immediate stride 8 with mask %vm1, VL=256 (VST*irvml form per file header).
1300 define void @vstl2d_vssml_imm(ptr %0) {
1301 ; CHECK-LABEL: vstl2d_vssml_imm:
1303 ; CHECK-NEXT: lea %s1, 256
1304 ; CHECK-NEXT: lvl %s1
1305 ; CHECK-NEXT: vld %v0, 8, %s0
1306 ; CHECK-NEXT: vstl2d %v0, 8, %s0, %vm1
1307 ; CHECK-NEXT: b.l.t (, %s10)
1308 %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
1309 tail call void @llvm.ve.vl.vstl2d.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
1313 ; Function Attrs: nounwind
; vstl2dnc: register stride %1, unmasked, VL=256 (VST*rrvl form per file header).
1314 define void @vstl2dnc_vssl(ptr %0, i64 %1) {
1315 ; CHECK-LABEL: vstl2dnc_vssl:
1317 ; CHECK-NEXT: lea %s2, 256
1318 ; CHECK-NEXT: lvl %s2
1319 ; CHECK-NEXT: vld %v0, %s1, %s0
1320 ; CHECK-NEXT: vstl2d.nc %v0, %s1, %s0
1321 ; CHECK-NEXT: b.l.t (, %s10)
1322 %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
1323 tail call void @llvm.ve.vl.vstl2dnc.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
1327 ; Function Attrs: nounwind writeonly
1328 declare void @llvm.ve.vl.vstl2dnc.vssl(<256 x double>, i64, ptr, i32)
1330 ; Function Attrs: nounwind
; vstl2dnc: register stride %1 with mask %vm1, VL=256 (VST*rrvml form per file header).
1331 define void @vstl2dnc_vssml(ptr %0, i64 %1) {
1332 ; CHECK-LABEL: vstl2dnc_vssml:
1334 ; CHECK-NEXT: lea %s2, 256
1335 ; CHECK-NEXT: lvl %s2
1336 ; CHECK-NEXT: vld %v0, %s1, %s0
1337 ; CHECK-NEXT: vstl2d.nc %v0, %s1, %s0, %vm1
1338 ; CHECK-NEXT: b.l.t (, %s10)
1339 %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
1340 tail call void @llvm.ve.vl.vstl2dnc.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
1344 ; Function Attrs: nounwind writeonly
1345 declare void @llvm.ve.vl.vstl2dnc.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
1347 ; Function Attrs: nounwind
; vstl2dnc: immediate stride 8, unmasked, VL=256 (VST*irvl form per file header).
1348 define void @vstl2dnc_vssl_imm(ptr %0) {
1349 ; CHECK-LABEL: vstl2dnc_vssl_imm:
1351 ; CHECK-NEXT: lea %s1, 256
1352 ; CHECK-NEXT: lvl %s1
1353 ; CHECK-NEXT: vld %v0, 8, %s0
1354 ; CHECK-NEXT: vstl2d.nc %v0, 8, %s0
1355 ; CHECK-NEXT: b.l.t (, %s10)
1356 %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
1357 tail call void @llvm.ve.vl.vstl2dnc.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
1361 ; Function Attrs: nounwind
; vstl2dnc: immediate stride 8 with mask %vm1, VL=256 (VST*irvml form per file header).
1362 define void @vstl2dnc_vssml_imm(ptr %0) {
1363 ; CHECK-LABEL: vstl2dnc_vssml_imm:
1365 ; CHECK-NEXT: lea %s1, 256
1366 ; CHECK-NEXT: lvl %s1
1367 ; CHECK-NEXT: vld %v0, 8, %s0
1368 ; CHECK-NEXT: vstl2d.nc %v0, 8, %s0, %vm1
1369 ; CHECK-NEXT: b.l.t (, %s10)
1370 %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
1371 tail call void @llvm.ve.vl.vstl2dnc.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
1375 ; Function Attrs: nounwind
; vstl2dot: register stride %1, unmasked, VL=256 (VST*rrvl form per file header).
1376 define void @vstl2dot_vssl(ptr %0, i64 %1) {
1377 ; CHECK-LABEL: vstl2dot_vssl:
1379 ; CHECK-NEXT: lea %s2, 256
1380 ; CHECK-NEXT: lvl %s2
1381 ; CHECK-NEXT: vld %v0, %s1, %s0
1382 ; CHECK-NEXT: vstl2d.ot %v0, %s1, %s0
1383 ; CHECK-NEXT: b.l.t (, %s10)
1384 %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
1385 tail call void @llvm.ve.vl.vstl2dot.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
1389 ; Function Attrs: nounwind writeonly
1390 declare void @llvm.ve.vl.vstl2dot.vssl(<256 x double>, i64, ptr, i32)
1392 ; Function Attrs: nounwind
; vstl2dot: register stride %1 with mask %vm1, VL=256 (VST*rrvml form per file header).
1393 define void @vstl2dot_vssml(ptr %0, i64 %1) {
1394 ; CHECK-LABEL: vstl2dot_vssml:
1396 ; CHECK-NEXT: lea %s2, 256
1397 ; CHECK-NEXT: lvl %s2
1398 ; CHECK-NEXT: vld %v0, %s1, %s0
1399 ; CHECK-NEXT: vstl2d.ot %v0, %s1, %s0, %vm1
1400 ; CHECK-NEXT: b.l.t (, %s10)
1401 %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
1402 tail call void @llvm.ve.vl.vstl2dot.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
1406 ; Function Attrs: nounwind writeonly
1407 declare void @llvm.ve.vl.vstl2dot.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
1409 ; Function Attrs: nounwind
; vstl2dot: immediate stride 8, unmasked, VL=256 (VST*irvl form per file header).
1410 define void @vstl2dot_vssl_imm(ptr %0) {
1411 ; CHECK-LABEL: vstl2dot_vssl_imm:
1413 ; CHECK-NEXT: lea %s1, 256
1414 ; CHECK-NEXT: lvl %s1
1415 ; CHECK-NEXT: vld %v0, 8, %s0
1416 ; CHECK-NEXT: vstl2d.ot %v0, 8, %s0
1417 ; CHECK-NEXT: b.l.t (, %s10)
1418 %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
1419 tail call void @llvm.ve.vl.vstl2dot.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
1423 ; Function Attrs: nounwind
; vstl2dot: immediate stride 8 with mask %vm1, VL=256 (VST*irvml form per file header).
1424 define void @vstl2dot_vssml_imm(ptr %0) {
1425 ; CHECK-LABEL: vstl2dot_vssml_imm:
1427 ; CHECK-NEXT: lea %s1, 256
1428 ; CHECK-NEXT: lvl %s1
1429 ; CHECK-NEXT: vld %v0, 8, %s0
1430 ; CHECK-NEXT: vstl2d.ot %v0, 8, %s0, %vm1
1431 ; CHECK-NEXT: b.l.t (, %s10)
1432 %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
1433 tail call void @llvm.ve.vl.vstl2dot.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
1437 ; Function Attrs: nounwind
; vstl2dncot: register stride %1, unmasked, VL=256 (VST*rrvl form per file header).
1438 define void @vstl2dncot_vssl(ptr %0, i64 %1) {
1439 ; CHECK-LABEL: vstl2dncot_vssl:
1441 ; CHECK-NEXT: lea %s2, 256
1442 ; CHECK-NEXT: lvl %s2
1443 ; CHECK-NEXT: vld %v0, %s1, %s0
1444 ; CHECK-NEXT: vstl2d.nc.ot %v0, %s1, %s0
1445 ; CHECK-NEXT: b.l.t (, %s10)
1446 %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
1447 tail call void @llvm.ve.vl.vstl2dncot.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
1451 ; Function Attrs: nounwind writeonly
1452 declare void @llvm.ve.vl.vstl2dncot.vssl(<256 x double>, i64, ptr, i32)
1454 ; Function Attrs: nounwind
; vstl2dncot: register stride %1 with mask %vm1, VL=256 (VST*rrvml form per file header).
1455 define void @vstl2dncot_vssml(ptr %0, i64 %1) {
1456 ; CHECK-LABEL: vstl2dncot_vssml:
1458 ; CHECK-NEXT: lea %s2, 256
1459 ; CHECK-NEXT: lvl %s2
1460 ; CHECK-NEXT: vld %v0, %s1, %s0
1461 ; CHECK-NEXT: vstl2d.nc.ot %v0, %s1, %s0, %vm1
1462 ; CHECK-NEXT: b.l.t (, %s10)
1463 %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
1464 tail call void @llvm.ve.vl.vstl2dncot.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
1468 ; Function Attrs: nounwind writeonly
1469 declare void @llvm.ve.vl.vstl2dncot.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
1471 ; Function Attrs: nounwind
; vstl2dncot: immediate stride 8, unmasked, VL=256 (VST*irvl form per file header).
1472 define void @vstl2dncot_vssl_imm(ptr %0) {
1473 ; CHECK-LABEL: vstl2dncot_vssl_imm:
1475 ; CHECK-NEXT: lea %s1, 256
1476 ; CHECK-NEXT: lvl %s1
1477 ; CHECK-NEXT: vld %v0, 8, %s0
1478 ; CHECK-NEXT: vstl2d.nc.ot %v0, 8, %s0
1479 ; CHECK-NEXT: b.l.t (, %s10)
1480 %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
1481 tail call void @llvm.ve.vl.vstl2dncot.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
1485 ; Function Attrs: nounwind
; vstl2dncot: immediate stride 8 with mask %vm1, VL=256 (VST*irvml form per file header).
1486 define void @vstl2dncot_vssml_imm(ptr %0) {
1487 ; CHECK-LABEL: vstl2dncot_vssml_imm:
1489 ; CHECK-NEXT: lea %s1, 256
1490 ; CHECK-NEXT: lvl %s1
1491 ; CHECK-NEXT: vld %v0, 8, %s0
1492 ; CHECK-NEXT: vstl2d.nc.ot %v0, 8, %s0, %vm1
1493 ; CHECK-NEXT: b.l.t (, %s10)
1494 %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
1495 tail call void @llvm.ve.vl.vstl2dncot.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)