; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
;;; Test vector shift left arithmetic intrinsic instructions
;;;
;;; Note:
;;;   We test VSLA*vvl, VSLA*vvl_v, VSLA*vrl, VSLA*vrl_v, VSLA*vil, VSLA*vil_v,
;;;   VSLA*vvml_v, VSLA*vrml_v, VSLA*viml_v, PVSLA*vvl, PVSLA*vvl_v, PVSLA*vrl,
;;;   PVSLA*vrl_v, PVSLA*vvml_v, and PVSLA*vrml_v instructions.
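;;;
;;; Operand-suffix key for the intrinsic names below (a reading inferred from
;;; the intrinsic signatures and tests in this file, not an authoritative
;;; reference): the leading letter names the result (always a vector here);
;;; the following letters name the operands in order, "v" = vector operand,
;;; "s" = scalar operand (a register, or a small immediate in the *_imm
;;; tests), "m" = 256-bit mask, "M" = 512-bit mask used with the packed
;;; PVSLA form, an extra "v" before the final letter = the pass-through
;;; vector providing result elements the operation leaves unwritten, and the
;;; final "l" = the explicit vector length argument.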
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vslawsx_vvvl(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vslawsx_vvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsla.w.sx %v0, %v0, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vslawsx.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vslawsx.vvvl(<256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vslawsx_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vslawsx_vvvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsla.w.sx %v2, %v0, %v1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vslawsx.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vslawsx.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vslawsx_vvsl(<256 x double> %0, i32 signext %1) {
; CHECK-LABEL: vslawsx_vvsl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    and %s0, %s0, (32)0
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vsla.w.sx %v0, %v0, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vslawsx.vvsl(<256 x double> %0, i32 %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vslawsx.vvsl(<256 x double>, i32, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vslawsx_vvsvl(<256 x double> %0, i32 signext %1, <256 x double> %2) {
; CHECK-LABEL: vslawsx_vvsvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    and %s0, %s0, (32)0
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vsla.w.sx %v1, %v0, %s0
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vslawsx.vvsvl(<256 x double> %0, i32 %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vslawsx.vvsvl(<256 x double>, i32, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vslawsx_vvsl_imm(<256 x double> %0) {
; CHECK-LABEL: vslawsx_vvsl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsla.w.sx %v0, %v0, 8
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vslawsx.vvsl(<256 x double> %0, i32 8, i32 256)
  ret <256 x double> %2
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vslawsx_vvsvl_imm(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vslawsx_vvsvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsla.w.sx %v1, %v0, 8
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vslawsx.vvsvl(<256 x double> %0, i32 8, <256 x double> %1, i32 128)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vslawsx_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vslawsx_vvvmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsla.w.sx %v2, %v0, %v1, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vslawsx.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vslawsx.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vslawsx_vvsmvl(<256 x double> %0, i32 signext %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vslawsx_vvsmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    and %s0, %s0, (32)0
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vsla.w.sx %v1, %v0, %s0, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vslawsx.vvsmvl(<256 x double> %0, i32 %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vslawsx.vvsmvl(<256 x double>, i32, <256 x i1>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vslawsx_vvsmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
; CHECK-LABEL: vslawsx_vvsmvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsla.w.sx %v1, %v0, 8, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vslawsx.vvsmvl(<256 x double> %0, i32 8, <256 x i1> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vslawzx_vvvl(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vslawzx_vvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsla.w.zx %v0, %v0, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vslawzx.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vslawzx.vvvl(<256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vslawzx_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vslawzx_vvvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsla.w.zx %v2, %v0, %v1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vslawzx.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vslawzx.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vslawzx_vvsl(<256 x double> %0, i32 signext %1) {
; CHECK-LABEL: vslawzx_vvsl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    and %s0, %s0, (32)0
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vsla.w.zx %v0, %v0, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vslawzx.vvsl(<256 x double> %0, i32 %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vslawzx.vvsl(<256 x double>, i32, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vslawzx_vvsvl(<256 x double> %0, i32 signext %1, <256 x double> %2) {
; CHECK-LABEL: vslawzx_vvsvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    and %s0, %s0, (32)0
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vsla.w.zx %v1, %v0, %s0
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vslawzx.vvsvl(<256 x double> %0, i32 %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vslawzx.vvsvl(<256 x double>, i32, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vslawzx_vvsl_imm(<256 x double> %0) {
; CHECK-LABEL: vslawzx_vvsl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsla.w.zx %v0, %v0, 8
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vslawzx.vvsl(<256 x double> %0, i32 8, i32 256)
  ret <256 x double> %2
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vslawzx_vvsvl_imm(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vslawzx_vvsvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsla.w.zx %v1, %v0, 8
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vslawzx.vvsvl(<256 x double> %0, i32 8, <256 x double> %1, i32 128)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vslawzx_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vslawzx_vvvmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsla.w.zx %v2, %v0, %v1, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vslawzx.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vslawzx.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vslawzx_vvsmvl(<256 x double> %0, i32 signext %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vslawzx_vvsmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    and %s0, %s0, (32)0
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vsla.w.zx %v1, %v0, %s0, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vslawzx.vvsmvl(<256 x double> %0, i32 %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vslawzx.vvsmvl(<256 x double>, i32, <256 x i1>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vslawzx_vvsmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
; CHECK-LABEL: vslawzx_vvsmvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsla.w.zx %v1, %v0, 8, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vslawzx.vvsmvl(<256 x double> %0, i32 8, <256 x i1> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vslal_vvvl(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vslal_vvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsla.l %v0, %v0, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vslal.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vslal.vvvl(<256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vslal_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vslal_vvvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsla.l %v2, %v0, %v1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vslal.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vslal.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vslal_vvsl(<256 x double> %0, i64 %1) {
; CHECK-LABEL: vslal_vvsl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vsla.l %v0, %v0, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vslal.vvsl(<256 x double> %0, i64 %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vslal.vvsl(<256 x double>, i64, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vslal_vvsvl(<256 x double> %0, i64 %1, <256 x double> %2) {
; CHECK-LABEL: vslal_vvsvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vsla.l %v1, %v0, %s0
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vslal.vvsvl(<256 x double> %0, i64 %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vslal.vvsvl(<256 x double>, i64, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vslal_vvsl_imm(<256 x double> %0) {
; CHECK-LABEL: vslal_vvsl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsla.l %v0, %v0, 8
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vslal.vvsl(<256 x double> %0, i64 8, i32 256)
  ret <256 x double> %2
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vslal_vvsvl_imm(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vslal_vvsvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsla.l %v1, %v0, 8
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vslal.vvsvl(<256 x double> %0, i64 8, <256 x double> %1, i32 128)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vslal_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vslal_vvvmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsla.l %v2, %v0, %v1, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vslal.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vslal.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vslal_vvsmvl(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vslal_vvsmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vsla.l %v1, %v0, %s0, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vslal.vvsmvl(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vslal.vvsmvl(<256 x double>, i64, <256 x i1>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vslal_vvsmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
; CHECK-LABEL: vslal_vvsmvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsla.l %v1, %v0, 8, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vslal.vvsmvl(<256 x double> %0, i64 8, <256 x i1> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvsla_vvvl(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: pvsla_vvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    pvsla %v0, %v0, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.pvsla.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.pvsla.vvvl(<256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvsla_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: pvsla_vvvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    pvsla %v2, %v0, %v1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.pvsla.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.pvsla.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvsla_vvsl(<256 x double> %0, i64 %1) {
; CHECK-LABEL: pvsla_vvsl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    pvsla %v0, %v0, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.pvsla.vvsl(<256 x double> %0, i64 %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.pvsla.vvsl(<256 x double>, i64, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvsla_vvsvl(<256 x double> %0, i64 %1, <256 x double> %2) {
; CHECK-LABEL: pvsla_vvsvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    pvsla %v1, %v0, %s0
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.pvsla.vvsvl(<256 x double> %0, i64 %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.pvsla.vvsvl(<256 x double>, i64, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvsla_vvvMvl(<256 x double> %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: pvsla_vvvMvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    pvsla %v2, %v0, %v1, %vm2
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.pvsla.vvvMvl(<256 x double> %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.pvsla.vvvMvl(<256 x double>, <256 x double>, <512 x i1>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvsla_vvsMvl(<256 x double> %0, i64 %1, <512 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: pvsla_vvsMvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    pvsla %v1, %v0, %s0, %vm2
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.pvsla.vvsMvl(<256 x double> %0, i64 %1, <512 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.pvsla.vvsMvl(<256 x double>, i64, <512 x i1>, <256 x double>, i32)