; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
;;; Test vector floating-point fused negative multiply-add intrinsic instructions
;;;
;;; Note:
;;;   We test VFNMAD*vvvl, VFNMAD*vvvl_v, VFNMAD*rvvl, VFNMAD*rvvl_v,
;;;   VFNMAD*vrvl, VFNMAD*vrvl_v, VFNMAD*vvvml_v, VFNMAD*rvvml_v,
;;;   VFNMAD*vrvml_v, PVFNMAD*vvvl, PVFNMAD*vvvl_v, PVFNMAD*rvvl,
;;;   PVFNMAD*rvvl_v, PVFNMAD*vrvl, PVFNMAD*vrvl_v, PVFNMAD*vvvml_v,
;;;   PVFNMAD*rvvml_v, and PVFNMAD*vrvml_v instructions.
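;;;
;;; Informal note (inferred from the signatures below, not normative): the
;;; suffix letters of each intrinsic describe, in order, the result and
;;; operands: 'v' a vector, 's' a scalar, 'm' a <256 x i1> mask, 'M' a
;;; <512 x i1> mask, and a final 'l' the explicit vector length; an extra
;;; trailing 'v' before 'l' is a pass-through vector.  Each function calls
;;; one llvm.ve.vl.* intrinsic, and its FileCheck lines verify the VE
;;; instruction sequence it lowers to.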
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vfnmadd_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vfnmadd_vvvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vfnmad.d %v0, %v0, %v1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vfnmadd.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 256)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vfnmadd.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vfnmadd_vvvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, <256 x double> %3) {
; CHECK-LABEL: vfnmadd_vvvvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vfnmad.d %v3, %v0, %v1, %v2
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v3
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vfnmadd.vvvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vfnmadd.vvvvvl(<256 x double>, <256 x double>, <256 x double>, <256 x double>, i32)
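;;; A note on the pass-through variants such as the one above (inferred from
;;; the checked output, not normative): the fused operation is issued at the
;;; requested vector length (128 here) into the register that holds the
;;; pass-through value, and the result is then copied into %v0 with a plain
;;; vor at full vector length 256, which is how elements beyond the requested
;;; length keep their pass-through values.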
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vfnmadd_vsvvl(double %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vfnmadd_vsvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vfnmad.d %v0, %s0, %v0, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vfnmadd.vsvvl(double %0, <256 x double> %1, <256 x double> %2, i32 256)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vfnmadd.vsvvl(double, <256 x double>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vfnmadd_vsvvvl(double %0, <256 x double> %1, <256 x double> %2, <256 x double> %3) {
; CHECK-LABEL: vfnmadd_vsvvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vfnmad.d %v2, %s0, %v0, %v1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vfnmadd.vsvvvl(double %0, <256 x double> %1, <256 x double> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vfnmadd.vsvvvl(double, <256 x double>, <256 x double>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vfnmadd_vvsvl(<256 x double> %0, double %1, <256 x double> %2) {
; CHECK-LABEL: vfnmadd_vvsvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vfnmad.d %v0, %v0, %s0, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vfnmadd.vvsvl(<256 x double> %0, double %1, <256 x double> %2, i32 256)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vfnmadd.vvsvl(<256 x double>, double, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vfnmadd_vvsvvl(<256 x double> %0, double %1, <256 x double> %2, <256 x double> %3) {
; CHECK-LABEL: vfnmadd_vvsvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vfnmad.d %v2, %v0, %s0, %v1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vfnmadd.vvsvvl(<256 x double> %0, double %1, <256 x double> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vfnmadd.vvsvvl(<256 x double>, double, <256 x double>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vfnmadd_vvvvmvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, <256 x i1> %3, <256 x double> %4) {
; CHECK-LABEL: vfnmadd_vvvvmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vfnmad.d %v3, %v0, %v1, %v2, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v3
; CHECK-NEXT:    b.l.t (, %s10)
  %6 = tail call fast <256 x double> @llvm.ve.vl.vfnmadd.vvvvmvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, <256 x i1> %3, <256 x double> %4, i32 128)
  ret <256 x double> %6
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vfnmadd.vvvvmvl(<256 x double>, <256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vfnmadd_vsvvmvl(double %0, <256 x double> %1, <256 x double> %2, <256 x i1> %3, <256 x double> %4) {
; CHECK-LABEL: vfnmadd_vsvvmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vfnmad.d %v2, %s0, %v0, %v1, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %6 = tail call fast <256 x double> @llvm.ve.vl.vfnmadd.vsvvmvl(double %0, <256 x double> %1, <256 x double> %2, <256 x i1> %3, <256 x double> %4, i32 128)
  ret <256 x double> %6
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vfnmadd.vsvvmvl(double, <256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vfnmadd_vvsvmvl(<256 x double> %0, double %1, <256 x double> %2, <256 x i1> %3, <256 x double> %4) {
; CHECK-LABEL: vfnmadd_vvsvmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vfnmad.d %v2, %v0, %s0, %v1, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %6 = tail call fast <256 x double> @llvm.ve.vl.vfnmadd.vvsvmvl(<256 x double> %0, double %1, <256 x double> %2, <256 x i1> %3, <256 x double> %4, i32 128)
  ret <256 x double> %6
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vfnmadd.vvsvmvl(<256 x double>, double, <256 x double>, <256 x i1>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vfnmads_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vfnmads_vvvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vfnmad.s %v0, %v0, %v1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vfnmads.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 256)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vfnmads.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vfnmads_vvvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, <256 x double> %3) {
; CHECK-LABEL: vfnmads_vvvvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vfnmad.s %v3, %v0, %v1, %v2
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v3
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vfnmads.vvvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vfnmads.vvvvvl(<256 x double>, <256 x double>, <256 x double>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vfnmads_vsvvl(float %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vfnmads_vsvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vfnmad.s %v0, %s0, %v0, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vfnmads.vsvvl(float %0, <256 x double> %1, <256 x double> %2, i32 256)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vfnmads.vsvvl(float, <256 x double>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vfnmads_vsvvvl(float %0, <256 x double> %1, <256 x double> %2, <256 x double> %3) {
; CHECK-LABEL: vfnmads_vsvvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vfnmad.s %v2, %s0, %v0, %v1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vfnmads.vsvvvl(float %0, <256 x double> %1, <256 x double> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vfnmads.vsvvvl(float, <256 x double>, <256 x double>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vfnmads_vvsvl(<256 x double> %0, float %1, <256 x double> %2) {
; CHECK-LABEL: vfnmads_vvsvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vfnmad.s %v0, %v0, %s0, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vfnmads.vvsvl(<256 x double> %0, float %1, <256 x double> %2, i32 256)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vfnmads.vvsvl(<256 x double>, float, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vfnmads_vvsvvl(<256 x double> %0, float %1, <256 x double> %2, <256 x double> %3) {
; CHECK-LABEL: vfnmads_vvsvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vfnmad.s %v2, %v0, %s0, %v1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vfnmads.vvsvvl(<256 x double> %0, float %1, <256 x double> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vfnmads.vvsvvl(<256 x double>, float, <256 x double>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vfnmads_vvvvmvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, <256 x i1> %3, <256 x double> %4) {
; CHECK-LABEL: vfnmads_vvvvmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vfnmad.s %v3, %v0, %v1, %v2, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v3
; CHECK-NEXT:    b.l.t (, %s10)
  %6 = tail call fast <256 x double> @llvm.ve.vl.vfnmads.vvvvmvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, <256 x i1> %3, <256 x double> %4, i32 128)
  ret <256 x double> %6
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vfnmads.vvvvmvl(<256 x double>, <256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vfnmads_vsvvmvl(float %0, <256 x double> %1, <256 x double> %2, <256 x i1> %3, <256 x double> %4) {
; CHECK-LABEL: vfnmads_vsvvmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vfnmad.s %v2, %s0, %v0, %v1, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %6 = tail call fast <256 x double> @llvm.ve.vl.vfnmads.vsvvmvl(float %0, <256 x double> %1, <256 x double> %2, <256 x i1> %3, <256 x double> %4, i32 128)
  ret <256 x double> %6
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vfnmads.vsvvmvl(float, <256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vfnmads_vvsvmvl(<256 x double> %0, float %1, <256 x double> %2, <256 x i1> %3, <256 x double> %4) {
; CHECK-LABEL: vfnmads_vvsvmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vfnmad.s %v2, %v0, %s0, %v1, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %6 = tail call fast <256 x double> @llvm.ve.vl.vfnmads.vvsvmvl(<256 x double> %0, float %1, <256 x double> %2, <256 x i1> %3, <256 x double> %4, i32 128)
  ret <256 x double> %6
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vfnmads.vvsvmvl(<256 x double>, float, <256 x double>, <256 x i1>, <256 x double>, i32)
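;;; The PVFNMAD tests below follow the same pattern for the packed variant;
;;; here the scalar operand is an i64 (presumably two packed 32-bit values)
;;; and the masked forms take a <512 x i1> mask, which suggests one mask bit
;;; per 32-bit half.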
; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvfnmad_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: pvfnmad_vvvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    pvfnmad %v0, %v0, %v1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.pvfnmad.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 256)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.pvfnmad.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvfnmad_vvvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, <256 x double> %3) {
; CHECK-LABEL: pvfnmad_vvvvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    pvfnmad %v3, %v0, %v1, %v2
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v3
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.pvfnmad.vvvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.pvfnmad.vvvvvl(<256 x double>, <256 x double>, <256 x double>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvfnmad_vsvvl(i64 %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: pvfnmad_vsvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    pvfnmad %v0, %s0, %v0, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.pvfnmad.vsvvl(i64 %0, <256 x double> %1, <256 x double> %2, i32 256)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.pvfnmad.vsvvl(i64, <256 x double>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvfnmad_vsvvvl(i64 %0, <256 x double> %1, <256 x double> %2, <256 x double> %3) {
; CHECK-LABEL: pvfnmad_vsvvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    pvfnmad %v2, %s0, %v0, %v1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.pvfnmad.vsvvvl(i64 %0, <256 x double> %1, <256 x double> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.pvfnmad.vsvvvl(i64, <256 x double>, <256 x double>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvfnmad_vvsvl(<256 x double> %0, i64 %1, <256 x double> %2) {
; CHECK-LABEL: pvfnmad_vvsvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    pvfnmad %v0, %v0, %s0, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.pvfnmad.vvsvl(<256 x double> %0, i64 %1, <256 x double> %2, i32 256)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.pvfnmad.vvsvl(<256 x double>, i64, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvfnmad_vvsvvl(<256 x double> %0, i64 %1, <256 x double> %2, <256 x double> %3) {
; CHECK-LABEL: pvfnmad_vvsvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    pvfnmad %v2, %v0, %s0, %v1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.pvfnmad.vvsvvl(<256 x double> %0, i64 %1, <256 x double> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.pvfnmad.vvsvvl(<256 x double>, i64, <256 x double>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvfnmad_vvvvMvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, <512 x i1> %3, <256 x double> %4) {
; CHECK-LABEL: pvfnmad_vvvvMvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    pvfnmad %v3, %v0, %v1, %v2, %vm2
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v3
; CHECK-NEXT:    b.l.t (, %s10)
  %6 = tail call fast <256 x double> @llvm.ve.vl.pvfnmad.vvvvMvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, <512 x i1> %3, <256 x double> %4, i32 128)
  ret <256 x double> %6
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.pvfnmad.vvvvMvl(<256 x double>, <256 x double>, <256 x double>, <512 x i1>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvfnmad_vsvvMvl(i64 %0, <256 x double> %1, <256 x double> %2, <512 x i1> %3, <256 x double> %4) {
; CHECK-LABEL: pvfnmad_vsvvMvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    pvfnmad %v2, %s0, %v0, %v1, %vm2
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %6 = tail call fast <256 x double> @llvm.ve.vl.pvfnmad.vsvvMvl(i64 %0, <256 x double> %1, <256 x double> %2, <512 x i1> %3, <256 x double> %4, i32 128)
  ret <256 x double> %6
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.pvfnmad.vsvvMvl(i64, <256 x double>, <256 x double>, <512 x i1>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvfnmad_vvsvMvl(<256 x double> %0, i64 %1, <256 x double> %2, <512 x i1> %3, <256 x double> %4) {
; CHECK-LABEL: pvfnmad_vvsvMvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    pvfnmad %v2, %v0, %s0, %v1, %vm2
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %6 = tail call fast <256 x double> @llvm.ve.vl.pvfnmad.vvsvMvl(<256 x double> %0, i64 %1, <256 x double> %2, <512 x i1> %3, <256 x double> %4, i32 128)
  ret <256 x double> %6
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.pvfnmad.vvsvMvl(<256 x double>, i64, <256 x double>, <512 x i1>, <256 x double>, i32)