1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
3 ; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
4 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
5 ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
7 declare <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.nxv1f16(
13 define <vscale x 1 x half> @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
14 ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16:
15 ; CHECK: # %bb.0: # %entry
16 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
17 ; CHECK-NEXT: vfsgnj.vv v8, v8, v9
20 %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.nxv1f16(
21 <vscale x 1 x half> undef,
22 <vscale x 1 x half> %0,
23 <vscale x 1 x half> %1,
26 ret <vscale x 1 x half> %a
29 declare <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16(
37 define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
38 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16:
39 ; CHECK: # %bb.0: # %entry
40 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
41 ; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t
44 %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16(
45 <vscale x 1 x half> %0,
46 <vscale x 1 x half> %1,
47 <vscale x 1 x half> %2,
51 ret <vscale x 1 x half> %a
54 declare <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.nxv2f16(
60 define <vscale x 2 x half> @intrinsic_vfsgnj_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
61 ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f16_nxv2f16_nxv2f16:
62 ; CHECK: # %bb.0: # %entry
63 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
64 ; CHECK-NEXT: vfsgnj.vv v8, v8, v9
67 %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.nxv2f16(
68 <vscale x 2 x half> undef,
69 <vscale x 2 x half> %0,
70 <vscale x 2 x half> %1,
73 ret <vscale x 2 x half> %a
76 declare <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16.nxv2f16(
84 define <vscale x 2 x half> @intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
85 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16_nxv2f16:
86 ; CHECK: # %bb.0: # %entry
87 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
88 ; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t
91 %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16.nxv2f16(
92 <vscale x 2 x half> %0,
93 <vscale x 2 x half> %1,
94 <vscale x 2 x half> %2,
98 ret <vscale x 2 x half> %a
101 declare <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.nxv4f16(
107 define <vscale x 4 x half> @intrinsic_vfsgnj_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
108 ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f16_nxv4f16_nxv4f16:
109 ; CHECK: # %bb.0: # %entry
110 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
111 ; CHECK-NEXT: vfsgnj.vv v8, v8, v9
114 %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.nxv4f16(
115 <vscale x 4 x half> undef,
116 <vscale x 4 x half> %0,
117 <vscale x 4 x half> %1,
120 ret <vscale x 4 x half> %a
123 declare <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16.nxv4f16(
131 define <vscale x 4 x half> @intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
132 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16_nxv4f16:
133 ; CHECK: # %bb.0: # %entry
134 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
135 ; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t
138 %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16.nxv4f16(
139 <vscale x 4 x half> %0,
140 <vscale x 4 x half> %1,
141 <vscale x 4 x half> %2,
142 <vscale x 4 x i1> %3,
145 ret <vscale x 4 x half> %a
148 declare <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.nxv8f16(
154 define <vscale x 8 x half> @intrinsic_vfsgnj_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
155 ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f16_nxv8f16_nxv8f16:
156 ; CHECK: # %bb.0: # %entry
157 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
158 ; CHECK-NEXT: vfsgnj.vv v8, v8, v10
161 %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.nxv8f16(
162 <vscale x 8 x half> undef,
163 <vscale x 8 x half> %0,
164 <vscale x 8 x half> %1,
167 ret <vscale x 8 x half> %a
170 declare <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16.nxv8f16(
178 define <vscale x 8 x half> @intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
179 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16_nxv8f16:
180 ; CHECK: # %bb.0: # %entry
181 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
182 ; CHECK-NEXT: vfsgnj.vv v8, v10, v12, v0.t
185 %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16.nxv8f16(
186 <vscale x 8 x half> %0,
187 <vscale x 8 x half> %1,
188 <vscale x 8 x half> %2,
189 <vscale x 8 x i1> %3,
192 ret <vscale x 8 x half> %a
195 declare <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.nxv16f16(
196 <vscale x 16 x half>,
197 <vscale x 16 x half>,
198 <vscale x 16 x half>,
201 define <vscale x 16 x half> @intrinsic_vfsgnj_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
202 ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv16f16_nxv16f16_nxv16f16:
203 ; CHECK: # %bb.0: # %entry
204 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
205 ; CHECK-NEXT: vfsgnj.vv v8, v8, v12
208 %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.nxv16f16(
209 <vscale x 16 x half> undef,
210 <vscale x 16 x half> %0,
211 <vscale x 16 x half> %1,
214 ret <vscale x 16 x half> %a
217 declare <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16.nxv16f16(
218 <vscale x 16 x half>,
219 <vscale x 16 x half>,
220 <vscale x 16 x half>,
225 define <vscale x 16 x half> @intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
226 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16_nxv16f16:
227 ; CHECK: # %bb.0: # %entry
228 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
229 ; CHECK-NEXT: vfsgnj.vv v8, v12, v16, v0.t
232 %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16.nxv16f16(
233 <vscale x 16 x half> %0,
234 <vscale x 16 x half> %1,
235 <vscale x 16 x half> %2,
236 <vscale x 16 x i1> %3,
239 ret <vscale x 16 x half> %a
242 declare <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.nxv32f16(
243 <vscale x 32 x half>,
244 <vscale x 32 x half>,
245 <vscale x 32 x half>,
248 define <vscale x 32 x half> @intrinsic_vfsgnj_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, iXLen %2) nounwind {
249 ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv32f16_nxv32f16_nxv32f16:
250 ; CHECK: # %bb.0: # %entry
251 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
252 ; CHECK-NEXT: vfsgnj.vv v8, v8, v16
255 %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.nxv32f16(
256 <vscale x 32 x half> undef,
257 <vscale x 32 x half> %0,
258 <vscale x 32 x half> %1,
261 ret <vscale x 32 x half> %a
264 declare <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16.nxv32f16(
265 <vscale x 32 x half>,
266 <vscale x 32 x half>,
267 <vscale x 32 x half>,
272 define <vscale x 32 x half> @intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
273 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16_nxv32f16:
274 ; CHECK: # %bb.0: # %entry
275 ; CHECK-NEXT: vl8re16.v v24, (a0)
276 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
277 ; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t
280 %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16.nxv32f16(
281 <vscale x 32 x half> %0,
282 <vscale x 32 x half> %1,
283 <vscale x 32 x half> %2,
284 <vscale x 32 x i1> %3,
287 ret <vscale x 32 x half> %a
290 declare <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.nxv1f32(
291 <vscale x 1 x float>,
292 <vscale x 1 x float>,
293 <vscale x 1 x float>,
296 define <vscale x 1 x float> @intrinsic_vfsgnj_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
297 ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f32_nxv1f32_nxv1f32:
298 ; CHECK: # %bb.0: # %entry
299 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
300 ; CHECK-NEXT: vfsgnj.vv v8, v8, v9
303 %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.nxv1f32(
304 <vscale x 1 x float> undef,
305 <vscale x 1 x float> %0,
306 <vscale x 1 x float> %1,
309 ret <vscale x 1 x float> %a
312 declare <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32(
313 <vscale x 1 x float>,
314 <vscale x 1 x float>,
315 <vscale x 1 x float>,
320 define <vscale x 1 x float> @intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
321 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32_nxv1f32:
322 ; CHECK: # %bb.0: # %entry
323 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
324 ; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t
327 %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32(
328 <vscale x 1 x float> %0,
329 <vscale x 1 x float> %1,
330 <vscale x 1 x float> %2,
331 <vscale x 1 x i1> %3,
334 ret <vscale x 1 x float> %a
337 declare <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.nxv2f32(
338 <vscale x 2 x float>,
339 <vscale x 2 x float>,
340 <vscale x 2 x float>,
343 define <vscale x 2 x float> @intrinsic_vfsgnj_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
344 ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f32_nxv2f32_nxv2f32:
345 ; CHECK: # %bb.0: # %entry
346 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
347 ; CHECK-NEXT: vfsgnj.vv v8, v8, v9
350 %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.nxv2f32(
351 <vscale x 2 x float> undef,
352 <vscale x 2 x float> %0,
353 <vscale x 2 x float> %1,
356 ret <vscale x 2 x float> %a
359 declare <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32(
360 <vscale x 2 x float>,
361 <vscale x 2 x float>,
362 <vscale x 2 x float>,
367 define <vscale x 2 x float> @intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
368 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32_nxv2f32:
369 ; CHECK: # %bb.0: # %entry
370 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
371 ; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t
374 %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32(
375 <vscale x 2 x float> %0,
376 <vscale x 2 x float> %1,
377 <vscale x 2 x float> %2,
378 <vscale x 2 x i1> %3,
381 ret <vscale x 2 x float> %a
384 declare <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.nxv4f32(
385 <vscale x 4 x float>,
386 <vscale x 4 x float>,
387 <vscale x 4 x float>,
390 define <vscale x 4 x float> @intrinsic_vfsgnj_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
391 ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f32_nxv4f32_nxv4f32:
392 ; CHECK: # %bb.0: # %entry
393 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
394 ; CHECK-NEXT: vfsgnj.vv v8, v8, v10
397 %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.nxv4f32(
398 <vscale x 4 x float> undef,
399 <vscale x 4 x float> %0,
400 <vscale x 4 x float> %1,
403 ret <vscale x 4 x float> %a
406 declare <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32(
407 <vscale x 4 x float>,
408 <vscale x 4 x float>,
409 <vscale x 4 x float>,
414 define <vscale x 4 x float> @intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
415 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32_nxv4f32:
416 ; CHECK: # %bb.0: # %entry
417 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
418 ; CHECK-NEXT: vfsgnj.vv v8, v10, v12, v0.t
421 %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32(
422 <vscale x 4 x float> %0,
423 <vscale x 4 x float> %1,
424 <vscale x 4 x float> %2,
425 <vscale x 4 x i1> %3,
428 ret <vscale x 4 x float> %a
431 declare <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.nxv8f32(
432 <vscale x 8 x float>,
433 <vscale x 8 x float>,
434 <vscale x 8 x float>,
437 define <vscale x 8 x float> @intrinsic_vfsgnj_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
438 ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f32_nxv8f32_nxv8f32:
439 ; CHECK: # %bb.0: # %entry
440 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
441 ; CHECK-NEXT: vfsgnj.vv v8, v8, v12
444 %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.nxv8f32(
445 <vscale x 8 x float> undef,
446 <vscale x 8 x float> %0,
447 <vscale x 8 x float> %1,
450 ret <vscale x 8 x float> %a
453 declare <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32(
454 <vscale x 8 x float>,
455 <vscale x 8 x float>,
456 <vscale x 8 x float>,
461 define <vscale x 8 x float> @intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
462 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32_nxv8f32:
463 ; CHECK: # %bb.0: # %entry
464 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
465 ; CHECK-NEXT: vfsgnj.vv v8, v12, v16, v0.t
468 %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32(
469 <vscale x 8 x float> %0,
470 <vscale x 8 x float> %1,
471 <vscale x 8 x float> %2,
472 <vscale x 8 x i1> %3,
475 ret <vscale x 8 x float> %a
478 declare <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.nxv16f32(
479 <vscale x 16 x float>,
480 <vscale x 16 x float>,
481 <vscale x 16 x float>,
484 define <vscale x 16 x float> @intrinsic_vfsgnj_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, iXLen %2) nounwind {
485 ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv16f32_nxv16f32_nxv16f32:
486 ; CHECK: # %bb.0: # %entry
487 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
488 ; CHECK-NEXT: vfsgnj.vv v8, v8, v16
491 %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.nxv16f32(
492 <vscale x 16 x float> undef,
493 <vscale x 16 x float> %0,
494 <vscale x 16 x float> %1,
497 ret <vscale x 16 x float> %a
500 declare <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32(
501 <vscale x 16 x float>,
502 <vscale x 16 x float>,
503 <vscale x 16 x float>,
508 define <vscale x 16 x float> @intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
509 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32_nxv16f32:
510 ; CHECK: # %bb.0: # %entry
511 ; CHECK-NEXT: vl8re32.v v24, (a0)
512 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
513 ; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t
516 %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32(
517 <vscale x 16 x float> %0,
518 <vscale x 16 x float> %1,
519 <vscale x 16 x float> %2,
520 <vscale x 16 x i1> %3,
523 ret <vscale x 16 x float> %a
526 declare <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.nxv1f64(
527 <vscale x 1 x double>,
528 <vscale x 1 x double>,
529 <vscale x 1 x double>,
532 define <vscale x 1 x double> @intrinsic_vfsgnj_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
533 ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f64_nxv1f64_nxv1f64:
534 ; CHECK: # %bb.0: # %entry
535 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
536 ; CHECK-NEXT: vfsgnj.vv v8, v8, v9
539 %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.nxv1f64(
540 <vscale x 1 x double> undef,
541 <vscale x 1 x double> %0,
542 <vscale x 1 x double> %1,
545 ret <vscale x 1 x double> %a
548 declare <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64(
549 <vscale x 1 x double>,
550 <vscale x 1 x double>,
551 <vscale x 1 x double>,
556 define <vscale x 1 x double> @intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
557 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64_nxv1f64:
558 ; CHECK: # %bb.0: # %entry
559 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
560 ; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t
563 %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64(
564 <vscale x 1 x double> %0,
565 <vscale x 1 x double> %1,
566 <vscale x 1 x double> %2,
567 <vscale x 1 x i1> %3,
570 ret <vscale x 1 x double> %a
573 declare <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.nxv2f64(
574 <vscale x 2 x double>,
575 <vscale x 2 x double>,
576 <vscale x 2 x double>,
579 define <vscale x 2 x double> @intrinsic_vfsgnj_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
580 ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f64_nxv2f64_nxv2f64:
581 ; CHECK: # %bb.0: # %entry
582 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
583 ; CHECK-NEXT: vfsgnj.vv v8, v8, v10
586 %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.nxv2f64(
587 <vscale x 2 x double> undef,
588 <vscale x 2 x double> %0,
589 <vscale x 2 x double> %1,
592 ret <vscale x 2 x double> %a
595 declare <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64(
596 <vscale x 2 x double>,
597 <vscale x 2 x double>,
598 <vscale x 2 x double>,
603 define <vscale x 2 x double> @intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
604 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64_nxv2f64:
605 ; CHECK: # %bb.0: # %entry
606 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
607 ; CHECK-NEXT: vfsgnj.vv v8, v10, v12, v0.t
610 %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64(
611 <vscale x 2 x double> %0,
612 <vscale x 2 x double> %1,
613 <vscale x 2 x double> %2,
614 <vscale x 2 x i1> %3,
617 ret <vscale x 2 x double> %a
620 declare <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.nxv4f64(
621 <vscale x 4 x double>,
622 <vscale x 4 x double>,
623 <vscale x 4 x double>,
626 define <vscale x 4 x double> @intrinsic_vfsgnj_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
627 ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f64_nxv4f64_nxv4f64:
628 ; CHECK: # %bb.0: # %entry
629 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
630 ; CHECK-NEXT: vfsgnj.vv v8, v8, v12
633 %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.nxv4f64(
634 <vscale x 4 x double> undef,
635 <vscale x 4 x double> %0,
636 <vscale x 4 x double> %1,
639 ret <vscale x 4 x double> %a
642 declare <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64(
643 <vscale x 4 x double>,
644 <vscale x 4 x double>,
645 <vscale x 4 x double>,
650 define <vscale x 4 x double> @intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
651 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64_nxv4f64:
652 ; CHECK: # %bb.0: # %entry
653 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
654 ; CHECK-NEXT: vfsgnj.vv v8, v12, v16, v0.t
657 %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64(
658 <vscale x 4 x double> %0,
659 <vscale x 4 x double> %1,
660 <vscale x 4 x double> %2,
661 <vscale x 4 x i1> %3,
664 ret <vscale x 4 x double> %a
667 declare <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.nxv8f64(
668 <vscale x 8 x double>,
669 <vscale x 8 x double>,
670 <vscale x 8 x double>,
673 define <vscale x 8 x double> @intrinsic_vfsgnj_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, iXLen %2) nounwind {
674 ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f64_nxv8f64_nxv8f64:
675 ; CHECK: # %bb.0: # %entry
676 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
677 ; CHECK-NEXT: vfsgnj.vv v8, v8, v16
680 %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.nxv8f64(
681 <vscale x 8 x double> undef,
682 <vscale x 8 x double> %0,
683 <vscale x 8 x double> %1,
686 ret <vscale x 8 x double> %a
689 declare <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64(
690 <vscale x 8 x double>,
691 <vscale x 8 x double>,
692 <vscale x 8 x double>,
697 define <vscale x 8 x double> @intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
698 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64_nxv8f64:
699 ; CHECK: # %bb.0: # %entry
700 ; CHECK-NEXT: vl8re64.v v24, (a0)
701 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
702 ; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t
705 %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64(
706 <vscale x 8 x double> %0,
707 <vscale x 8 x double> %1,
708 <vscale x 8 x double> %2,
709 <vscale x 8 x i1> %3,
712 ret <vscale x 8 x double> %a
715 declare <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.f16(
721 define <vscale x 1 x half> @intrinsic_vfsgnj_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
722 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f16_nxv1f16_f16:
723 ; CHECK: # %bb.0: # %entry
724 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
725 ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
728 %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.f16(
729 <vscale x 1 x half> undef,
730 <vscale x 1 x half> %0,
734 ret <vscale x 1 x half> %a
737 declare <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.f16(
745 define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
746 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f16_nxv1f16_f16:
747 ; CHECK: # %bb.0: # %entry
748 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
749 ; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t
752 %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.f16(
753 <vscale x 1 x half> %0,
754 <vscale x 1 x half> %1,
756 <vscale x 1 x i1> %3,
759 ret <vscale x 1 x half> %a
762 declare <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.f16(
768 define <vscale x 2 x half> @intrinsic_vfsgnj_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
769 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f16_nxv2f16_f16:
770 ; CHECK: # %bb.0: # %entry
771 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
772 ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
775 %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.f16(
776 <vscale x 2 x half> undef,
777 <vscale x 2 x half> %0,
781 ret <vscale x 2 x half> %a
784 declare <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16.f16(
792 define <vscale x 2 x half> @intrinsic_vfsgnj_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
793 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f16_nxv2f16_f16:
794 ; CHECK: # %bb.0: # %entry
795 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
796 ; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t
799 %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16.f16(
800 <vscale x 2 x half> %0,
801 <vscale x 2 x half> %1,
803 <vscale x 2 x i1> %3,
806 ret <vscale x 2 x half> %a
809 declare <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.f16(
815 define <vscale x 4 x half> @intrinsic_vfsgnj_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
816 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f16_nxv4f16_f16:
817 ; CHECK: # %bb.0: # %entry
818 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
819 ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
822 %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.f16(
823 <vscale x 4 x half> undef,
824 <vscale x 4 x half> %0,
828 ret <vscale x 4 x half> %a
831 declare <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16.f16(
839 define <vscale x 4 x half> @intrinsic_vfsgnj_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
840 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f16_nxv4f16_f16:
841 ; CHECK: # %bb.0: # %entry
842 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
843 ; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t
846 %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16.f16(
847 <vscale x 4 x half> %0,
848 <vscale x 4 x half> %1,
850 <vscale x 4 x i1> %3,
853 ret <vscale x 4 x half> %a
856 declare <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.f16(
862 define <vscale x 8 x half> @intrinsic_vfsgnj_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
863 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f16_nxv8f16_f16:
864 ; CHECK: # %bb.0: # %entry
865 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
866 ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
869 %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.f16(
870 <vscale x 8 x half> undef,
871 <vscale x 8 x half> %0,
875 ret <vscale x 8 x half> %a
878 declare <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16.f16(
886 define <vscale x 8 x half> @intrinsic_vfsgnj_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
887 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f16_nxv8f16_f16:
888 ; CHECK: # %bb.0: # %entry
889 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
890 ; CHECK-NEXT: vfsgnj.vf v8, v10, fa0, v0.t
893 %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16.f16(
894 <vscale x 8 x half> %0,
895 <vscale x 8 x half> %1,
897 <vscale x 8 x i1> %3,
900 ret <vscale x 8 x half> %a
903 declare <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.f16(
904 <vscale x 16 x half>,
905 <vscale x 16 x half>,
909 define <vscale x 16 x half> @intrinsic_vfsgnj_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
910 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16f16_nxv16f16_f16:
911 ; CHECK: # %bb.0: # %entry
912 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
913 ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
916 %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.f16(
917 <vscale x 16 x half> undef,
918 <vscale x 16 x half> %0,
922 ret <vscale x 16 x half> %a
925 declare <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16.f16(
926 <vscale x 16 x half>,
927 <vscale x 16 x half>,
933 define <vscale x 16 x half> @intrinsic_vfsgnj_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
934 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f16_nxv16f16_f16:
935 ; CHECK: # %bb.0: # %entry
936 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
937 ; CHECK-NEXT: vfsgnj.vf v8, v12, fa0, v0.t
940 %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16.f16(
941 <vscale x 16 x half> %0,
942 <vscale x 16 x half> %1,
944 <vscale x 16 x i1> %3,
947 ret <vscale x 16 x half> %a
950 declare <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.f16(
951 <vscale x 32 x half>,
952 <vscale x 32 x half>,
956 define <vscale x 32 x half> @intrinsic_vfsgnj_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
957 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv32f16_nxv32f16_f16:
958 ; CHECK: # %bb.0: # %entry
959 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
960 ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
963 %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.f16(
964 <vscale x 32 x half> undef,
965 <vscale x 32 x half> %0,
969 ret <vscale x 32 x half> %a
972 declare <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16.f16(
973 <vscale x 32 x half>,
974 <vscale x 32 x half>,
980 define <vscale x 32 x half> @intrinsic_vfsgnj_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
981 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv32f16_nxv32f16_f16:
982 ; CHECK: # %bb.0: # %entry
983 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
984 ; CHECK-NEXT: vfsgnj.vf v8, v16, fa0, v0.t
987 %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16.f16(
988 <vscale x 32 x half> %0,
989 <vscale x 32 x half> %1,
991 <vscale x 32 x i1> %3,
994 ret <vscale x 32 x half> %a
997 declare <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32(
998 <vscale x 1 x float>,
999 <vscale x 1 x float>,
1003 define <vscale x 1 x float> @intrinsic_vfsgnj_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
1004 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f32_nxv1f32_f32:
1005 ; CHECK: # %bb.0: # %entry
1006 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
1007 ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
1010 %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32(
1011 <vscale x 1 x float> undef,
1012 <vscale x 1 x float> %0,
1016 ret <vscale x 1 x float> %a
1019 declare <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.f32(
1020 <vscale x 1 x float>,
1021 <vscale x 1 x float>,
1027 define <vscale x 1 x float> @intrinsic_vfsgnj_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1028 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f32_nxv1f32_f32:
1029 ; CHECK: # %bb.0: # %entry
1030 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
1031 ; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t
1034 %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.f32(
1035 <vscale x 1 x float> %0,
1036 <vscale x 1 x float> %1,
1038 <vscale x 1 x i1> %3,
1041 ret <vscale x 1 x float> %a
1044 declare <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.f32(
1045 <vscale x 2 x float>,
1046 <vscale x 2 x float>,
1050 define <vscale x 2 x float> @intrinsic_vfsgnj_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
1051 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f32_nxv2f32_f32:
1052 ; CHECK: # %bb.0: # %entry
1053 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
1054 ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
1057 %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.f32(
1058 <vscale x 2 x float> undef,
1059 <vscale x 2 x float> %0,
1063 ret <vscale x 2 x float> %a
1066 declare <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.f32(
1067 <vscale x 2 x float>,
1068 <vscale x 2 x float>,
1074 define <vscale x 2 x float> @intrinsic_vfsgnj_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1075 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f32_nxv2f32_f32:
1076 ; CHECK: # %bb.0: # %entry
1077 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
1078 ; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t
1081 %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.f32(
1082 <vscale x 2 x float> %0,
1083 <vscale x 2 x float> %1,
1085 <vscale x 2 x i1> %3,
1088 ret <vscale x 2 x float> %a
1091 declare <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.f32(
1092 <vscale x 4 x float>,
1093 <vscale x 4 x float>,
1097 define <vscale x 4 x float> @intrinsic_vfsgnj_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
1098 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f32_nxv4f32_f32:
1099 ; CHECK: # %bb.0: # %entry
1100 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
1101 ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
1104 %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.f32(
1105 <vscale x 4 x float> undef,
1106 <vscale x 4 x float> %0,
1110 ret <vscale x 4 x float> %a
; Masked vfsgnj.vf, <vscale x 4 x float>: merge in v8, vector source in v10
; (m2 register pair), mask in v0.t; policy ta, mu.
1113 declare <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.f32(
1114 <vscale x 4 x float>,
1115 <vscale x 4 x float>,
1121 define <vscale x 4 x float> @intrinsic_vfsgnj_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1122 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f32_nxv4f32_f32:
1123 ; CHECK: # %bb.0: # %entry
1124 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
1125 ; CHECK-NEXT: vfsgnj.vf v8, v10, fa0, v0.t
1128 %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.f32(
1129 <vscale x 4 x float> %0,
1130 <vscale x 4 x float> %1,
1132 <vscale x 4 x i1> %3,
1135 ret <vscale x 4 x float> %a
; Unmasked vfsgnj.vf, <vscale x 8 x float> x f32 scalar: undef passthru,
; in-place vfsgnj.vf v8, v8, fa0 at e32, m4, ta, ma.
1138 declare <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.f32(
1139 <vscale x 8 x float>,
1140 <vscale x 8 x float>,
1144 define <vscale x 8 x float> @intrinsic_vfsgnj_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
1145 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f32_nxv8f32_f32:
1146 ; CHECK: # %bb.0: # %entry
1147 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
1148 ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
1151 %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.f32(
1152 <vscale x 8 x float> undef,
1153 <vscale x 8 x float> %0,
1157 ret <vscale x 8 x float> %a
; Masked vfsgnj.vf, <vscale x 8 x float>: merge in v8, vector source in v12
; (m4 register group), mask in v0.t; policy ta, mu.
1160 declare <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.f32(
1161 <vscale x 8 x float>,
1162 <vscale x 8 x float>,
1168 define <vscale x 8 x float> @intrinsic_vfsgnj_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1169 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f32_nxv8f32_f32:
1170 ; CHECK: # %bb.0: # %entry
1171 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
1172 ; CHECK-NEXT: vfsgnj.vf v8, v12, fa0, v0.t
1175 %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.f32(
1176 <vscale x 8 x float> %0,
1177 <vscale x 8 x float> %1,
1179 <vscale x 8 x i1> %3,
1182 ret <vscale x 8 x float> %a
; Unmasked vfsgnj.vf, <vscale x 16 x float> x f32 scalar: undef passthru,
; in-place vfsgnj.vf v8, v8, fa0 at e32, m8, ta, ma (largest f32 LMUL).
1185 declare <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.f32(
1186 <vscale x 16 x float>,
1187 <vscale x 16 x float>,
1191 define <vscale x 16 x float> @intrinsic_vfsgnj_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
1192 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16f32_nxv16f32_f32:
1193 ; CHECK: # %bb.0: # %entry
1194 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
1195 ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
1198 %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.f32(
1199 <vscale x 16 x float> undef,
1200 <vscale x 16 x float> %0,
1204 ret <vscale x 16 x float> %a
; Masked vfsgnj.vf, <vscale x 16 x float>: merge in v8, vector source in v16
; (m8 register group), mask in v0.t; policy ta, mu.
1207 declare <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.f32(
1208 <vscale x 16 x float>,
1209 <vscale x 16 x float>,
1215 define <vscale x 16 x float> @intrinsic_vfsgnj_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1216 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f32_nxv16f32_f32:
1217 ; CHECK: # %bb.0: # %entry
1218 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
1219 ; CHECK-NEXT: vfsgnj.vf v8, v16, fa0, v0.t
1222 %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.f32(
1223 <vscale x 16 x float> %0,
1224 <vscale x 16 x float> %1,
1226 <vscale x 16 x i1> %3,
1229 ret <vscale x 16 x float> %a
; Unmasked vfsgnj.vf, <vscale x 1 x double> x f64 scalar: undef passthru,
; in-place vfsgnj.vf v8, v8, fa0 at e64, m1, ta, ma.
1232 declare <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.f64(
1233 <vscale x 1 x double>,
1234 <vscale x 1 x double>,
1238 define <vscale x 1 x double> @intrinsic_vfsgnj_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
1239 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f64_nxv1f64_f64:
1240 ; CHECK: # %bb.0: # %entry
1241 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1242 ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
1245 %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.f64(
1246 <vscale x 1 x double> undef,
1247 <vscale x 1 x double> %0,
1251 ret <vscale x 1 x double> %a
; Masked vfsgnj.vf, <vscale x 1 x double>: merge in v8, vector source in v9,
; mask in v0.t; policy ta, mu.
1254 declare <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64.f64(
1255 <vscale x 1 x double>,
1256 <vscale x 1 x double>,
1262 define <vscale x 1 x double> @intrinsic_vfsgnj_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1263 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f64_nxv1f64_f64:
1264 ; CHECK: # %bb.0: # %entry
1265 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
1266 ; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t
1269 %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64.f64(
1270 <vscale x 1 x double> %0,
1271 <vscale x 1 x double> %1,
1273 <vscale x 1 x i1> %3,
1276 ret <vscale x 1 x double> %a
; Unmasked vfsgnj.vf, <vscale x 2 x double> x f64 scalar: undef passthru,
; in-place vfsgnj.vf v8, v8, fa0 at e64, m2, ta, ma.
1279 declare <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64(
1280 <vscale x 2 x double>,
1281 <vscale x 2 x double>,
1285 define <vscale x 2 x double> @intrinsic_vfsgnj_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
1286 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f64_nxv2f64_f64:
1287 ; CHECK: # %bb.0: # %entry
1288 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
1289 ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
1292 %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64(
1293 <vscale x 2 x double> undef,
1294 <vscale x 2 x double> %0,
1298 ret <vscale x 2 x double> %a
; Masked vfsgnj.vf, <vscale x 2 x double>: merge in v8, vector source in v10
; (m2 register pair), mask in v0.t; policy ta, mu.
1301 declare <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.f64(
1302 <vscale x 2 x double>,
1303 <vscale x 2 x double>,
1309 define <vscale x 2 x double> @intrinsic_vfsgnj_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1310 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f64_nxv2f64_f64:
1311 ; CHECK: # %bb.0: # %entry
1312 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
1313 ; CHECK-NEXT: vfsgnj.vf v8, v10, fa0, v0.t
1316 %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.f64(
1317 <vscale x 2 x double> %0,
1318 <vscale x 2 x double> %1,
1320 <vscale x 2 x i1> %3,
1323 ret <vscale x 2 x double> %a
; Unmasked vfsgnj.vf, <vscale x 4 x double> x f64 scalar: undef passthru,
; in-place vfsgnj.vf v8, v8, fa0 at e64, m4, ta, ma.
1326 declare <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64(
1327 <vscale x 4 x double>,
1328 <vscale x 4 x double>,
1332 define <vscale x 4 x double> @intrinsic_vfsgnj_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
1333 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f64_nxv4f64_f64:
1334 ; CHECK: # %bb.0: # %entry
1335 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1336 ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
1339 %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64(
1340 <vscale x 4 x double> undef,
1341 <vscale x 4 x double> %0,
1345 ret <vscale x 4 x double> %a
; Masked vfsgnj.vf, <vscale x 4 x double>: merge in v8, vector source in v12
; (m4 register group), mask in v0.t; policy ta, mu.
1348 declare <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.f64(
1349 <vscale x 4 x double>,
1350 <vscale x 4 x double>,
1356 define <vscale x 4 x double> @intrinsic_vfsgnj_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1357 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f64_nxv4f64_f64:
1358 ; CHECK: # %bb.0: # %entry
1359 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
1360 ; CHECK-NEXT: vfsgnj.vf v8, v12, fa0, v0.t
1363 %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.f64(
1364 <vscale x 4 x double> %0,
1365 <vscale x 4 x double> %1,
1367 <vscale x 4 x i1> %3,
1370 ret <vscale x 4 x double> %a
; Unmasked vfsgnj.vf, <vscale x 8 x double> x f64 scalar: undef passthru,
; in-place vfsgnj.vf v8, v8, fa0 at e64, m8, ta, ma (largest f64 LMUL).
1373 declare <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64(
1374 <vscale x 8 x double>,
1375 <vscale x 8 x double>,
1379 define <vscale x 8 x double> @intrinsic_vfsgnj_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
1380 ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f64_nxv8f64_f64:
1381 ; CHECK: # %bb.0: # %entry
1382 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1383 ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0
1386 %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64(
1387 <vscale x 8 x double> undef,
1388 <vscale x 8 x double> %0,
1392 ret <vscale x 8 x double> %a
; Masked vfsgnj.vf, <vscale x 8 x double>: merge in v8, vector source in v16
; (m8 register group), mask in v0.t; policy ta, mu.
1395 declare <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.f64(
1396 <vscale x 8 x double>,
1397 <vscale x 8 x double>,
1403 define <vscale x 8 x double> @intrinsic_vfsgnj_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1404 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f64_nxv8f64_f64:
1405 ; CHECK: # %bb.0: # %entry
1406 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
1407 ; CHECK-NEXT: vfsgnj.vf v8, v16, fa0, v0.t
1410 %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.f64(
1411 <vscale x 8 x double> %0,
1412 <vscale x 8 x double> %1,
1414 <vscale x 8 x i1> %3,
1417 ret <vscale x 8 x double> %a