; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
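
; Tests for merging scalable floating-point vectors: vmerge.vvm with a vector
; operand, vfmerge.vfm with an FP scalar operand, and the vmerge.vim special
; case when the scalar is +0.0.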

declare <vscale x 1 x half> @llvm.riscv.vmerge.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vmerge_vvm_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vmerge.nxv1f16.nxv1f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    half %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vmerge.nxv2f16.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x half> @intrinsic_vmerge_vvm_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2f16_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vmerge.nxv2f16.nxv2f16(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x half> @intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.f16(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    half %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vmerge.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x half> @intrinsic_vmerge_vvm_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vmerge.nxv4f16.nxv4f16(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x half> @intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.f16(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    half %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vmerge.nxv8f16.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x half> @intrinsic_vmerge_vvm_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8f16_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vmerge.nxv8f16.nxv8f16(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x half> @intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.f16(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    half %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vmerge.nxv16f16.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x half> @intrinsic_vmerge_vvm_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16f16_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vmerge.nxv16f16.nxv16f16(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x half> @intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.f16(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    half %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vmerge.nxv32f16.nxv32f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x half> @intrinsic_vmerge_vvm_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv32f16_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vmerge.nxv32f16.nxv32f16(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    <vscale x 32 x i1> %2,
    iXLen %3)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x half> @intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.f16(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    half %1,
    <vscale x 32 x i1> %2,
    iXLen %3)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vmerge.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x float> @intrinsic_vmerge_vvm_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vmerge.nxv1f32.nxv1f32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x float> @intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    float %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x float> @intrinsic_vmerge_vvm_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x float> @intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    float %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vmerge.nxv4f32.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x float> @intrinsic_vmerge_vvm_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4f32_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vmerge.nxv4f32.nxv4f32(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x float> @intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    float %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vmerge.nxv8f32.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x float> @intrinsic_vmerge_vvm_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8f32_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vmerge.nxv8f32.nxv8f32(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x float> @intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    float %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vmerge.nxv16f32.nxv16f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x float> @intrinsic_vmerge_vvm_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16f32_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vmerge.nxv16f32.nxv16f32(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x float> @intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    float %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vmerge.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x double> @intrinsic_vmerge_vvm_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vmerge.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x double> @intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    double %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vmerge.nxv2f64.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x double> @intrinsic_vmerge_vvm_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2f64_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vmerge.nxv2f64.nxv2f64(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x double> @intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    double %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vmerge.nxv4f64.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x double> @intrinsic_vmerge_vvm_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4f64_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vmerge.nxv4f64.nxv4f64(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x double> @intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    double %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vmerge.nxv8f64.nxv8f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x double> @intrinsic_vmerge_vvm_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8f64_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vmerge.nxv8f64.nxv8f64(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x double> @intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    double %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x double> %a
}
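
; When the scalar operand is +0.0 (an all-zero bit pattern), lowering is
; expected to use the integer vmerge.vim with immediate 0 rather than
; vfmerge.vfm, so no FP scalar register is needed.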

define <vscale x 1 x half> @intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    half zeroinitializer,
    <vscale x 1 x i1> %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}

define <vscale x 2 x half> @intrinsic_vfmerge_vzm_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.f16(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    half zeroinitializer,
    <vscale x 2 x i1> %1,
    iXLen %2)

  ret <vscale x 2 x half> %a
}

define <vscale x 4 x half> @intrinsic_vfmerge_vzm_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.f16(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    half zeroinitializer,
    <vscale x 4 x i1> %1,
    iXLen %2)

  ret <vscale x 4 x half> %a
}

define <vscale x 8 x half> @intrinsic_vfmerge_vzm_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.f16(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    half zeroinitializer,
    <vscale x 8 x i1> %1,
    iXLen %2)

  ret <vscale x 8 x half> %a
}

define <vscale x 16 x half> @intrinsic_vfmerge_vzm_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.f16(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    half zeroinitializer,
    <vscale x 16 x i1> %1,
    iXLen %2)

  ret <vscale x 16 x half> %a
}

define <vscale x 32 x half> @intrinsic_vfmerge_vzm_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.f16(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    half zeroinitializer,
    <vscale x 32 x i1> %1,
    iXLen %2)

  ret <vscale x 32 x half> %a
}

define <vscale x 1 x float> @intrinsic_vfmerge_vzm_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    float zeroinitializer,
    <vscale x 1 x i1> %1,
    iXLen %2)

  ret <vscale x 1 x float> %a
}

define <vscale x 2 x float> @intrinsic_vfmerge_vzm_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    float zeroinitializer,
    <vscale x 2 x i1> %1,
    iXLen %2)

  ret <vscale x 2 x float> %a
}

define <vscale x 4 x float> @intrinsic_vfmerge_vzm_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    float zeroinitializer,
    <vscale x 4 x i1> %1,
    iXLen %2)

  ret <vscale x 4 x float> %a
}

define <vscale x 8 x float> @intrinsic_vfmerge_vzm_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    float zeroinitializer,
    <vscale x 8 x i1> %1,
    iXLen %2)

  ret <vscale x 8 x float> %a
}

define <vscale x 16 x float> @intrinsic_vfmerge_vzm_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    float zeroinitializer,
    <vscale x 16 x i1> %1,
    iXLen %2)

  ret <vscale x 16 x float> %a
}

define <vscale x 1 x double> @intrinsic_vfmerge_vzm_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    double zeroinitializer,
    <vscale x 1 x i1> %1,
    iXLen %2)

  ret <vscale x 1 x double> %a
}

define <vscale x 2 x double> @intrinsic_vfmerge_vzm_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    double zeroinitializer,
    <vscale x 2 x i1> %1,
    iXLen %2)

  ret <vscale x 2 x double> %a
}

define <vscale x 4 x double> @intrinsic_vfmerge_vzm_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    double zeroinitializer,
    <vscale x 4 x i1> %1,
    iXLen %2)

  ret <vscale x 4 x double> %a
}

define <vscale x 8 x double> @intrinsic_vfmerge_vzm_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    double zeroinitializer,
    <vscale x 8 x i1> %1,
    iXLen %2)

  ret <vscale x 8 x double> %a
}