; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs | FileCheck %s
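
; Exercise the llvm.riscv.vcompress intrinsic for every supported element type
; (i8/i16/i32/i64, f16/f32/f64) and LMUL, checking that each call lowers to a
; single vsetvli (tail-undisturbed, mask-agnostic) followed by vcompress.vm.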
declare <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, iXLen)

define <vscale x 1 x i8> @intrinsic_vcompress_vm_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, iXLen)

define <vscale x 2 x i8> @intrinsic_vcompress_vm_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)
  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, iXLen)

define <vscale x 4 x i8> @intrinsic_vcompress_vm_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)
  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, iXLen)

define <vscale x 8 x i8> @intrinsic_vcompress_vm_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)
  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, iXLen)

define <vscale x 16 x i8> @intrinsic_vcompress_vm_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i1> %2,
    iXLen %3)
  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, iXLen)

define <vscale x 32 x i8> @intrinsic_vcompress_vm_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i1> %2,
    iXLen %3)
  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, iXLen)

define <vscale x 64 x i8> @intrinsic_vcompress_vm_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    <vscale x 64 x i1> %2,
    iXLen %3)
  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, iXLen)

define <vscale x 1 x i16> @intrinsic_vcompress_vm_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)
  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, iXLen)

define <vscale x 2 x i16> @intrinsic_vcompress_vm_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)
  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, iXLen)

define <vscale x 4 x i16> @intrinsic_vcompress_vm_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)
  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, iXLen)

define <vscale x 8 x i16> @intrinsic_vcompress_vm_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)
  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, iXLen)

define <vscale x 16 x i16> @intrinsic_vcompress_vm_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i1> %2,
    iXLen %3)
  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, iXLen)

define <vscale x 32 x i16> @intrinsic_vcompress_vm_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i1> %2,
    iXLen %3)
  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen)

define <vscale x 1 x i32> @intrinsic_vcompress_vm_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)
  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, iXLen)

define <vscale x 2 x i32> @intrinsic_vcompress_vm_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)
  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, iXLen)

define <vscale x 4 x i32> @intrinsic_vcompress_vm_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)
  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, iXLen)

define <vscale x 8 x i32> @intrinsic_vcompress_vm_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)
  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, iXLen)

define <vscale x 16 x i32> @intrinsic_vcompress_vm_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i1> %2,
    iXLen %3)
  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen)

define <vscale x 1 x i64> @intrinsic_vcompress_vm_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)
  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, iXLen)

define <vscale x 2 x i64> @intrinsic_vcompress_vm_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)
  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, iXLen)

define <vscale x 4 x i64> @intrinsic_vcompress_vm_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)
  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, iXLen)

define <vscale x 8 x i64> @intrinsic_vcompress_vm_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)
  ret <vscale x 8 x i64> %a
}

declare <vscale x 1 x half> @llvm.riscv.vcompress.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x i1>, iXLen)

define <vscale x 1 x half> @intrinsic_vcompress_vm_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vcompress.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)
  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vcompress.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x i1>, iXLen)

define <vscale x 2 x half> @intrinsic_vcompress_vm_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vcompress.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)
  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vcompress.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x i1>, iXLen)

define <vscale x 4 x half> @intrinsic_vcompress_vm_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vcompress.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)
  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vcompress.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, iXLen)

define <vscale x 8 x half> @intrinsic_vcompress_vm_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vcompress.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)
  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vcompress.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x i1>, iXLen)

define <vscale x 16 x half> @intrinsic_vcompress_vm_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vcompress.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x i1> %2,
    iXLen %3)
  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vcompress.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x i1>, iXLen)

define <vscale x 32 x half> @intrinsic_vcompress_vm_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vcompress.nxv32f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    <vscale x 32 x i1> %2,
    iXLen %3)
  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x i1>, iXLen)

define <vscale x 1 x float> @intrinsic_vcompress_vm_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)
  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vcompress.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x i1>, iXLen)

define <vscale x 2 x float> @intrinsic_vcompress_vm_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vcompress.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)
  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vcompress.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, iXLen)

define <vscale x 4 x float> @intrinsic_vcompress_vm_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vcompress.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)
  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vcompress.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x i1>, iXLen)

define <vscale x 8 x float> @intrinsic_vcompress_vm_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vcompress.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)
  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vcompress.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x i1>, iXLen)

define <vscale x 16 x float> @intrinsic_vcompress_vm_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vcompress.nxv16f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    <vscale x 16 x i1> %2,
    iXLen %3)
  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vcompress.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x i1>, iXLen)

define <vscale x 1 x double> @intrinsic_vcompress_vm_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vcompress.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)
  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vcompress.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, iXLen)

define <vscale x 2 x double> @intrinsic_vcompress_vm_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vcompress.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)
  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vcompress.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x i1>, iXLen)

define <vscale x 4 x double> @intrinsic_vcompress_vm_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vcompress.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)
  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vcompress.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x i1>, iXLen)

define <vscale x 8 x double> @intrinsic_vcompress_vm_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vcompress.nxv8f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)
  ret <vscale x 8 x double> %a
}