; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -verify-machineinstrs -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zvfh,+zvfbfmin < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zvfh,+zvfbfmin < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zvfhmin,+zvfbfmin < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zvfhmin,+zvfbfmin < %s | FileCheck %s
; Vector compress for i8 type

define <vscale x 1 x i8> @vector_compress_nxv1i8(<vscale x 1 x i8> %data, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 1 x i8> @llvm.experimental.vector.compress.nxv1i8(<vscale x 1 x i8> %data, <vscale x 1 x i1> %mask, <vscale x 1 x i8> undef)
  ret <vscale x 1 x i8> %ret
}

define <vscale x 1 x i8> @vector_compress_nxv1i8_passthru(<vscale x 1 x i8> %passthru, <vscale x 1 x i8> %data, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv1i8_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 1 x i8> @llvm.experimental.vector.compress.nxv1i8(<vscale x 1 x i8> %data, <vscale x 1 x i1> %mask, <vscale x 1 x i8> %passthru)
  ret <vscale x 1 x i8> %ret
}

define <vscale x 2 x i8> @vector_compress_nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 2 x i8> @llvm.experimental.vector.compress.nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
  ret <vscale x 2 x i8> %ret
}

define <vscale x 2 x i8> @vector_compress_nxv2i8_passthru(<vscale x 2 x i8> %passthru, <vscale x 2 x i8> %data, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv2i8_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 2 x i8> @llvm.experimental.vector.compress.nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x i1> %mask, <vscale x 2 x i8> %passthru)
  ret <vscale x 2 x i8> %ret
}

define <vscale x 4 x i8> @vector_compress_nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 4 x i8> @llvm.experimental.vector.compress.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
  ret <vscale x 4 x i8> %ret
}

define <vscale x 4 x i8> @vector_compress_nxv4i8_passthru(<vscale x 4 x i8> %passthru, <vscale x 4 x i8> %data, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv4i8_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 4 x i8> @llvm.experimental.vector.compress.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x i1> %mask, <vscale x 4 x i8> %passthru)
  ret <vscale x 4 x i8> %ret
}

define <vscale x 8 x i8> @vector_compress_nxv8i8(<vscale x 8 x i8> %data, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 8 x i8> @llvm.experimental.vector.compress.nxv8i8(<vscale x 8 x i8> %data, <vscale x 8 x i1> %mask, <vscale x 8 x i8> undef)
  ret <vscale x 8 x i8> %ret
}

define <vscale x 8 x i8> @vector_compress_nxv8i8_passthru(<vscale x 8 x i8> %passthru, <vscale x 8 x i8> %data, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv8i8_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 8 x i8> @llvm.experimental.vector.compress.nxv8i8(<vscale x 8 x i8> %data, <vscale x 8 x i1> %mask, <vscale x 8 x i8> %passthru)
  ret <vscale x 8 x i8> %ret
}

define <vscale x 16 x i8> @vector_compress_nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vcompress.vm v10, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %ret = call <vscale x 16 x i8> @llvm.experimental.vector.compress.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
  ret <vscale x 16 x i8> %ret
}

define <vscale x 16 x i8> @vector_compress_nxv16i8_passthru(<vscale x 16 x i8> %passthru, <vscale x 16 x i8> %data, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv16i8_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 16 x i8> @llvm.experimental.vector.compress.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %mask, <vscale x 16 x i8> %passthru)
  ret <vscale x 16 x i8> %ret
}

define <vscale x 32 x i8> @vector_compress_nxv32i8(<vscale x 32 x i8> %data, <vscale x 32 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vcompress.vm v12, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %ret = call <vscale x 32 x i8> @llvm.experimental.vector.compress.nxv32i8(<vscale x 32 x i8> %data, <vscale x 32 x i1> %mask, <vscale x 32 x i8> undef)
  ret <vscale x 32 x i8> %ret
}

define <vscale x 32 x i8> @vector_compress_nxv32i8_passthru(<vscale x 32 x i8> %passthru, <vscale x 32 x i8> %data, <vscale x 32 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv32i8_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 32 x i8> @llvm.experimental.vector.compress.nxv32i8(<vscale x 32 x i8> %data, <vscale x 32 x i1> %mask, <vscale x 32 x i8> %passthru)
  ret <vscale x 32 x i8> %ret
}

define <vscale x 64 x i8> @vector_compress_nxv64i8(<vscale x 64 x i8> %data, <vscale x 64 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT:    vcompress.vm v16, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %ret = call <vscale x 64 x i8> @llvm.experimental.vector.compress.nxv64i8(<vscale x 64 x i8> %data, <vscale x 64 x i1> %mask, <vscale x 64 x i8> undef)
  ret <vscale x 64 x i8> %ret
}

define <vscale x 64 x i8> @vector_compress_nxv64i8_passthru(<vscale x 64 x i8> %passthru, <vscale x 64 x i8> %data, <vscale x 64 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv64i8_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v16, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 64 x i8> @llvm.experimental.vector.compress.nxv64i8(<vscale x 64 x i8> %data, <vscale x 64 x i1> %mask, <vscale x 64 x i8> %passthru)
  ret <vscale x 64 x i8> %ret
}

; Vector compress for i16 type

define <vscale x 1 x i16> @vector_compress_nxv1i16(<vscale x 1 x i16> %data, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 1 x i16> @llvm.experimental.vector.compress.nxv1i16(<vscale x 1 x i16> %data, <vscale x 1 x i1> %mask, <vscale x 1 x i16> undef)
  ret <vscale x 1 x i16> %ret
}

define <vscale x 1 x i16> @vector_compress_nxv1i16_passthru(<vscale x 1 x i16> %passthru, <vscale x 1 x i16> %data, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv1i16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 1 x i16> @llvm.experimental.vector.compress.nxv1i16(<vscale x 1 x i16> %data, <vscale x 1 x i1> %mask, <vscale x 1 x i16> %passthru)
  ret <vscale x 1 x i16> %ret
}

define <vscale x 2 x i16> @vector_compress_nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 2 x i16> @llvm.experimental.vector.compress.nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
  ret <vscale x 2 x i16> %ret
}

define <vscale x 2 x i16> @vector_compress_nxv2i16_passthru(<vscale x 2 x i16> %passthru, <vscale x 2 x i16> %data, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv2i16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 2 x i16> @llvm.experimental.vector.compress.nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x i1> %mask, <vscale x 2 x i16> %passthru)
  ret <vscale x 2 x i16> %ret
}

define <vscale x 4 x i16> @vector_compress_nxv4i16(<vscale x 4 x i16> %data, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 4 x i16> @llvm.experimental.vector.compress.nxv4i16(<vscale x 4 x i16> %data, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
  ret <vscale x 4 x i16> %ret
}

define <vscale x 4 x i16> @vector_compress_nxv4i16_passthru(<vscale x 4 x i16> %passthru, <vscale x 4 x i16> %data, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv4i16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 4 x i16> @llvm.experimental.vector.compress.nxv4i16(<vscale x 4 x i16> %data, <vscale x 4 x i1> %mask, <vscale x 4 x i16> %passthru)
  ret <vscale x 4 x i16> %ret
}

define <vscale x 8 x i16> @vector_compress_nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vcompress.vm v10, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %ret = call <vscale x 8 x i16> @llvm.experimental.vector.compress.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
  ret <vscale x 8 x i16> %ret
}

define <vscale x 8 x i16> @vector_compress_nxv8i16_passthru(<vscale x 8 x i16> %passthru, <vscale x 8 x i16> %data, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv8i16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 8 x i16> @llvm.experimental.vector.compress.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %mask, <vscale x 8 x i16> %passthru)
  ret <vscale x 8 x i16> %ret
}

define <vscale x 16 x i16> @vector_compress_nxv16i16(<vscale x 16 x i16> %data, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vcompress.vm v12, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %ret = call <vscale x 16 x i16> @llvm.experimental.vector.compress.nxv16i16(<vscale x 16 x i16> %data, <vscale x 16 x i1> %mask, <vscale x 16 x i16> undef)
  ret <vscale x 16 x i16> %ret
}

define <vscale x 16 x i16> @vector_compress_nxv16i16_passthru(<vscale x 16 x i16> %passthru, <vscale x 16 x i16> %data, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv16i16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 16 x i16> @llvm.experimental.vector.compress.nxv16i16(<vscale x 16 x i16> %data, <vscale x 16 x i1> %mask, <vscale x 16 x i16> %passthru)
  ret <vscale x 16 x i16> %ret
}

define <vscale x 32 x i16> @vector_compress_nxv32i16(<vscale x 32 x i16> %data, <vscale x 32 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT:    vcompress.vm v16, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %ret = call <vscale x 32 x i16> @llvm.experimental.vector.compress.nxv32i16(<vscale x 32 x i16> %data, <vscale x 32 x i1> %mask, <vscale x 32 x i16> undef)
  ret <vscale x 32 x i16> %ret
}

define <vscale x 32 x i16> @vector_compress_nxv32i16_passthru(<vscale x 32 x i16> %passthru, <vscale x 32 x i16> %data, <vscale x 32 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv32i16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v16, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 32 x i16> @llvm.experimental.vector.compress.nxv32i16(<vscale x 32 x i16> %data, <vscale x 32 x i1> %mask, <vscale x 32 x i16> %passthru)
  ret <vscale x 32 x i16> %ret
}

; Vector compress for i32 type

define <vscale x 1 x i32> @vector_compress_nxv1i32(<vscale x 1 x i32> %data, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 1 x i32> @llvm.experimental.vector.compress.nxv1i32(<vscale x 1 x i32> %data, <vscale x 1 x i1> %mask, <vscale x 1 x i32> undef)
  ret <vscale x 1 x i32> %ret
}

define <vscale x 1 x i32> @vector_compress_nxv1i32_passthru(<vscale x 1 x i32> %passthru, <vscale x 1 x i32> %data, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv1i32_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 1 x i32> @llvm.experimental.vector.compress.nxv1i32(<vscale x 1 x i32> %data, <vscale x 1 x i1> %mask, <vscale x 1 x i32> %passthru)
  ret <vscale x 1 x i32> %ret
}

define <vscale x 2 x i32> @vector_compress_nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 2 x i32> @llvm.experimental.vector.compress.nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
  ret <vscale x 2 x i32> %ret
}

define <vscale x 2 x i32> @vector_compress_nxv2i32_passthru(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %data, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv2i32_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 2 x i32> @llvm.experimental.vector.compress.nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x i1> %mask, <vscale x 2 x i32> %passthru)
  ret <vscale x 2 x i32> %ret
}

define <vscale x 4 x i32> @vector_compress_nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vcompress.vm v10, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %ret = call <vscale x 4 x i32> @llvm.experimental.vector.compress.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
  ret <vscale x 4 x i32> %ret
}

define <vscale x 4 x i32> @vector_compress_nxv4i32_passthru(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %data, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv4i32_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 4 x i32> @llvm.experimental.vector.compress.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %mask, <vscale x 4 x i32> %passthru)
  ret <vscale x 4 x i32> %ret
}

define <vscale x 8 x i32> @vector_compress_nxv8i32(<vscale x 8 x i32> %data, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vcompress.vm v12, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %ret = call <vscale x 8 x i32> @llvm.experimental.vector.compress.nxv8i32(<vscale x 8 x i32> %data, <vscale x 8 x i1> %mask, <vscale x 8 x i32> undef)
  ret <vscale x 8 x i32> %ret
}

define <vscale x 8 x i32> @vector_compress_nxv8i32_passthru(<vscale x 8 x i32> %passthru, <vscale x 8 x i32> %data, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv8i32_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 8 x i32> @llvm.experimental.vector.compress.nxv8i32(<vscale x 8 x i32> %data, <vscale x 8 x i1> %mask, <vscale x 8 x i32> %passthru)
  ret <vscale x 8 x i32> %ret
}

define <vscale x 16 x i32> @vector_compress_nxv16i32(<vscale x 16 x i32> %data, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vcompress.vm v16, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %ret = call <vscale x 16 x i32> @llvm.experimental.vector.compress.nxv16i32(<vscale x 16 x i32> %data, <vscale x 16 x i1> %mask, <vscale x 16 x i32> undef)
  ret <vscale x 16 x i32> %ret
}

define <vscale x 16 x i32> @vector_compress_nxv16i32_passthru(<vscale x 16 x i32> %passthru, <vscale x 16 x i32> %data, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv16i32_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v16, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 16 x i32> @llvm.experimental.vector.compress.nxv16i32(<vscale x 16 x i32> %data, <vscale x 16 x i1> %mask, <vscale x 16 x i32> %passthru)
  ret <vscale x 16 x i32> %ret
}

; Vector compress for i64 type

define <vscale x 1 x i64> @vector_compress_nxv1i64(<vscale x 1 x i64> %data, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 1 x i64> @llvm.experimental.vector.compress.nxv1i64(<vscale x 1 x i64> %data, <vscale x 1 x i1> %mask, <vscale x 1 x i64> undef)
  ret <vscale x 1 x i64> %ret
}

define <vscale x 1 x i64> @vector_compress_nxv1i64_passthru(<vscale x 1 x i64> %passthru, <vscale x 1 x i64> %data, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv1i64_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 1 x i64> @llvm.experimental.vector.compress.nxv1i64(<vscale x 1 x i64> %data, <vscale x 1 x i1> %mask, <vscale x 1 x i64> %passthru)
  ret <vscale x 1 x i64> %ret
}

define <vscale x 2 x i64> @vector_compress_nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vcompress.vm v10, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %ret = call <vscale x 2 x i64> @llvm.experimental.vector.compress.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
  ret <vscale x 2 x i64> %ret
}

define <vscale x 2 x i64> @vector_compress_nxv2i64_passthru(<vscale x 2 x i64> %passthru, <vscale x 2 x i64> %data, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv2i64_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 2 x i64> @llvm.experimental.vector.compress.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %mask, <vscale x 2 x i64> %passthru)
  ret <vscale x 2 x i64> %ret
}

define <vscale x 4 x i64> @vector_compress_nxv4i64(<vscale x 4 x i64> %data, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vcompress.vm v12, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %ret = call <vscale x 4 x i64> @llvm.experimental.vector.compress.nxv4i64(<vscale x 4 x i64> %data, <vscale x 4 x i1> %mask, <vscale x 4 x i64> undef)
  ret <vscale x 4 x i64> %ret
}

define <vscale x 4 x i64> @vector_compress_nxv4i64_passthru(<vscale x 4 x i64> %passthru, <vscale x 4 x i64> %data, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv4i64_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 4 x i64> @llvm.experimental.vector.compress.nxv4i64(<vscale x 4 x i64> %data, <vscale x 4 x i1> %mask, <vscale x 4 x i64> %passthru)
  ret <vscale x 4 x i64> %ret
}

define <vscale x 8 x i64> @vector_compress_nxv8i64(<vscale x 8 x i64> %data, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vcompress.vm v16, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %ret = call <vscale x 8 x i64> @llvm.experimental.vector.compress.nxv8i64(<vscale x 8 x i64> %data, <vscale x 8 x i1> %mask, <vscale x 8 x i64> undef)
  ret <vscale x 8 x i64> %ret
}

define <vscale x 8 x i64> @vector_compress_nxv8i64_passthru(<vscale x 8 x i64> %passthru, <vscale x 8 x i64> %data, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv8i64_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v16, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 8 x i64> @llvm.experimental.vector.compress.nxv8i64(<vscale x 8 x i64> %data, <vscale x 8 x i1> %mask, <vscale x 8 x i64> %passthru)
  ret <vscale x 8 x i64> %ret
}

; Vector compress for bf16 type

define <vscale x 1 x bfloat> @vector_compress_nxv1bf16(<vscale x 1 x bfloat> %data, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv1bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 1 x bfloat> @llvm.experimental.vector.compress.nxv1bf16(<vscale x 1 x bfloat> %data, <vscale x 1 x i1> %mask, <vscale x 1 x bfloat> undef)
  ret <vscale x 1 x bfloat> %ret
}

define <vscale x 1 x bfloat> @vector_compress_nxv1bf16_passthru(<vscale x 1 x bfloat> %passthru, <vscale x 1 x bfloat> %data, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv1bf16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 1 x bfloat> @llvm.experimental.vector.compress.nxv1bf16(<vscale x 1 x bfloat> %data, <vscale x 1 x i1> %mask, <vscale x 1 x bfloat> %passthru)
  ret <vscale x 1 x bfloat> %ret
}

define <vscale x 2 x bfloat> @vector_compress_nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv2bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 2 x bfloat> @llvm.experimental.vector.compress.nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
  ret <vscale x 2 x bfloat> %ret
}

define <vscale x 2 x bfloat> @vector_compress_nxv2bf16_passthru(<vscale x 2 x bfloat> %passthru, <vscale x 2 x bfloat> %data, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv2bf16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 2 x bfloat> @llvm.experimental.vector.compress.nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> %passthru)
  ret <vscale x 2 x bfloat> %ret
}

define <vscale x 4 x bfloat> @vector_compress_nxv4bf16(<vscale x 4 x bfloat> %data, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv4bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 4 x bfloat> @llvm.experimental.vector.compress.nxv4bf16(<vscale x 4 x bfloat> %data, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
  ret <vscale x 4 x bfloat> %ret
}

define <vscale x 4 x bfloat> @vector_compress_nxv4bf16_passthru(<vscale x 4 x bfloat> %passthru, <vscale x 4 x bfloat> %data, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv4bf16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 4 x bfloat> @llvm.experimental.vector.compress.nxv4bf16(<vscale x 4 x bfloat> %data, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> %passthru)
  ret <vscale x 4 x bfloat> %ret
}

define <vscale x 8 x bfloat> @vector_compress_nxv8bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv8bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vcompress.vm v10, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %ret = call <vscale x 8 x bfloat> @llvm.experimental.vector.compress.nxv8bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %mask, <vscale x 8 x bfloat> undef)
  ret <vscale x 8 x bfloat> %ret
}

define <vscale x 8 x bfloat> @vector_compress_nxv8bf16_passthru(<vscale x 8 x bfloat> %passthru, <vscale x 8 x bfloat> %data, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv8bf16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 8 x bfloat> @llvm.experimental.vector.compress.nxv8bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %mask, <vscale x 8 x bfloat> %passthru)
  ret <vscale x 8 x bfloat> %ret
}

define <vscale x 16 x bfloat> @vector_compress_nxv16bf16(<vscale x 16 x bfloat> %data, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv16bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vcompress.vm v12, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %ret = call <vscale x 16 x bfloat> @llvm.experimental.vector.compress.nxv16bf16(<vscale x 16 x bfloat> %data, <vscale x 16 x i1> %mask, <vscale x 16 x bfloat> undef)
  ret <vscale x 16 x bfloat> %ret
}

define <vscale x 16 x bfloat> @vector_compress_nxv16bf16_passthru(<vscale x 16 x bfloat> %passthru, <vscale x 16 x bfloat> %data, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv16bf16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 16 x bfloat> @llvm.experimental.vector.compress.nxv16bf16(<vscale x 16 x bfloat> %data, <vscale x 16 x i1> %mask, <vscale x 16 x bfloat> %passthru)
  ret <vscale x 16 x bfloat> %ret
}

define <vscale x 32 x bfloat> @vector_compress_nxv32bf16(<vscale x 32 x bfloat> %data, <vscale x 32 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv32bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT:    vcompress.vm v16, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %ret = call <vscale x 32 x bfloat> @llvm.experimental.vector.compress.nxv32bf16(<vscale x 32 x bfloat> %data, <vscale x 32 x i1> %mask, <vscale x 32 x bfloat> undef)
  ret <vscale x 32 x bfloat> %ret
}

define <vscale x 32 x bfloat> @vector_compress_nxv32bf16_passthru(<vscale x 32 x bfloat> %passthru, <vscale x 32 x bfloat> %data, <vscale x 32 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv32bf16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v16, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 32 x bfloat> @llvm.experimental.vector.compress.nxv32bf16(<vscale x 32 x bfloat> %data, <vscale x 32 x i1> %mask, <vscale x 32 x bfloat> %passthru)
  ret <vscale x 32 x bfloat> %ret
}

; Vector compress for f16 type

define <vscale x 1 x half> @vector_compress_nxv1f16(<vscale x 1 x half> %data, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 1 x half> @llvm.experimental.vector.compress.nxv1f16(<vscale x 1 x half> %data, <vscale x 1 x i1> %mask, <vscale x 1 x half> undef)
  ret <vscale x 1 x half> %ret
}

define <vscale x 1 x half> @vector_compress_nxv1f16_passthru(<vscale x 1 x half> %passthru, <vscale x 1 x half> %data, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv1f16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 1 x half> @llvm.experimental.vector.compress.nxv1f16(<vscale x 1 x half> %data, <vscale x 1 x i1> %mask, <vscale x 1 x half> %passthru)
  ret <vscale x 1 x half> %ret
}

define <vscale x 2 x half> @vector_compress_nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 2 x half> @llvm.experimental.vector.compress.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
  ret <vscale x 2 x half> %ret
}

define <vscale x 2 x half> @vector_compress_nxv2f16_passthru(<vscale x 2 x half> %passthru, <vscale x 2 x half> %data, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv2f16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 2 x half> @llvm.experimental.vector.compress.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x i1> %mask, <vscale x 2 x half> %passthru)
  ret <vscale x 2 x half> %ret
}

define <vscale x 4 x half> @vector_compress_nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 4 x half> @llvm.experimental.vector.compress.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x i1> %mask, <vscale x 4 x half> undef)
  ret <vscale x 4 x half> %ret
}

define <vscale x 4 x half> @vector_compress_nxv4f16_passthru(<vscale x 4 x half> %passthru, <vscale x 4 x half> %data, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv4f16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 4 x half> @llvm.experimental.vector.compress.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x i1> %mask, <vscale x 4 x half> %passthru)
  ret <vscale x 4 x half> %ret
}

define <vscale x 8 x half> @vector_compress_nxv8f16(<vscale x 8 x half> %data, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vcompress.vm v10, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %ret = call <vscale x 8 x half> @llvm.experimental.vector.compress.nxv8f16(<vscale x 8 x half> %data, <vscale x 8 x i1> %mask, <vscale x 8 x half> undef)
  ret <vscale x 8 x half> %ret
}

define <vscale x 8 x half> @vector_compress_nxv8f16_passthru(<vscale x 8 x half> %passthru, <vscale x 8 x half> %data, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv8f16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 8 x half> @llvm.experimental.vector.compress.nxv8f16(<vscale x 8 x half> %data, <vscale x 8 x i1> %mask, <vscale x 8 x half> %passthru)
  ret <vscale x 8 x half> %ret
}

define <vscale x 16 x half> @vector_compress_nxv16f16(<vscale x 16 x half> %data, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vcompress.vm v12, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %ret = call <vscale x 16 x half> @llvm.experimental.vector.compress.nxv16f16(<vscale x 16 x half> %data, <vscale x 16 x i1> %mask, <vscale x 16 x half> undef)
  ret <vscale x 16 x half> %ret
}

define <vscale x 16 x half> @vector_compress_nxv16f16_passthru(<vscale x 16 x half> %passthru, <vscale x 16 x half> %data, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv16f16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 16 x half> @llvm.experimental.vector.compress.nxv16f16(<vscale x 16 x half> %data, <vscale x 16 x i1> %mask, <vscale x 16 x half> %passthru)
  ret <vscale x 16 x half> %ret
}

define <vscale x 32 x half> @vector_compress_nxv32f16(<vscale x 32 x half> %data, <vscale x 32 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT:    vcompress.vm v16, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %ret = call <vscale x 32 x half> @llvm.experimental.vector.compress.nxv32f16(<vscale x 32 x half> %data, <vscale x 32 x i1> %mask, <vscale x 32 x half> undef)
  ret <vscale x 32 x half> %ret
}

define <vscale x 32 x half> @vector_compress_nxv32f16_passthru(<vscale x 32 x half> %passthru, <vscale x 32 x half> %data, <vscale x 32 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv32f16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v16, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 32 x half> @llvm.experimental.vector.compress.nxv32f16(<vscale x 32 x half> %data, <vscale x 32 x i1> %mask, <vscale x 32 x half> %passthru)
  ret <vscale x 32 x half> %ret
}

; Vector compress for f32 type

define <vscale x 1 x float> @vector_compress_nxv1f32(<vscale x 1 x float> %data, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 1 x float> @llvm.experimental.vector.compress.nxv1f32(<vscale x 1 x float> %data, <vscale x 1 x i1> %mask, <vscale x 1 x float> undef)
  ret <vscale x 1 x float> %ret
}

define <vscale x 1 x float> @vector_compress_nxv1f32_passthru(<vscale x 1 x float> %passthru, <vscale x 1 x float> %data, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv1f32_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 1 x float> @llvm.experimental.vector.compress.nxv1f32(<vscale x 1 x float> %data, <vscale x 1 x i1> %mask, <vscale x 1 x float> %passthru)
  ret <vscale x 1 x float> %ret
}

define <vscale x 2 x float> @vector_compress_nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 2 x float> @llvm.experimental.vector.compress.nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
  ret <vscale x 2 x float> %ret
}

define <vscale x 2 x float> @vector_compress_nxv2f32_passthru(<vscale x 2 x float> %passthru, <vscale x 2 x float> %data, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv2f32_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 2 x float> @llvm.experimental.vector.compress.nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x i1> %mask, <vscale x 2 x float> %passthru)
  ret <vscale x 2 x float> %ret
}

define <vscale x 4 x float> @vector_compress_nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vcompress.vm v10, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %ret = call <vscale x 4 x float> @llvm.experimental.vector.compress.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %mask, <vscale x 4 x float> undef)
  ret <vscale x 4 x float> %ret
}

define <vscale x 4 x float> @vector_compress_nxv4f32_passthru(<vscale x 4 x float> %passthru, <vscale x 4 x float> %data, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv4f32_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 4 x float> @llvm.experimental.vector.compress.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %mask, <vscale x 4 x float> %passthru)
  ret <vscale x 4 x float> %ret
}

define <vscale x 8 x float> @vector_compress_nxv8f32(<vscale x 8 x float> %data, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vcompress.vm v12, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %ret = call <vscale x 8 x float> @llvm.experimental.vector.compress.nxv8f32(<vscale x 8 x float> %data, <vscale x 8 x i1> %mask, <vscale x 8 x float> undef)
  ret <vscale x 8 x float> %ret
}

define <vscale x 8 x float> @vector_compress_nxv8f32_passthru(<vscale x 8 x float> %passthru, <vscale x 8 x float> %data, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv8f32_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 8 x float> @llvm.experimental.vector.compress.nxv8f32(<vscale x 8 x float> %data, <vscale x 8 x i1> %mask, <vscale x 8 x float> %passthru)
  ret <vscale x 8 x float> %ret
}

define <vscale x 16 x float> @vector_compress_nxv16f32(<vscale x 16 x float> %data, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vcompress.vm v16, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %ret = call <vscale x 16 x float> @llvm.experimental.vector.compress.nxv16f32(<vscale x 16 x float> %data, <vscale x 16 x i1> %mask, <vscale x 16 x float> undef)
  ret <vscale x 16 x float> %ret
}

define <vscale x 16 x float> @vector_compress_nxv16f32_passthru(<vscale x 16 x float> %passthru, <vscale x 16 x float> %data, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv16f32_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v16, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 16 x float> @llvm.experimental.vector.compress.nxv16f32(<vscale x 16 x float> %data, <vscale x 16 x i1> %mask, <vscale x 16 x float> %passthru)
  ret <vscale x 16 x float> %ret
}

; Vector compress for f64 type

define <vscale x 1 x double> @vector_compress_nxv1f64(<vscale x 1 x double> %data, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 1 x double> @llvm.experimental.vector.compress.nxv1f64(<vscale x 1 x double> %data, <vscale x 1 x i1> %mask, <vscale x 1 x double> undef)
  ret <vscale x 1 x double> %ret
}

define <vscale x 1 x double> @vector_compress_nxv1f64_passthru(<vscale x 1 x double> %passthru, <vscale x 1 x double> %data, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv1f64_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 1 x double> @llvm.experimental.vector.compress.nxv1f64(<vscale x 1 x double> %data, <vscale x 1 x i1> %mask, <vscale x 1 x double> %passthru)
  ret <vscale x 1 x double> %ret
}

define <vscale x 2 x double> @vector_compress_nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vcompress.vm v10, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %ret = call <vscale x 2 x double> @llvm.experimental.vector.compress.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
  ret <vscale x 2 x double> %ret
}

define <vscale x 2 x double> @vector_compress_nxv2f64_passthru(<vscale x 2 x double> %passthru, <vscale x 2 x double> %data, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv2f64_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 2 x double> @llvm.experimental.vector.compress.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %mask, <vscale x 2 x double> %passthru)
  ret <vscale x 2 x double> %ret
}

define <vscale x 4 x double> @vector_compress_nxv4f64(<vscale x 4 x double> %data, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vcompress.vm v12, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %ret = call <vscale x 4 x double> @llvm.experimental.vector.compress.nxv4f64(<vscale x 4 x double> %data, <vscale x 4 x i1> %mask, <vscale x 4 x double> undef)
  ret <vscale x 4 x double> %ret
}

define <vscale x 4 x double> @vector_compress_nxv4f64_passthru(<vscale x 4 x double> %passthru, <vscale x 4 x double> %data, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv4f64_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 4 x double> @llvm.experimental.vector.compress.nxv4f64(<vscale x 4 x double> %data, <vscale x 4 x i1> %mask, <vscale x 4 x double> %passthru)
  ret <vscale x 4 x double> %ret
}

define <vscale x 8 x double> @vector_compress_nxv8f64(<vscale x 8 x double> %data, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vcompress.vm v16, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %ret = call <vscale x 8 x double> @llvm.experimental.vector.compress.nxv8f64(<vscale x 8 x double> %data, <vscale x 8 x i1> %mask, <vscale x 8 x double> undef)
  ret <vscale x 8 x double> %ret
}

define <vscale x 8 x double> @vector_compress_nxv8f64_passthru(<vscale x 8 x double> %passthru, <vscale x 8 x double> %data, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv8f64_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v16, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 8 x double> @llvm.experimental.vector.compress.nxv8f64(<vscale x 8 x double> %data, <vscale x 8 x i1> %mask, <vscale x 8 x double> %passthru)
  ret <vscale x 8 x double> %ret
}