; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
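
; vnsrl performs a narrowing logical right shift: each 2*SEW-bit element of
; the wide source operand is shifted right and truncated to an SEW-bit
; result. The shift amount comes from a vector (vnsrl.wv), a scalar
; register (vnsrl.wx), or a 5-bit unsigned immediate (vnsrl.wi); the tests
; below exercise all three forms, unmasked and masked.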
declare <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  iXLen)

define <vscale x 1 x i8> @intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vnsrl.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i8> @intrinsic_vnsrl_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  iXLen)

define <vscale x 2 x i8> @intrinsic_vnsrl_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv2i8_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i16> %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i8> @intrinsic_vnsrl_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i8_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  iXLen)

define <vscale x 4 x i8> @intrinsic_vnsrl_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv4i8_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i16> %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i8> @intrinsic_vnsrl_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i8_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  iXLen)

define <vscale x 8 x i8> @intrinsic_vnsrl_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv8i8_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vnsrl.wv v11, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i16> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i8> @intrinsic_vnsrl_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i8_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vnsrl.wv v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  iXLen)

define <vscale x 16 x i8> @intrinsic_vnsrl_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv16i8_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vnsrl.wv v14, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v14
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i16> %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i8> @intrinsic_vnsrl_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv16i8_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vnsrl.wv v8, v12, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  iXLen)

define <vscale x 32 x i8> @intrinsic_vnsrl_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv32i8_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vnsrl.wv v20, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v20
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i16> %0,
    <vscale x 32 x i8> %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)

define <vscale x 32 x i8> @intrinsic_vnsrl_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv32i8_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vnsrl.wv v8, v16, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  iXLen)

define <vscale x 1 x i16> @intrinsic_vnsrl_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv1i16_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i32> %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i16> @intrinsic_vnsrl_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i16_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  iXLen)

define <vscale x 2 x i16> @intrinsic_vnsrl_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv2i16_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i32> %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i16> @intrinsic_vnsrl_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i16_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  iXLen)

define <vscale x 4 x i16> @intrinsic_vnsrl_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv4i16_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vnsrl.wv v11, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i32> %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i16> @intrinsic_vnsrl_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i16_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vnsrl.wv v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  iXLen)

define <vscale x 8 x i16> @intrinsic_vnsrl_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv8i16_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vnsrl.wv v14, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v14
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i32> %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i16> @intrinsic_vnsrl_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i16_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vnsrl.wv v8, v12, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  iXLen)

define <vscale x 16 x i16> @intrinsic_vnsrl_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv16i16_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vnsrl.wv v20, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v20
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i32> %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i16> @intrinsic_vnsrl_mask_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv16i16_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vnsrl.wv v8, v16, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  iXLen)

define <vscale x 1 x i32> @intrinsic_vnsrl_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv1i32_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i64> %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i32> @intrinsic_vnsrl_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i32_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  iXLen)

define <vscale x 2 x i32> @intrinsic_vnsrl_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv2i32_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vnsrl.wv v11, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i64> %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i32> @intrinsic_vnsrl_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i32_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vnsrl.wv v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  iXLen)

define <vscale x 4 x i32> @intrinsic_vnsrl_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv4i32_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vnsrl.wv v14, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v14
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i64> %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i32> @intrinsic_vnsrl_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i32_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vnsrl.wv v8, v12, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  iXLen)

define <vscale x 8 x i32> @intrinsic_vnsrl_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv8i32_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wv v20, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v20
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i64> %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i32> @intrinsic_vnsrl_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i32_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vnsrl.wv v8, v16, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}
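
; The .wx form takes the shift amount in a scalar GPR (here a0), so the
; iXLen shift operand of the intrinsic is passed in a register rather than
; as a vector operand.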
declare <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  iXLen,
  iXLen)

define <vscale x 1 x i8> @intrinsic_vnsrl_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i8_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vnsrl.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i16> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i8> @intrinsic_vnsrl_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i8_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i16> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16(
  <vscale x 2 x i8>,
  <vscale x 2 x i16>,
  iXLen,
  iXLen)

define <vscale x 2 x i8> @intrinsic_vnsrl_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i8_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i16> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16(
  <vscale x 2 x i8>,
  <vscale x 2 x i16>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i8> @intrinsic_vnsrl_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i8_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i16> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16(
  <vscale x 4 x i8>,
  <vscale x 4 x i16>,
  iXLen,
  iXLen)

define <vscale x 4 x i8> @intrinsic_vnsrl_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i16> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16(
  <vscale x 4 x i8>,
  <vscale x 4 x i16>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i8> @intrinsic_vnsrl_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i16> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16(
  <vscale x 8 x i8>,
  <vscale x 8 x i16>,
  iXLen,
  iXLen)

define <vscale x 8 x i8> @intrinsic_vnsrl_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i8_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vnsrl.wx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i16> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16(
  <vscale x 8 x i8>,
  <vscale x 8 x i16>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i8> @intrinsic_vnsrl_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i8_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vnsrl.wx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i16> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16(
  <vscale x 16 x i8>,
  <vscale x 16 x i16>,
  iXLen,
  iXLen)

define <vscale x 16 x i8> @intrinsic_vnsrl_vx_nxv16i8_nxv16i16(<vscale x 16 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv16i8_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vnsrl.wx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i16> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16(
  <vscale x 16 x i8>,
  <vscale x 16 x i16>,
  iXLen,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i8> @intrinsic_vnsrl_mask_vx_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv16i8_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vnsrl.wx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i16> %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16(
  <vscale x 32 x i8>,
  <vscale x 32 x i16>,
  iXLen,
  iXLen)

define <vscale x 32 x i8> @intrinsic_vnsrl_vx_nxv32i8_nxv32i16(<vscale x 32 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv32i8_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vnsrl.wx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i16> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16(
  <vscale x 32 x i8>,
  <vscale x 32 x i16>,
  iXLen,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)

define <vscale x 32 x i8> @intrinsic_vnsrl_mask_vx_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv32i8_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vnsrl.wx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i16> %1,
    iXLen %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32(
  <vscale x 1 x i16>,
  <vscale x 1 x i32>,
  iXLen,
  iXLen)

define <vscale x 1 x i16> @intrinsic_vnsrl_vx_nxv1i16_nxv1i32(<vscale x 1 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i32> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32(
  <vscale x 1 x i16>,
  <vscale x 1 x i32>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i16> @intrinsic_vnsrl_mask_vx_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i32> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32(
  <vscale x 2 x i16>,
  <vscale x 2 x i32>,
  iXLen,
  iXLen)

define <vscale x 2 x i16> @intrinsic_vnsrl_vx_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i32> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32(
  <vscale x 2 x i16>,
  <vscale x 2 x i32>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i16> @intrinsic_vnsrl_mask_vx_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i32> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(
  <vscale x 4 x i16>,
  <vscale x 4 x i32>,
  iXLen,
  iXLen)

define <vscale x 4 x i16> @intrinsic_vnsrl_vx_nxv4i16_nxv4i32(<vscale x 4 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vnsrl.wx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i32> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32(
  <vscale x 4 x i16>,
  <vscale x 4 x i32>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i16> @intrinsic_vnsrl_mask_vx_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vnsrl.wx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i32> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32(
  <vscale x 8 x i16>,
  <vscale x 8 x i32>,
  iXLen,
  iXLen)

define <vscale x 8 x i16> @intrinsic_vnsrl_vx_nxv8i16_nxv8i32(<vscale x 8 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vnsrl.wx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i32> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32(
  <vscale x 8 x i16>,
  <vscale x 8 x i32>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i16> @intrinsic_vnsrl_mask_vx_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vnsrl.wx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i32> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32(
  <vscale x 16 x i16>,
  <vscale x 16 x i32>,
  iXLen,
  iXLen)

define <vscale x 16 x i16> @intrinsic_vnsrl_vx_nxv16i16_nxv16i32(<vscale x 16 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv16i16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vnsrl.wx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i32> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32(
  <vscale x 16 x i16>,
  <vscale x 16 x i32>,
  iXLen,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i16> @intrinsic_vnsrl_mask_vx_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv16i16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vnsrl.wx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i32> %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64(
  <vscale x 1 x i32>,
  <vscale x 1 x i64>,
  iXLen,
  iXLen)

define <vscale x 1 x i32> @intrinsic_vnsrl_vx_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i32_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i64> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64(
  <vscale x 1 x i32>,
  <vscale x 1 x i64>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i32> @intrinsic_vnsrl_mask_vx_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i32_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i64> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64(
  <vscale x 2 x i32>,
  <vscale x 2 x i64>,
  iXLen,
  iXLen)

define <vscale x 2 x i32> @intrinsic_vnsrl_vx_nxv2i32_nxv2i64(<vscale x 2 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vnsrl.wx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i64> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64(
  <vscale x 2 x i32>,
  <vscale x 2 x i64>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i32> @intrinsic_vnsrl_mask_vx_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vnsrl.wx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i64> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64(
  <vscale x 4 x i32>,
  <vscale x 4 x i64>,
  iXLen,
  iXLen)

define <vscale x 4 x i32> @intrinsic_vnsrl_vx_nxv4i32_nxv4i64(<vscale x 4 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vnsrl.wx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i64> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64(
  <vscale x 4 x i32>,
  <vscale x 4 x i64>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i32> @intrinsic_vnsrl_mask_vx_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vnsrl.wx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i64> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64(
  <vscale x 8 x i32>,
  <vscale x 8 x i64>,
  iXLen,
  iXLen)

define <vscale x 8 x i32> @intrinsic_vnsrl_vx_nxv8i32_nxv8i64(<vscale x 8 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i32_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i64> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64(
  <vscale x 8 x i32>,
  <vscale x 8 x i64>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i32> @intrinsic_vnsrl_mask_vx_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i32_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vnsrl.wx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i64> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}
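
; The .wi form encodes the shift amount as a 5-bit unsigned immediate; the
; tests below shift by the constant 9, reusing the scalar-shift intrinsics
; with an immediate operand.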
define <vscale x 1 x i8> @intrinsic_vnsrl_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv1i8_nxv1i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i16> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 1 x i8> %a
}

define <vscale x 1 x i8> @intrinsic_vnsrl_mask_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv1i8_nxv1i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i16> %1,
    iXLen 9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vnsrl_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv2i8_nxv2i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i16> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 2 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vnsrl_mask_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv2i8_nxv2i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i16> %1,
    iXLen 9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vnsrl_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv4i8_nxv4i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i16> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 4 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vnsrl_mask_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv4i8_nxv4i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i16> %1,
    iXLen 9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vnsrl_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv8i8_nxv8i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i16> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 8 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vnsrl_mask_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv8i8_nxv8i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vnsrl.wi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i16> %1,
    iXLen 9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vnsrl_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv16i8_nxv16i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v12, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i16> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 16 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vnsrl_mask_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv16i8_nxv16i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vnsrl.wi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i16> %1,
    iXLen 9,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vnsrl_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv32i8_nxv32i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i16> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 32 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vnsrl_mask_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv32i8_nxv32i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vnsrl.wi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i16> %1,
    iXLen 9,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i8> %a
}
define <vscale x 1 x i16> @intrinsic_vnsrl_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv1i16_nxv1i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i32> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 1 x i16> %a
}

define <vscale x 1 x i16> @intrinsic_vnsrl_mask_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv1i16_nxv1i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i32> %1,
    iXLen 9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vnsrl_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv2i16_nxv2i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i32> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 2 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vnsrl_mask_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv2i16_nxv2i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i32> %1,
    iXLen 9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vnsrl_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv4i16_nxv4i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i32> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 4 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vnsrl_mask_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv4i16_nxv4i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vnsrl.wi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i32> %1,
    iXLen 9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vnsrl_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv8i16_nxv8i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v12, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i32> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 8 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vnsrl_mask_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv8i16_nxv8i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vnsrl.wi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i32> %1,
    iXLen 9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vnsrl_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv16i16_nxv16i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i32> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 16 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vnsrl_mask_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv16i16_nxv16i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vnsrl.wi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i32> %1,
    iXLen 9,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i16> %a
}
define <vscale x 1 x i32> @intrinsic_vnsrl_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv1i32_nxv1i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i64> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 1 x i32> %a
}

define <vscale x 1 x i32> @intrinsic_vnsrl_mask_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv1i32_nxv1i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i64> %1,
    iXLen 9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vnsrl_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv2i32_nxv2i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i64> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 2 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vnsrl_mask_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv2i32_nxv2i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vnsrl.wi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i64> %1,
    iXLen 9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vnsrl_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv4i32_nxv4i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v12, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i64> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 4 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vnsrl_mask_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv4i32_nxv4i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vnsrl.wi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i64> %1,
    iXLen 9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vnsrl_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv8i32_nxv8i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i64> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 8 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vnsrl_mask_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv8i32_nxv8i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vnsrl.wi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i64> %1,
    iXLen 9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i32> %a
}