; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

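; Codegen tests for the @llvm.riscv.vnsrl intrinsics, which select the RVV
; narrowing shift-right-logical instructions: vnsrl.wv (vector shift amount),
; vnsrl.wx (scalar shift amount), and vnsrl.wi (5-bit immediate). Each
; operation reads a 2*SEW-wide source operand and writes a SEW-wide result;
; the shift amount is used modulo 2*SEW. The sed lines above rewrite iXLen to
; i32 or i64 so one source file exercises both riscv32 and riscv64.
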
declare <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i16>, <vscale x 1 x i8>, iXLen)

define <vscale x 1 x i8> @intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vnsrl.wv v8, v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i16> %0, <vscale x 1 x i8> %1, iXLen %2)
  ret <vscale x 1 x i8> %a
}

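; The .mask variants additionally take a merge/passthru operand, a v0 mask,
; and a trailing policy immediate. The masked calls below pass iXLen 1, i.e.
; tail-agnostic with mask-undisturbed semantics, which is why the masked
; checks use "ta, mu" where the unmasked ones use "ta, ma".
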
declare <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vnsrl_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vnsrl.wv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i16>, <vscale x 2 x i8>, iXLen)

define <vscale x 2 x i8> @intrinsic_vnsrl_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv2i8_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vnsrl.wv v8, v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i16> %0, <vscale x 2 x i8> %1, iXLen %2)
  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i16>, <vscale x 2 x i8>, <vscale x 2 x i1>, iXLen, iXLen)

define <vscale x 2 x i8> @intrinsic_vnsrl_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i8_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vnsrl.wv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i16>, <vscale x 4 x i8>, iXLen)

define <vscale x 4 x i8> @intrinsic_vnsrl_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv4i8_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vnsrl.wv v8, v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i16> %0, <vscale x 4 x i8> %1, iXLen %2)
  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i16>, <vscale x 4 x i8>, <vscale x 4 x i1>, iXLen, iXLen)

define <vscale x 4 x i8> @intrinsic_vnsrl_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i8_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vnsrl.wv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i16>, <vscale x 8 x i8>, iXLen)

define <vscale x 8 x i8> @intrinsic_vnsrl_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv8i8_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vnsrl.wv v11, v8, v10
; CHECK-NEXT: vmv.v.v v8, v11
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i16> %0, <vscale x 8 x i8> %1, iXLen %2)
  ret <vscale x 8 x i8> %a
}

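; For m1 and larger destinations the result above is produced in a scratch
; register and copied back with vmv.v.v: the narrowing pseudos appear to mark
; the destination earlyclobber so it cannot partially overlap the 2*SEW-wide
; source register group, whereas the fractional-LMUL cases can reuse v8.
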
declare <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i16>, <vscale x 8 x i8>, <vscale x 8 x i1>, iXLen, iXLen)

define <vscale x 8 x i8> @intrinsic_vnsrl_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i8_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vnsrl.wv v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i16>, <vscale x 16 x i8>, iXLen)

define <vscale x 16 x i8> @intrinsic_vnsrl_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv16i8_nxv16i16_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vnsrl.wv v14, v8, v12
; CHECK-NEXT: vmv.v.v v8, v14
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i16> %0, <vscale x 16 x i8> %1, iXLen %2)
  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i16>, <vscale x 16 x i8>, <vscale x 16 x i1>, iXLen, iXLen)

define <vscale x 16 x i8> @intrinsic_vnsrl_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv16i8_nxv16i16_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vnsrl.wv v8, v12, v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i16>, <vscale x 32 x i8>, iXLen)

define <vscale x 32 x i8> @intrinsic_vnsrl_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv32i8_nxv32i16_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vnsrl.wv v20, v8, v16
; CHECK-NEXT: vmv.v.v v8, v20
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i16> %0, <vscale x 32 x i8> %1, iXLen %2)
  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i16>, <vscale x 32 x i8>, <vscale x 32 x i1>, iXLen, iXLen)

define <vscale x 32 x i8> @intrinsic_vnsrl_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv32i8_nxv32i16_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vnsrl.wv v8, v16, v12, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 32 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i32>, <vscale x 1 x i16>, iXLen)

define <vscale x 1 x i16> @intrinsic_vnsrl_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv1i16_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vnsrl.wv v8, v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i32> %0, <vscale x 1 x i16> %1, iXLen %2)
  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i32>, <vscale x 1 x i16>, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i16> @intrinsic_vnsrl_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i16_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vnsrl.wv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i32>, <vscale x 2 x i16>, iXLen)

define <vscale x 2 x i16> @intrinsic_vnsrl_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv2i16_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vnsrl.wv v8, v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i32> %0, <vscale x 2 x i16> %1, iXLen %2)
  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i32>, <vscale x 2 x i16>, <vscale x 2 x i1>, iXLen, iXLen)

define <vscale x 2 x i16> @intrinsic_vnsrl_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i16_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vnsrl.wv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i32>, <vscale x 4 x i16>, iXLen)

define <vscale x 4 x i16> @intrinsic_vnsrl_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv4i16_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vnsrl.wv v11, v8, v10
; CHECK-NEXT: vmv.v.v v8, v11
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i32> %0, <vscale x 4 x i16> %1, iXLen %2)
  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i32>, <vscale x 4 x i16>, <vscale x 4 x i1>, iXLen, iXLen)

define <vscale x 4 x i16> @intrinsic_vnsrl_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i16_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vnsrl.wv v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i32>, <vscale x 8 x i16>, iXLen)

define <vscale x 8 x i16> @intrinsic_vnsrl_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv8i16_nxv8i32_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vnsrl.wv v14, v8, v12
; CHECK-NEXT: vmv.v.v v8, v14
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i32> %0, <vscale x 8 x i16> %1, iXLen %2)
  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i32>, <vscale x 8 x i16>, <vscale x 8 x i1>, iXLen, iXLen)

define <vscale x 8 x i16> @intrinsic_vnsrl_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i16_nxv8i32_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vnsrl.wv v8, v12, v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i32>, <vscale x 16 x i16>, iXLen)

define <vscale x 16 x i16> @intrinsic_vnsrl_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv16i16_nxv16i32_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vnsrl.wv v20, v8, v16
; CHECK-NEXT: vmv.v.v v8, v20
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i32> %0, <vscale x 16 x i16> %1, iXLen %2)
  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i32>, <vscale x 16 x i16>, <vscale x 16 x i1>, iXLen, iXLen)

define <vscale x 16 x i16> @intrinsic_vnsrl_mask_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv16i16_nxv16i32_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vnsrl.wv v8, v16, v12, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 16 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i64>, <vscale x 1 x i32>, iXLen)

define <vscale x 1 x i32> @intrinsic_vnsrl_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv1i32_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vnsrl.wv v8, v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i64> %0, <vscale x 1 x i32> %1, iXLen %2)
  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i32> @intrinsic_vnsrl_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i32_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vnsrl.wv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i64>, <vscale x 2 x i32>, iXLen)

define <vscale x 2 x i32> @intrinsic_vnsrl_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv2i32_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vnsrl.wv v11, v8, v10
; CHECK-NEXT: vmv.v.v v8, v11
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i64> %0, <vscale x 2 x i32> %1, iXLen %2)
  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i64>, <vscale x 2 x i32>, <vscale x 2 x i1>, iXLen, iXLen)

define <vscale x 2 x i32> @intrinsic_vnsrl_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i32_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vnsrl.wv v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i64>, <vscale x 4 x i32>, iXLen)

define <vscale x 4 x i32> @intrinsic_vnsrl_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv4i32_nxv4i64_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vnsrl.wv v14, v8, v12
; CHECK-NEXT: vmv.v.v v8, v14
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i64> %0, <vscale x 4 x i32> %1, iXLen %2)
  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i64>, <vscale x 4 x i32>, <vscale x 4 x i1>, iXLen, iXLen)

define <vscale x 4 x i32> @intrinsic_vnsrl_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i32_nxv4i64_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vnsrl.wv v8, v12, v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i64>, <vscale x 8 x i32>, iXLen)

define <vscale x 8 x i32> @intrinsic_vnsrl_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv8i32_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vnsrl.wv v20, v8, v16
; CHECK-NEXT: vmv.v.v v8, v20
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i64> %0, <vscale x 8 x i32> %1, iXLen %2)
  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i64>, <vscale x 8 x i32>, <vscale x 8 x i1>, iXLen, iXLen)

define <vscale x 8 x i32> @intrinsic_vnsrl_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i32_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vnsrl.wv v8, v16, v12, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 8 x i32> %a
}

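; vnsrl.wx: the shift amount lives in a scalar GPR (a0 below), so the AVL is
; passed in a1 and the vsetvli consumes a1 rather than a0.
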
declare <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16(<vscale x 1 x i8>, <vscale x 1 x i16>, iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vnsrl_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vnsrl.wx v8, v8, a0
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16(<vscale x 1 x i8> undef, <vscale x 1 x i16> %0, iXLen %1, iXLen %2)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>, <vscale x 1 x i16>, iXLen, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vnsrl_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16(<vscale x 2 x i8>, <vscale x 2 x i16>, iXLen, iXLen)

define <vscale x 2 x i8> @intrinsic_vnsrl_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vnsrl.wx v8, v8, a0
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16(<vscale x 2 x i8> undef, <vscale x 2 x i16> %0, iXLen %1, iXLen %2)
  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>, <vscale x 2 x i16>, iXLen, <vscale x 2 x i1>, iXLen, iXLen)

define <vscale x 2 x i8> @intrinsic_vnsrl_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16(<vscale x 4 x i8>, <vscale x 4 x i16>, iXLen, iXLen)

define <vscale x 4 x i8> @intrinsic_vnsrl_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vnsrl.wx v8, v8, a0
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16(<vscale x 4 x i8> undef, <vscale x 4 x i16> %0, iXLen %1, iXLen %2)
  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>, <vscale x 4 x i16>, iXLen, <vscale x 4 x i1>, iXLen, iXLen)

define <vscale x 4 x i8> @intrinsic_vnsrl_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16(<vscale x 8 x i8>, <vscale x 8 x i16>, iXLen, iXLen)

define <vscale x 8 x i8> @intrinsic_vnsrl_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vnsrl.wx v10, v8, a0
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16(<vscale x 8 x i8> undef, <vscale x 8 x i16> %0, iXLen %1, iXLen %2)
  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>, <vscale x 8 x i16>, iXLen, <vscale x 8 x i1>, iXLen, iXLen)

define <vscale x 8 x i8> @intrinsic_vnsrl_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v10, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16(<vscale x 16 x i8>, <vscale x 16 x i16>, iXLen, iXLen)

define <vscale x 16 x i8> @intrinsic_vnsrl_vx_nxv16i8_nxv16i16(<vscale x 16 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vnsrl.wx v12, v8, a0
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16(<vscale x 16 x i8> undef, <vscale x 16 x i16> %0, iXLen %1, iXLen %2)
  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16(<vscale x 16 x i8>, <vscale x 16 x i16>, iXLen, <vscale x 16 x i1>, iXLen, iXLen)

define <vscale x 16 x i8> @intrinsic_vnsrl_mask_vx_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v12, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16(<vscale x 32 x i8>, <vscale x 32 x i16>, iXLen, iXLen)

define <vscale x 32 x i8> @intrinsic_vnsrl_vx_nxv32i8_nxv32i16(<vscale x 32 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv32i8_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vnsrl.wx v16, v8, a0
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16(<vscale x 32 x i8> undef, <vscale x 32 x i16> %0, iXLen %1, iXLen %2)
  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16(<vscale x 32 x i8>, <vscale x 32 x i16>, iXLen, <vscale x 32 x i1>, iXLen, iXLen)

define <vscale x 32 x i8> @intrinsic_vnsrl_mask_vx_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv32i8_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v16, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 32 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32(<vscale x 1 x i16>, <vscale x 1 x i32>, iXLen, iXLen)

define <vscale x 1 x i16> @intrinsic_vnsrl_vx_nxv1i16_nxv1i32(<vscale x 1 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vnsrl.wx v8, v8, a0
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32(<vscale x 1 x i16> undef, <vscale x 1 x i32> %0, iXLen %1, iXLen %2)
  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>, <vscale x 1 x i32>, iXLen, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i16> @intrinsic_vnsrl_mask_vx_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32(<vscale x 2 x i16>, <vscale x 2 x i32>, iXLen, iXLen)

define <vscale x 2 x i16> @intrinsic_vnsrl_vx_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vnsrl.wx v8, v8, a0
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32(<vscale x 2 x i16> undef, <vscale x 2 x i32> %0, iXLen %1, iXLen %2)
  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>, <vscale x 2 x i32>, iXLen, <vscale x 2 x i1>, iXLen, iXLen)

define <vscale x 2 x i16> @intrinsic_vnsrl_mask_vx_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(<vscale x 4 x i16>, <vscale x 4 x i32>, iXLen, iXLen)

define <vscale x 4 x i16> @intrinsic_vnsrl_vx_nxv4i16_nxv4i32(<vscale x 4 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vnsrl.wx v10, v8, a0
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(<vscale x 4 x i16> undef, <vscale x 4 x i32> %0, iXLen %1, iXLen %2)
  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>, <vscale x 4 x i32>, iXLen, <vscale x 4 x i1>, iXLen, iXLen)

define <vscale x 4 x i16> @intrinsic_vnsrl_mask_vx_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v10, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32(<vscale x 8 x i16>, <vscale x 8 x i32>, iXLen, iXLen)

define <vscale x 8 x i16> @intrinsic_vnsrl_vx_nxv8i16_nxv8i32(<vscale x 8 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vnsrl.wx v12, v8, a0
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32(<vscale x 8 x i16> undef, <vscale x 8 x i32> %0, iXLen %1, iXLen %2)
  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32(<vscale x 8 x i16>, <vscale x 8 x i32>, iXLen, <vscale x 8 x i1>, iXLen, iXLen)

define <vscale x 8 x i16> @intrinsic_vnsrl_mask_vx_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v12, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32(<vscale x 16 x i16>, <vscale x 16 x i32>, iXLen, iXLen)

define <vscale x 16 x i16> @intrinsic_vnsrl_vx_nxv16i16_nxv16i32(<vscale x 16 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv16i16_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vnsrl.wx v16, v8, a0
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32(<vscale x 16 x i16> undef, <vscale x 16 x i32> %0, iXLen %1, iXLen %2)
  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32(<vscale x 16 x i16>, <vscale x 16 x i32>, iXLen, <vscale x 16 x i1>, iXLen, iXLen)

define <vscale x 16 x i16> @intrinsic_vnsrl_mask_vx_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv16i16_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v16, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 16 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64(<vscale x 1 x i32>, <vscale x 1 x i64>, iXLen, iXLen)

define <vscale x 1 x i32> @intrinsic_vnsrl_vx_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vnsrl.wx v8, v8, a0
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> %0, iXLen %1, iXLen %2)
  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64(<vscale x 1 x i32>, <vscale x 1 x i64>, iXLen, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i32> @intrinsic_vnsrl_mask_vx_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64(<vscale x 2 x i32>, <vscale x 2 x i64>, iXLen, iXLen)

define <vscale x 2 x i32> @intrinsic_vnsrl_vx_nxv2i32_nxv2i64(<vscale x 2 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vnsrl.wx v10, v8, a0
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> %0, iXLen %1, iXLen %2)
  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64(<vscale x 2 x i32>, <vscale x 2 x i64>, iXLen, <vscale x 2 x i1>, iXLen, iXLen)

define <vscale x 2 x i32> @intrinsic_vnsrl_mask_vx_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v10, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64(<vscale x 4 x i32>, <vscale x 4 x i64>, iXLen, iXLen)

define <vscale x 4 x i32> @intrinsic_vnsrl_vx_nxv4i32_nxv4i64(<vscale x 4 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vnsrl.wx v12, v8, a0
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> %0, iXLen %1, iXLen %2)
  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64(<vscale x 4 x i32>, <vscale x 4 x i64>, iXLen, <vscale x 4 x i1>, iXLen, iXLen)

define <vscale x 4 x i32> @intrinsic_vnsrl_mask_vx_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v12, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64(<vscale x 8 x i32>, <vscale x 8 x i64>, iXLen, iXLen)

define <vscale x 8 x i32> @intrinsic_vnsrl_vx_nxv8i32_nxv8i64(<vscale x 8 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i32_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vnsrl.wx v16, v8, a0
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> %0, iXLen %1, iXLen %2)
  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64(<vscale x 8 x i32>, <vscale x 8 x i64>, iXLen, <vscale x 8 x i1>, iXLen, iXLen)

define <vscale x 8 x i32> @intrinsic_vnsrl_mask_vx_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i32_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v16, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 8 x i32> %a
}

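; vnsrl.wi: the shift amount is the immediate 9, encoded directly in the
; instruction, so no scalar register is needed and the AVL stays in a0. These
; tests reuse the .wx intrinsics with a constant iXLen 9 shift operand.
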
define <vscale x 1 x i8> @intrinsic_vnsrl_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv1i8_nxv1i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16(<vscale x 1 x i8> undef, <vscale x 1 x i16> %0, iXLen 9, iXLen %1)
  ret <vscale x 1 x i8> %a
}

define <vscale x 1 x i8> @intrinsic_vnsrl_mask_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv1i8_nxv1i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, iXLen 9, <vscale x 1 x i1> %2, iXLen %3, iXLen 1)
  ret <vscale x 1 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vnsrl_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv2i8_nxv2i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16(<vscale x 2 x i8> undef, <vscale x 2 x i16> %0, iXLen 9, iXLen %1)
  ret <vscale x 2 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vnsrl_mask_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv2i8_nxv2i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, iXLen 9, <vscale x 2 x i1> %2, iXLen %3, iXLen 1)
  ret <vscale x 2 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vnsrl_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv4i8_nxv4i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16(<vscale x 4 x i8> undef, <vscale x 4 x i16> %0, iXLen 9, iXLen %1)
  ret <vscale x 4 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vnsrl_mask_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv4i8_nxv4i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, iXLen 9, <vscale x 4 x i1> %2, iXLen %3, iXLen 1)
  ret <vscale x 4 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vnsrl_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv8i8_nxv8i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vnsrl.wi v10, v8, 9
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16(<vscale x 8 x i8> undef, <vscale x 8 x i16> %0, iXLen 9, iXLen %1)
  ret <vscale x 8 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vnsrl_mask_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv8i8_nxv8i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v10, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, iXLen 9, <vscale x 8 x i1> %2, iXLen %3, iXLen 1)
  ret <vscale x 8 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vnsrl_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv16i8_nxv16i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vnsrl.wi v12, v8, 9
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16(<vscale x 16 x i8> undef, <vscale x 16 x i16> %0, iXLen 9, iXLen %1)
  ret <vscale x 16 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vnsrl_mask_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv16i8_nxv16i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v12, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, iXLen 9, <vscale x 16 x i1> %2, iXLen %3, iXLen 1)
  ret <vscale x 16 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vnsrl_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv32i8_nxv32i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v16, v8, 9
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16(<vscale x 32 x i8> undef, <vscale x 32 x i16> %0, iXLen 9, iXLen %1)
  ret <vscale x 32 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vnsrl_mask_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv32i8_nxv32i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v16, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, iXLen 9, <vscale x 32 x i1> %2, iXLen %3, iXLen 1)
  ret <vscale x 32 x i8> %a
}

define <vscale x 1 x i16> @intrinsic_vnsrl_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv1i16_nxv1i32_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32(<vscale x 1 x i16> undef, <vscale x 1 x i32> %0, iXLen 9, iXLen %1)
  ret <vscale x 1 x i16> %a
}

define <vscale x 1 x i16> @intrinsic_vnsrl_mask_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv1i16_nxv1i32_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, iXLen 9, <vscale x 1 x i1> %2, iXLen %3, iXLen 1)
  ret <vscale x 1 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vnsrl_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv2i16_nxv2i32_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32(<vscale x 2 x i16> undef, <vscale x 2 x i32> %0, iXLen 9, iXLen %1)
  ret <vscale x 2 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vnsrl_mask_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv2i16_nxv2i32_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, iXLen 9, <vscale x 2 x i1> %2, iXLen %3, iXLen 1)
  ret <vscale x 2 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vnsrl_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv4i16_nxv4i32_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vnsrl.wi v10, v8, 9
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(<vscale x 4 x i16> undef, <vscale x 4 x i32> %0, iXLen 9, iXLen %1)
  ret <vscale x 4 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vnsrl_mask_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv4i16_nxv4i32_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v10, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, iXLen 9, <vscale x 4 x i1> %2, iXLen %3, iXLen 1)
  ret <vscale x 4 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vnsrl_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv8i16_nxv8i32_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vnsrl.wi v12, v8, 9
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32(<vscale x 8 x i16> undef, <vscale x 8 x i32> %0, iXLen 9, iXLen %1)
  ret <vscale x 8 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vnsrl_mask_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv8i16_nxv8i32_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v12, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, iXLen 9, <vscale x 8 x i1> %2, iXLen %3, iXLen 1)
  ret <vscale x 8 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vnsrl_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv16i16_nxv16i32_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v16, v8, 9
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32(<vscale x 16 x i16> undef, <vscale x 16 x i32> %0, iXLen 9, iXLen %1)
  ret <vscale x 16 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vnsrl_mask_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv16i16_nxv16i32_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v16, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, iXLen 9, <vscale x 16 x i1> %2, iXLen %3, iXLen 1)
  ret <vscale x 16 x i16> %a
}

define <vscale x 1 x i32> @intrinsic_vnsrl_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv1i32_nxv1i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64(<vscale x 1 x i32> undef, <vscale x 1 x i64> %0, iXLen 9, iXLen %1)
  ret <vscale x 1 x i32> %a
}

define <vscale x 1 x i32> @intrinsic_vnsrl_mask_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv1i32_nxv1i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, iXLen 9, <vscale x 1 x i1> %2, iXLen %3, iXLen 1)
  ret <vscale x 1 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vnsrl_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv2i32_nxv2i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vnsrl.wi v10, v8, 9
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64(<vscale x 2 x i32> undef, <vscale x 2 x i64> %0, iXLen 9, iXLen %1)
  ret <vscale x 2 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vnsrl_mask_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv2i32_nxv2i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v10, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, iXLen 9, <vscale x 2 x i1> %2, iXLen %3, iXLen 1)
  ret <vscale x 2 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vnsrl_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv4i32_nxv4i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vnsrl.wi v12, v8, 9
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64(<vscale x 4 x i32> undef, <vscale x 4 x i64> %0, iXLen 9, iXLen %1)
  ret <vscale x 4 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vnsrl_mask_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv4i32_nxv4i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v12, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, iXLen 9, <vscale x 4 x i1> %2, iXLen %3, iXLen 1)
  ret <vscale x 4 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vnsrl_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv8i32_nxv8i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v16, v8, 9
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64(<vscale x 8 x i32> undef, <vscale x 8 x i64> %0, iXLen 9, iXLen %1)
  ret <vscale x 8 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vnsrl_mask_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv8i32_nxv8i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v16, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, iXLen 9, <vscale x 8 x i1> %2, iXLen %3, iXLen 1)
  ret <vscale x 8 x i32> %a
}