; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
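; The tests below exercise the llvm.riscv.vluxei (indexed-unordered load)
; intrinsics, unmasked and masked, first with i32 and then with i16 index
; vectors, across integer and floating-point element types and LMULs.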
declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vluxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32(
    <vscale x 1 x i8> undef,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32(
    <vscale x 1 x i8> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vluxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32(
    <vscale x 2 x i8> undef,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32(
    <vscale x 2 x i8> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vluxei32.v v10, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32(
    <vscale x 4 x i8> undef,
    ptr %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32(
    <vscale x 4 x i8> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vluxei32.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32(
    <vscale x 8 x i8> undef,
    ptr %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32(
    <vscale x 8 x i8> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32(
  <vscale x 16 x i8>,
  ptr,
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vluxei32.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32(
    <vscale x 16 x i8> undef,
    ptr %0,
    <vscale x 16 x i32> %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32(
  <vscale x 16 x i8>,
  ptr,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32(
    <vscale x 16 x i8> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32(
  <vscale x 1 x i16>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32(
    <vscale x 1 x i16> undef,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32(
  <vscale x 1 x i16>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32(
    <vscale x 1 x i16> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vluxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32(
    <vscale x 2 x i16> undef,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32(
    <vscale x 2 x i16> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vluxei32.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32(
    <vscale x 4 x i16> undef,
    ptr %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32(
    <vscale x 4 x i16> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vluxei32.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32(
    <vscale x 8 x i16> undef,
    ptr %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32(
    <vscale x 8 x i16> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32(
  <vscale x 16 x i16>,
  ptr,
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vluxei32.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32(
    <vscale x 16 x i16> undef,
    ptr %0,
    <vscale x 16 x i32> %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32(
  <vscale x 16 x i16>,
  ptr,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32(
    <vscale x 16 x i16> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vluxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32(
    <vscale x 1 x i32> undef,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vluxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32(
    <vscale x 2 x i32> undef,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vluxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32(
    <vscale x 4 x i32> undef,
    ptr %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vluxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32(
    <vscale x 8 x i32> undef,
    ptr %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  ptr,
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vluxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32(
    <vscale x 16 x i32> undef,
    ptr %0,
    <vscale x 16 x i32> %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  ptr,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vluxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32(
    <vscale x 1 x i64> undef,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32(
    <vscale x 1 x i64> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vluxei32.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32(
    <vscale x 2 x i64> undef,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32(
    <vscale x 2 x i64> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vluxei32.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32(
    <vscale x 4 x i64> undef,
    ptr %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32(
    <vscale x 4 x i64> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vluxei32.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32(
    <vscale x 8 x i64> undef,
    ptr %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32(
    <vscale x 8 x i64> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i32(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i32(
    <vscale x 1 x half> undef,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32(
    <vscale x 1 x half> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i32(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vluxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i32(
    <vscale x 2 x half> undef,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32(
    <vscale x 2 x half> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i32(
  <vscale x 4 x half>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vluxei32.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i32(
    <vscale x 4 x half> undef,
    ptr %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32(
  <vscale x 4 x half>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32(
    <vscale x 4 x half> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i32(
  <vscale x 8 x half>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vluxei32.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i32(
    <vscale x 8 x half> undef,
    ptr %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32(
  <vscale x 8 x half>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32(
    <vscale x 8 x half> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i32(
  <vscale x 16 x half>,
  ptr,
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vluxei32.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i32(
    <vscale x 16 x half> undef,
    ptr %0,
    <vscale x 16 x i32> %1,
    iXLen %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32(
  <vscale x 16 x half>,
  ptr,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32(
    <vscale x 16 x half> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vluxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32(
    <vscale x 1 x float> undef,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32(
    <vscale x 1 x float> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vluxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32(
    <vscale x 2 x float> undef,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32(
    <vscale x 2 x float> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vluxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32(
    <vscale x 4 x float> undef,
    ptr %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32(
    <vscale x 4 x float> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vluxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32(
    <vscale x 8 x float> undef,
    ptr %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32(
    <vscale x 8 x float> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vluxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32(
    <vscale x 16 x float> undef,
    ptr %0,
    <vscale x 16 x i32> %1,
    iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32(
    <vscale x 16 x float> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vluxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32(
    <vscale x 1 x double> undef,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32(
    <vscale x 1 x double> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vluxei32.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32(
    <vscale x 2 x double> undef,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32(
    <vscale x 2 x double> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vluxei32.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32(
    <vscale x 4 x double> undef,
    ptr %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32(
    <vscale x 4 x double> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vluxei32.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32(
    <vscale x 8 x double> undef,
    ptr %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32(
    <vscale x 8 x double> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vluxei16.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16(
    <vscale x 1 x i8> undef,
    ptr %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16(
    <vscale x 1 x i8> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vluxei16.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16(
    <vscale x 2 x i8> undef,
    ptr %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16(
    <vscale x 2 x i8> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vluxei16.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16(
    <vscale x 4 x i8> undef,
    ptr %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16(
    <vscale x 4 x i8> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vluxei16.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16(
    <vscale x 8 x i8> undef,
    ptr %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16(
    <vscale x 8 x i8> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16(
  <vscale x 16 x i8>,
  ptr,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vluxei16.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16(
    <vscale x 16 x i8> undef,
    ptr %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16(
  <vscale x 16 x i8>,
  ptr,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16(
    <vscale x 16 x i8> %0,
    ptr %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16(
  <vscale x 32 x i8>,
  ptr,
  <vscale x 32 x i16>,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vluxei16.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16(
    <vscale x 32 x i8> undef,
    ptr %0,
    <vscale x 32 x i16> %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16(
  <vscale x 32 x i8>,
  ptr,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16(
    <vscale x 32 x i8> %0,
    ptr %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  ptr,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxei16.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16(
    <vscale x 1 x i16> undef,
    ptr %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  ptr,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vluxei16.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16(
    <vscale x 2 x i16> undef,
    ptr %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vluxei16.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16(
    <vscale x 4 x i16> undef,
    ptr %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vluxei16.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16(
    <vscale x 8 x i16> undef,
    ptr %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  ptr,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vluxei16.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16(
    <vscale x 16 x i16> undef,
    ptr %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  ptr,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    ptr %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  ptr,
  <vscale x 32 x i16>,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vluxei16.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16(
    <vscale x 32 x i16> undef,
    ptr %0,
    <vscale x 32 x i16> %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  ptr,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16(
    <vscale x 32 x i16> %0,
    ptr %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vluxei16.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16(
    <vscale x 1 x i32> undef,
    ptr %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16(
    <vscale x 1 x i32> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vluxei16.v v9, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16(
    <vscale x 2 x i32> undef,
    ptr %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16(
    <vscale x 2 x i32> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vluxei16.v v10, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16(
    <vscale x 4 x i32> undef,
    ptr %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16(
    <vscale x 4 x i32> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vluxei16.v v12, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16(
    <vscale x 8 x i32> undef,
    ptr %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16(
    <vscale x 8 x i32> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16(
  <vscale x 16 x i32>,
  ptr,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vluxei16.v v16, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16(
    <vscale x 16 x i32> undef,
    ptr %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16(
  <vscale x 16 x i32>,
  ptr,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16(
    <vscale x 16 x i32> %0,
    ptr %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vluxei16.v v9, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16(
    <vscale x 1 x i64> undef,
    ptr %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16(
    <vscale x 1 x i64> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vluxei16.v v10, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16(
    <vscale x 2 x i64> undef,
    ptr %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16(
    <vscale x 2 x i64> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vluxei16.v v12, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16(
    <vscale x 4 x i64> undef,
    ptr %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16(
    <vscale x 4 x i64> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vluxei16.v v16, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16(
    <vscale x 8 x i64> undef,
    ptr %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16(
    <vscale x 8 x i64> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i16(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vluxei16.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i16(
    <vscale x 1 x half> undef,
    ptr %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16(
    <vscale x 1 x half> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i16(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vluxei16.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i16(
    <vscale x 2 x half> undef,
    ptr %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16(
    <vscale x 2 x half> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i16(
  <vscale x 4 x half>,
  ptr,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vluxei16.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i16(
    <vscale x 4 x half> undef,
    ptr %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16(
  <vscale x 4 x half>,
  ptr,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16(
    <vscale x 4 x half> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i16(
  <vscale x 8 x half>,
  ptr,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vluxei16.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i16(
    <vscale x 8 x half> undef,
    ptr %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16(
  <vscale x 8 x half>,
  ptr,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16(
    <vscale x 8 x half> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i16(
  <vscale x 16 x half>,
  ptr,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vluxei16.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i16(
    <vscale x 16 x half> undef,
    ptr %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16(
  <vscale x 16 x half>,
  ptr,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16(
    <vscale x 16 x half> %0,
    ptr %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i16(
  <vscale x 32 x half>,
  ptr,
  <vscale x 32 x i16>,
  iXLen);

define <vscale x 32 x half> @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vluxei16.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i16(
    <vscale x 32 x half> undef,
    ptr %0,
    <vscale x 32 x i16> %1,
    iXLen %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16(
  <vscale x 32 x half>,
  ptr,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x half> @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16(
    <vscale x 32 x half> %0,
    ptr %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vluxei16.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16(
    <vscale x 1 x float> undef,
    ptr %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16(
    <vscale x 1 x float> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vluxei16.v v9, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16(
    <vscale x 2 x float> undef,
    ptr %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16(
    <vscale x 2 x float> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vluxei16.v v10, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16(
    <vscale x 4 x float> undef,
    ptr %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16(
    <vscale x 4 x float> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vluxei16.v v12, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16(
    <vscale x 8 x float> undef,
    ptr %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16(
    <vscale x 8 x float> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vluxei16.v v16, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16(
    <vscale x 16 x float> undef,
    ptr %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16(
    <vscale x 16 x float> %0,
    ptr %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vluxei16.v v9, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16(
    <vscale x 1 x double> undef,
    ptr %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16(
    <vscale x 1 x double> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vluxei16.v v10, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16(
    <vscale x 2 x double> undef,
    ptr %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16(
    <vscale x 2 x double> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vluxei16.v v12, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16(
    <vscale x 4 x double> undef,
    ptr %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16(
    <vscale x 4 x double> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vluxei16.v v16, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16(
    <vscale x 8 x double> undef,
    ptr %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16(
    <vscale x 8 x double> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}

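; The tests above index with i16 offset vectors (vluxei16.v); the tests below
; switch to i8 index vectors (vluxei8.v). The vsetvli operands continue to
; reflect the EEW and LMUL of the *data* type; only the index register group
; shrinks.
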
declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vluxei8.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    ptr %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vluxei8.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8(
    <vscale x 2 x i8> undef,
    ptr %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vluxei8.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8(
    <vscale x 4 x i8> undef,
    ptr %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vluxei8.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8(
    <vscale x 8 x i8> undef,
    ptr %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  ptr,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vluxei8.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8(
    <vscale x 16 x i8> undef,
    ptr %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  ptr,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    ptr %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  ptr,
  <vscale x 32 x i8>,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8(ptr %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vluxei8.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8(
    <vscale x 32 x i8> undef,
    ptr %0,
    <vscale x 32 x i8> %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  ptr,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    ptr %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  ptr,
  <vscale x 64 x i8>,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8(ptr %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vluxei8.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8(
    <vscale x 64 x i8> undef,
    ptr %0,
    <vscale x 64 x i8> %1,
    iXLen %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  ptr,
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  iXLen,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8(
    <vscale x 64 x i8> %0,
    ptr %1,
    <vscale x 64 x i8> %2,
    <vscale x 64 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 64 x i8> %a
}

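; In the i8-indexed tests below the data elements are wider than the indices,
; so the load lands in a scratch register group and is then copied into v8
; with vmv1r.v/vmv.v.v; a direct load into v8 would make the destination
; overlap the index operand at a different EEW, which (it seems) the
; allocator conservatively avoids.
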
declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  ptr,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vluxei8.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8(
    <vscale x 1 x i16> undef,
    ptr %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  ptr,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vluxei8.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8(
    <vscale x 2 x i16> undef,
    ptr %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8(
    <vscale x 2 x i16> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vluxei8.v v9, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8(
    <vscale x 4 x i16> undef,
    ptr %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8(
    <vscale x 4 x i16> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vluxei8.v v10, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8(
    <vscale x 8 x i16> undef,
    ptr %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8(
    <vscale x 8 x i16> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8(
  <vscale x 16 x i16>,
  ptr,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vluxei8.v v12, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8(
    <vscale x 16 x i16> undef,
    ptr %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8(
  <vscale x 16 x i16>,
  ptr,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8(
    <vscale x 16 x i16> %0,
    ptr %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8(
  <vscale x 32 x i16>,
  ptr,
  <vscale x 32 x i8>,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8(ptr %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vluxei8.v v16, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8(
    <vscale x 32 x i16> undef,
    ptr %0,
    <vscale x 32 x i8> %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8(
  <vscale x 32 x i16>,
  ptr,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8(
    <vscale x 32 x i16> %0,
    ptr %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vluxei8.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8(
    <vscale x 1 x i32> undef,
    ptr %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8(
    <vscale x 1 x i32> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vluxei8.v v9, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8(
    <vscale x 2 x i32> undef,
    ptr %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8(
    <vscale x 2 x i32> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vluxei8.v v10, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8(
    <vscale x 4 x i32> undef,
    ptr %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8(
    <vscale x 4 x i32> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vluxei8.v v12, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8(
    <vscale x 8 x i32> undef,
    ptr %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8(
    <vscale x 8 x i32> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8(
  <vscale x 16 x i32>,
  ptr,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vluxei8.v v16, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8(
    <vscale x 16 x i32> undef,
    ptr %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8(
  <vscale x 16 x i32>,
  ptr,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8(
    <vscale x 16 x i32> %0,
    ptr %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vluxei8.v v9, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8(
    <vscale x 1 x i64> undef,
    ptr %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8(
    <vscale x 1 x i64> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vluxei8.v v10, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8(
    <vscale x 2 x i64> undef,
    ptr %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8(
    <vscale x 2 x i64> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vluxei8.v v12, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8(
    <vscale x 4 x i64> undef,
    ptr %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8(
    <vscale x 4 x i64> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vluxei8.v v16, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8(
    <vscale x 8 x i64> undef,
    ptr %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8(
    <vscale x 8 x i64> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i8(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vluxei8.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i8(
    <vscale x 1 x half> undef,
    ptr %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8(
    <vscale x 1 x half> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i8(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vluxei8.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i8(
    <vscale x 2 x half> undef,
    ptr %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8(
    <vscale x 2 x half> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}
declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i8(
  <vscale x 4 x half>,
  ptr,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vluxei8.v v9, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i8(
    <vscale x 4 x half> undef,
    ptr %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8(
  <vscale x 4 x half>,
  ptr,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8(
    <vscale x 4 x half> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x half> %a
}
declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i8(
  <vscale x 8 x half>,
  ptr,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vluxei8.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i8(
    <vscale x 8 x half> undef,
    ptr %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8(
  <vscale x 8 x half>,
  ptr,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8(
    <vscale x 8 x half> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x half> %a
}
declare <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i8(
  <vscale x 16 x half>,
  ptr,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vluxei8.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i8(
    <vscale x 16 x half> undef,
    ptr %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8(
  <vscale x 16 x half>,
  ptr,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8(
    <vscale x 16 x half> %0,
    ptr %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x half> %a
}
declare <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i8(
  <vscale x 32 x half>,
  ptr,
  <vscale x 32 x i8>,
  iXLen);

define <vscale x 32 x half> @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8(ptr %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vluxei8.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i8(
    <vscale x 32 x half> undef,
    ptr %0,
    <vscale x 32 x i8> %1,
    iXLen %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8(
  <vscale x 32 x half>,
  ptr,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x half> @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8(
    <vscale x 32 x half> %0,
    ptr %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x half> %a
}
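; Same unmasked/masked pattern for f32 element types indexed by i8.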
declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vluxei8.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8(
    <vscale x 1 x float> undef,
    ptr %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8(
    <vscale x 1 x float> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vluxei8.v v9, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8(
    <vscale x 2 x float> undef,
    ptr %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8(
    <vscale x 2 x float> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vluxei8.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8(
    <vscale x 4 x float> undef,
    ptr %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8(
    <vscale x 4 x float> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vluxei8.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8(
    <vscale x 8 x float> undef,
    ptr %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8(
    <vscale x 8 x float> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vluxei8.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8(
    <vscale x 16 x float> undef,
    ptr %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8(
    <vscale x 16 x float> %0,
    ptr %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}
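; Same unmasked/masked pattern for f64 element types indexed by i8.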
declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vluxei8.v v9, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8(
    <vscale x 1 x double> undef,
    ptr %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8(
    <vscale x 1 x double> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vluxei8.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8(
    <vscale x 2 x double> undef,
    ptr %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8(
    <vscale x 2 x double> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vluxei8.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8(
    <vscale x 4 x double> undef,
    ptr %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8(
    <vscale x 4 x double> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vluxei8.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8(
    <vscale x 8 x double> undef,
    ptr %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8(
    <vscale x 8 x double> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}