; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vluxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32(
    <vscale x 1 x i8> undef,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32(
    <vscale x 1 x i8> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vluxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32(
    <vscale x 2 x i8> undef,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32(
    <vscale x 2 x i8> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vluxei32.v v10, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32(
    <vscale x 4 x i8> undef,
    ptr %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32(
    <vscale x 4 x i8> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vluxei32.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32(
    <vscale x 8 x i8> undef,
    ptr %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32(
    <vscale x 8 x i8> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32(
  <vscale x 16 x i8>,
  ptr,
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vluxei32.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32(
    <vscale x 16 x i8> undef,
    ptr %0,
    <vscale x 16 x i32> %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32(
  <vscale x 16 x i8>,
  ptr,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32(
    <vscale x 16 x i8> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32(
  <vscale x 1 x i16>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32(
    <vscale x 1 x i16> undef,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32(
  <vscale x 1 x i16>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32(
    <vscale x 1 x i16> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vluxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32(
    <vscale x 2 x i16> undef,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32(
    <vscale x 2 x i16> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vluxei32.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32(
    <vscale x 4 x i16> undef,
    ptr %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32(
    <vscale x 4 x i16> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vluxei32.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32(
    <vscale x 8 x i16> undef,
    ptr %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32(
    <vscale x 8 x i16> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32(
  <vscale x 16 x i16>,
  ptr,
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vluxei32.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32(
    <vscale x 16 x i16> undef,
    ptr %0,
    <vscale x 16 x i32> %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32(
  <vscale x 16 x i16>,
  ptr,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32(
    <vscale x 16 x i16> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vluxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32(
    <vscale x 1 x i32> undef,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vluxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32(
    <vscale x 2 x i32> undef,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vluxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32(
    <vscale x 4 x i32> undef,
    ptr %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vluxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32(
    <vscale x 8 x i32> undef,
    ptr %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  ptr,
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vluxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32(
    <vscale x 16 x i32> undef,
    ptr %0,
    <vscale x 16 x i32> %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  ptr,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vluxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32(
    <vscale x 1 x i64> undef,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32(
    <vscale x 1 x i64> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vluxei32.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32(
    <vscale x 2 x i64> undef,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32(
    <vscale x 2 x i64> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vluxei32.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32(
    <vscale x 4 x i64> undef,
    ptr %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32(
    <vscale x 4 x i64> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vluxei32.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32(
    <vscale x 8 x i64> undef,
    ptr %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32(
    <vscale x 8 x i64> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i32(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i32(
    <vscale x 1 x half> undef,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32(
    <vscale x 1 x half> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i32(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vluxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i32(
    <vscale x 2 x half> undef,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32(
    <vscale x 2 x half> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i32(
  <vscale x 4 x half>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vluxei32.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i32(
    <vscale x 4 x half> undef,
    ptr %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32(
  <vscale x 4 x half>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32(
    <vscale x 4 x half> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i32(
  <vscale x 8 x half>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vluxei32.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i32(
    <vscale x 8 x half> undef,
    ptr %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32(
  <vscale x 8 x half>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32(
    <vscale x 8 x half> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i32(
  <vscale x 16 x half>,
  ptr,
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vluxei32.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i32(
    <vscale x 16 x half> undef,
    ptr %0,
    <vscale x 16 x i32> %1,
    iXLen %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32(
  <vscale x 16 x half>,
  ptr,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32(
    <vscale x 16 x half> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 1 x bfloat> @llvm.riscv.vluxei.nxv1bf16.nxv1i32(
  <vscale x 1 x bfloat>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x bfloat> @intrinsic_vluxei_v_nxv1bf16_nxv1bf16_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1bf16_nxv1bf16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.nxv1bf16.nxv1i32(
    <vscale x 1 x bfloat> undef,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x bfloat> %a
}

declare <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.nxv1i32(
  <vscale x 1 x bfloat>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x bfloat> @intrinsic_vluxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32(<vscale x 1 x bfloat> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.nxv1i32(
    <vscale x 1 x bfloat> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x bfloat> %a
}

declare <vscale x 2 x bfloat> @llvm.riscv.vluxei.nxv2bf16.nxv2i32(
  <vscale x 2 x bfloat>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x bfloat> @intrinsic_vluxei_v_nxv2bf16_nxv2bf16_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2bf16_nxv2bf16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vluxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.nxv2bf16.nxv2i32(
    <vscale x 2 x bfloat> undef,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x bfloat> %a
}

declare <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.nxv2i32(
  <vscale x 2 x bfloat>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x bfloat> @intrinsic_vluxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32(<vscale x 2 x bfloat> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.nxv2i32(
    <vscale x 2 x bfloat> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x bfloat> %a
}

declare <vscale x 4 x bfloat> @llvm.riscv.vluxei.nxv4bf16.nxv4i32(
  <vscale x 4 x bfloat>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x bfloat> @intrinsic_vluxei_v_nxv4bf16_nxv4bf16_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4bf16_nxv4bf16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vluxei32.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.nxv4bf16.nxv4i32(
    <vscale x 4 x bfloat> undef,
    ptr %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x bfloat> %a
}

declare <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.nxv4i32(
  <vscale x 4 x bfloat>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x bfloat> @intrinsic_vluxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32(<vscale x 4 x bfloat> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.nxv4i32(
    <vscale x 4 x bfloat> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x bfloat> %a
}

declare <vscale x 8 x bfloat> @llvm.riscv.vluxei.nxv8bf16.nxv8i32(
  <vscale x 8 x bfloat>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x bfloat> @intrinsic_vluxei_v_nxv8bf16_nxv8bf16_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8bf16_nxv8bf16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vluxei32.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.nxv8bf16.nxv8i32(
    <vscale x 8 x bfloat> undef,
    ptr %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x bfloat> %a
}

declare <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.nxv8i32(
  <vscale x 8 x bfloat>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x bfloat> @intrinsic_vluxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32(<vscale x 8 x bfloat> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.nxv8i32(
    <vscale x 8 x bfloat> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x bfloat> %a
}

declare <vscale x 16 x bfloat> @llvm.riscv.vluxei.nxv16bf16.nxv16i32(
  <vscale x 16 x bfloat>,
  ptr,
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x bfloat> @intrinsic_vluxei_v_nxv16bf16_nxv16bf16_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16bf16_nxv16bf16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vluxei32.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x bfloat> @llvm.riscv.vluxei.nxv16bf16.nxv16i32(
    <vscale x 16 x bfloat> undef,
    ptr %0,
    <vscale x 16 x i32> %1,
    iXLen %2)

  ret <vscale x 16 x bfloat> %a
}

declare <vscale x 16 x bfloat> @llvm.riscv.vluxei.mask.nxv16bf16.nxv16i32(
  <vscale x 16 x bfloat>,
  ptr,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x bfloat> @intrinsic_vluxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32(<vscale x 16 x bfloat> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x bfloat> @llvm.riscv.vluxei.mask.nxv16bf16.nxv16i32(
    <vscale x 16 x bfloat> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x bfloat> %a
}

declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vluxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32(
    <vscale x 1 x float> undef,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32(
    <vscale x 1 x float> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vluxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32(
    <vscale x 2 x float> undef,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32(
    <vscale x 2 x float> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vluxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32(
    <vscale x 4 x float> undef,
    ptr %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32(
    <vscale x 4 x float> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vluxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32(
    <vscale x 8 x float> undef,
    ptr %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32(
    <vscale x 8 x float> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vluxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32(
    <vscale x 16 x float> undef,
    ptr %0,
    <vscale x 16 x i32> %1,
    iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32(
    <vscale x 16 x float> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vluxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32(
    <vscale x 1 x double> undef,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32(
    <vscale x 1 x double> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vluxei32.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32(
    <vscale x 2 x double> undef,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32(
    <vscale x 2 x double> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vluxei32.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32(
    <vscale x 4 x double> undef,
    ptr %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32(
    <vscale x 4 x double> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vluxei32.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32(
    <vscale x 8 x double> undef,
    ptr %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32(
    <vscale x 8 x double> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vluxei16.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16(
    <vscale x 1 x i8> undef,
    ptr %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16(
    <vscale x 1 x i8> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vluxei16.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16(
    <vscale x 2 x i8> undef,
    ptr %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16(
    <vscale x 2 x i8> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vluxei16.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16(
    <vscale x 4 x i8> undef,
    ptr %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16(
    <vscale x 4 x i8> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vluxei16.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16(
    <vscale x 8 x i8> undef,
    ptr %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16(
    <vscale x 8 x i8> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16(
  <vscale x 16 x i8>,
  ptr,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vluxei16.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16(
    <vscale x 16 x i8> undef,
    ptr %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16(
  <vscale x 16 x i8>,
  ptr,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16(
    <vscale x 16 x i8> %0,
    ptr %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16(
  <vscale x 32 x i8>,
  ptr,
  <vscale x 32 x i16>,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vluxei16.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16(
    <vscale x 32 x i8> undef,
    ptr %0,
    <vscale x 32 x i16> %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16(
  <vscale x 32 x i8>,
  ptr,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16(
    <vscale x 32 x i8> %0,
    ptr %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

2109 declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16(
2115 define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
2116 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16:
2117 ; CHECK: # %bb.0: # %entry
2118 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
2119 ; CHECK-NEXT: vluxei16.v v8, (a0), v8
2122 %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16(
2123 <vscale x 1 x i16> undef,
2125 <vscale x 1 x i16> %1,
2128 ret <vscale x 1 x i16> %a
2131 declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16(
2139 define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
2140 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
2141 ; CHECK: # %bb.0: # %entry
2142 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
2143 ; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
2146 %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16(
2147 <vscale x 1 x i16> %0,
2149 <vscale x 1 x i16> %2,
2150 <vscale x 1 x i1> %3,
2153 ret <vscale x 1 x i16> %a
declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vluxei16.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16(
    <vscale x 2 x i16> undef,
    ptr %0,
    <vscale x 2 x i16> %1,
    iXLen %2)
  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 2 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vluxei16.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16(
    <vscale x 4 x i16> undef,
    ptr %0,
    <vscale x 4 x i16> %1,
    iXLen %2)
  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 4 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vluxei16.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16(
    <vscale x 8 x i16> undef,
    ptr %0,
    <vscale x 8 x i16> %1,
    iXLen %2)
  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 8 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  ptr,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vluxei16.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16(
    <vscale x 16 x i16> undef,
    ptr %0,
    <vscale x 16 x i16> %1,
    iXLen %2)
  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  ptr,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    ptr %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 16 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  ptr,
  <vscale x 32 x i16>,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vluxei16.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16(
    <vscale x 32 x i16> undef,
    ptr %0,
    <vscale x 32 x i16> %1,
    iXLen %2)
  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  ptr,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16(
    <vscale x 32 x i16> %0,
    ptr %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 32 x i16> %a
}
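; 32-bit element types indexed by i16 vectors: the index EEW (16) is half the
; data SEW, so the index group is half the size of the destination group.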
declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vluxei16.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16(
    <vscale x 1 x i32> undef,
    ptr %0,
    <vscale x 1 x i16> %1,
    iXLen %2)
  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16(
    <vscale x 1 x i32> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vluxei16.v v9, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16(
    <vscale x 2 x i32> undef,
    ptr %0,
    <vscale x 2 x i16> %1,
    iXLen %2)
  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16(
    <vscale x 2 x i32> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vluxei16.v v10, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16(
    <vscale x 4 x i32> undef,
    ptr %0,
    <vscale x 4 x i16> %1,
    iXLen %2)
  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16(
    <vscale x 4 x i32> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vluxei16.v v12, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16(
    <vscale x 8 x i32> undef,
    ptr %0,
    <vscale x 8 x i16> %1,
    iXLen %2)
  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16(
    <vscale x 8 x i32> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 8 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16(
  <vscale x 16 x i32>,
  ptr,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vluxei16.v v16, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16(
    <vscale x 16 x i32> undef,
    ptr %0,
    <vscale x 16 x i16> %1,
    iXLen %2)
  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16(
  <vscale x 16 x i32>,
  ptr,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16(
    <vscale x 16 x i32> %0,
    ptr %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 16 x i32> %a
}
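; 64-bit element types indexed by i16 vectors (index EMUL = LMUL/4).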
declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vluxei16.v v9, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16(
    <vscale x 1 x i64> undef,
    ptr %0,
    <vscale x 1 x i16> %1,
    iXLen %2)
  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16(
    <vscale x 1 x i64> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vluxei16.v v10, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16(
    <vscale x 2 x i64> undef,
    ptr %0,
    <vscale x 2 x i16> %1,
    iXLen %2)
  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16(
    <vscale x 2 x i64> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vluxei16.v v12, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16(
    <vscale x 4 x i64> undef,
    ptr %0,
    <vscale x 4 x i16> %1,
    iXLen %2)
  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16(
    <vscale x 4 x i64> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vluxei16.v v16, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16(
    <vscale x 8 x i64> undef,
    ptr %0,
    <vscale x 8 x i16> %1,
    iXLen %2)
  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16(
    <vscale x 8 x i64> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 8 x i64> %a
}
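; Floating-point element types with i16 indices. Indexed loads only move bits,
; so the +zvfhmin extension from the RUN lines is sufficient for the half
; cases; no FP computation is performed.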
declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i16(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vluxei16.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i16(
    <vscale x 1 x half> undef,
    ptr %0,
    <vscale x 1 x i16> %1,
    iXLen %2)
  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16(
    <vscale x 1 x half> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 1 x half> %a
}
declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i16(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vluxei16.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i16(
    <vscale x 2 x half> undef,
    ptr %0,
    <vscale x 2 x i16> %1,
    iXLen %2)
  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16(
    <vscale x 2 x half> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 2 x half> %a
}
declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i16(
  <vscale x 4 x half>,
  ptr,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vluxei16.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i16(
    <vscale x 4 x half> undef,
    ptr %0,
    <vscale x 4 x i16> %1,
    iXLen %2)
  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16(
  <vscale x 4 x half>,
  ptr,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16(
    <vscale x 4 x half> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 4 x half> %a
}
declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i16(
  <vscale x 8 x half>,
  ptr,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vluxei16.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i16(
    <vscale x 8 x half> undef,
    ptr %0,
    <vscale x 8 x i16> %1,
    iXLen %2)
  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16(
  <vscale x 8 x half>,
  ptr,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16(
    <vscale x 8 x half> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 8 x half> %a
}
declare <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i16(
  <vscale x 16 x half>,
  ptr,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vluxei16.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i16(
    <vscale x 16 x half> undef,
    ptr %0,
    <vscale x 16 x i16> %1,
    iXLen %2)
  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16(
  <vscale x 16 x half>,
  ptr,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16(
    <vscale x 16 x half> %0,
    ptr %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 16 x half> %a
}
declare <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i16(
  <vscale x 32 x half>,
  ptr,
  <vscale x 32 x i16>,
  iXLen);

define <vscale x 32 x half> @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vluxei16.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i16(
    <vscale x 32 x half> undef,
    ptr %0,
    <vscale x 32 x i16> %1,
    iXLen %2)
  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16(
  <vscale x 32 x half>,
  ptr,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x half> @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16(
    <vscale x 32 x half> %0,
    ptr %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 32 x half> %a
}
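; Single-precision (f32) element types indexed by i16 vectors.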
declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vluxei16.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16(
    <vscale x 1 x float> undef,
    ptr %0,
    <vscale x 1 x i16> %1,
    iXLen %2)
  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16(
    <vscale x 1 x float> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vluxei16.v v9, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16(
    <vscale x 2 x float> undef,
    ptr %0,
    <vscale x 2 x i16> %1,
    iXLen %2)
  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16(
    <vscale x 2 x float> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vluxei16.v v10, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16(
    <vscale x 4 x float> undef,
    ptr %0,
    <vscale x 4 x i16> %1,
    iXLen %2)
  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16(
    <vscale x 4 x float> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vluxei16.v v12, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16(
    <vscale x 8 x float> undef,
    ptr %0,
    <vscale x 8 x i16> %1,
    iXLen %2)
  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16(
    <vscale x 8 x float> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 8 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vluxei16.v v16, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16(
    <vscale x 16 x float> undef,
    ptr %0,
    <vscale x 16 x i16> %1,
    iXLen %2)
  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16(
    <vscale x 16 x float> %0,
    ptr %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 16 x float> %a
}
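; Double-precision (f64) element types indexed by i16 vectors.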
declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vluxei16.v v9, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16(
    <vscale x 1 x double> undef,
    ptr %0,
    <vscale x 1 x i16> %1,
    iXLen %2)
  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16(
    <vscale x 1 x double> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 1 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vluxei16.v v10, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16(
    <vscale x 2 x double> undef,
    ptr %0,
    <vscale x 2 x i16> %1,
    iXLen %2)
  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16(
    <vscale x 2 x double> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 2 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vluxei16.v v12, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16(
    <vscale x 4 x double> undef,
    ptr %0,
    <vscale x 4 x i16> %1,
    iXLen %2)
  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16(
    <vscale x 4 x double> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 4 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vluxei16.v v16, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16(
    <vscale x 8 x double> undef,
    ptr %0,
    <vscale x 8 x i16> %1,
    iXLen %2)
  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16(
    <vscale x 8 x double> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 8 x double> %a
}
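; The remaining tests switch to i8 index vectors, so the emitted instruction
; becomes vluxei8.v. vsetvli still encodes the data SEW/LMUL; the index EEW
; is carried by the opcode itself.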
declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vluxei8.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    ptr %0,
    <vscale x 1 x i8> %1,
    iXLen %2)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 1 x i8> %a
}
declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vluxei8.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8(
    <vscale x 2 x i8> undef,
    ptr %0,
    <vscale x 2 x i8> %1,
    iXLen %2)
  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 2 x i8> %a
}
declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vluxei8.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8(
    <vscale x 4 x i8> undef,
    ptr %0,
    <vscale x 4 x i8> %1,
    iXLen %2)
  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 4 x i8> %a
}
declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vluxei8.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8(
    <vscale x 8 x i8> undef,
    ptr %0,
    <vscale x 8 x i8> %1,
    iXLen %2)
  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 8 x i8> %a
}
declare <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  ptr,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vluxei8.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8(
    <vscale x 16 x i8> undef,
    ptr %0,
    <vscale x 16 x i8> %1,
    iXLen %2)
  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  ptr,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    ptr %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 16 x i8> %a
}
declare <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  ptr,
  <vscale x 32 x i8>,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8(ptr %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vluxei8.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8(
    <vscale x 32 x i8> undef,
    ptr %0,
    <vscale x 32 x i8> %1,
    iXLen %2)
  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  ptr,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    ptr %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 32 x i8> %a
}
declare <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  ptr,
  <vscale x 64 x i8>,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8(ptr %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vluxei8.v v8, (a0), v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8(
    <vscale x 64 x i8> undef,
    ptr %0,
    <vscale x 64 x i8> %1,
    iXLen %2)
  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  ptr,
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  iXLen,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8(
    <vscale x 64 x i8> %0,
    ptr %1,
    <vscale x 64 x i8> %2,
    <vscale x 64 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 64 x i8> %a
}
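; Widening cases: 16-bit data indexed by i8 vectors. The destination group is
; wider than the index group, so the unmasked form loads into a scratch
; register group and copies the result to v8 with a vmv.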
declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  ptr,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vluxei8.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8(
    <vscale x 1 x i16> undef,
    ptr %0,
    <vscale x 1 x i8> %1,
    iXLen %2)
  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  ptr,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 1 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vluxei8.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8(
    <vscale x 2 x i16> undef,
    ptr %0,
    <vscale x 2 x i8> %1,
    iXLen %2)
  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8(
    <vscale x 2 x i16> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 2 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vluxei8.v v9, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8(
    <vscale x 4 x i16> undef,
    ptr %0,
    <vscale x 4 x i8> %1,
    iXLen %2)
  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8(
    <vscale x 4 x i16> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 4 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vluxei8.v v10, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8(
    <vscale x 8 x i16> undef,
    ptr %0,
    <vscale x 8 x i8> %1,
    iXLen %2)
  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8(
    <vscale x 8 x i16> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 8 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8(
  <vscale x 16 x i16>,
  ptr,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vluxei8.v v12, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8(
    <vscale x 16 x i16> undef,
    ptr %0,
    <vscale x 16 x i8> %1,
    iXLen %2)
  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8(
  <vscale x 16 x i16>,
  ptr,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8(
    <vscale x 16 x i16> %0,
    ptr %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 16 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8(
  <vscale x 32 x i16>,
  ptr,
  <vscale x 32 x i8>,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8(ptr %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vluxei8.v v16, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8(
    <vscale x 32 x i16> undef,
    ptr %0,
    <vscale x 32 x i8> %1,
    iXLen %2)
  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8(
  <vscale x 32 x i16>,
  ptr,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8(
    <vscale x 32 x i16> %0,
    ptr %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 32 x i16> %a
}
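; 32-bit data indexed by i8 vectors (index EMUL = LMUL/4).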
declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vluxei8.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8(
    <vscale x 1 x i32> undef,
    ptr %0,
    <vscale x 1 x i8> %1,
    iXLen %2)
  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8(
    <vscale x 1 x i32> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vluxei8.v v9, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8(
    <vscale x 2 x i32> undef,
    ptr %0,
    <vscale x 2 x i8> %1,
    iXLen %2)
  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8(
    <vscale x 2 x i32> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vluxei8.v v10, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8(
    <vscale x 4 x i32> undef,
    ptr %0,
    <vscale x 4 x i8> %1,
    iXLen %2)
  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8(
    <vscale x 4 x i32> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vluxei8.v v12, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8(
    <vscale x 8 x i32> undef,
    ptr %0,
    <vscale x 8 x i8> %1,
    iXLen %2)
  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8(
    <vscale x 8 x i32> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 8 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8(
  <vscale x 16 x i32>,
  ptr,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vluxei8.v v16, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8(
    <vscale x 16 x i32> undef,
    ptr %0,
    <vscale x 16 x i8> %1,
    iXLen %2)
  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8(
  <vscale x 16 x i32>,
  ptr,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8(
    <vscale x 16 x i32> %0,
    ptr %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vluxei8.v v9, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8(
    <vscale x 1 x i64> undef,
    ptr %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8(
    <vscale x 1 x i64> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vluxei8.v v10, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8(
    <vscale x 2 x i64> undef,
    ptr %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8(
    <vscale x 2 x i64> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vluxei8.v v12, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8(
    <vscale x 4 x i64> undef,
    ptr %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8(
    <vscale x 4 x i64> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vluxei8.v v16, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8(
    <vscale x 8 x i64> undef,
    ptr %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8(
    <vscale x 8 x i64> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i8(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vluxei8.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i8(
    <vscale x 1 x half> undef,
    ptr %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8(
    <vscale x 1 x half> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i8(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vluxei8.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i8(
    <vscale x 2 x half> undef,
    ptr %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8(
    <vscale x 2 x half> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i8(
  <vscale x 4 x half>,
  ptr,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vluxei8.v v9, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i8(
    <vscale x 4 x half> undef,
    ptr %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8(
  <vscale x 4 x half>,
  ptr,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8(
    <vscale x 4 x half> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i8(
  <vscale x 8 x half>,
  ptr,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vluxei8.v v10, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i8(
    <vscale x 8 x half> undef,
    ptr %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8(
  <vscale x 8 x half>,
  ptr,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8(
    <vscale x 8 x half> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i8(
  <vscale x 16 x half>,
  ptr,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vluxei8.v v12, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i8(
    <vscale x 16 x half> undef,
    ptr %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8(
  <vscale x 16 x half>,
  ptr,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8(
    <vscale x 16 x half> %0,
    ptr %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i8(
  <vscale x 32 x half>,
  ptr,
  <vscale x 32 x i8>,
  iXLen);

define <vscale x 32 x half> @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8(ptr %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vluxei8.v v16, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i8(
    <vscale x 32 x half> undef,
    ptr %0,
    <vscale x 32 x i8> %1,
    iXLen %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8(
  <vscale x 32 x half>,
  ptr,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x half> @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8(
    <vscale x 32 x half> %0,
    ptr %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x half> %a
}
declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vluxei8.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8(
    <vscale x 1 x float> undef,
    ptr %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8(
    <vscale x 1 x float> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vluxei8.v v9, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8(
    <vscale x 2 x float> undef,
    ptr %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8(
    <vscale x 2 x float> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vluxei8.v v10, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8(
    <vscale x 4 x float> undef,
    ptr %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8(
    <vscale x 4 x float> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vluxei8.v v12, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8(
    <vscale x 8 x float> undef,
    ptr %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8(
    <vscale x 8 x float> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vluxei8.v v16, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8(
    <vscale x 16 x float> undef,
    ptr %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8(
    <vscale x 16 x float> %0,
    ptr %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}
declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vluxei8.v v9, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8(
    <vscale x 1 x double> undef,
    ptr %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8(
    <vscale x 1 x double> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vluxei8.v v10, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8(
    <vscale x 2 x double> undef,
    ptr %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8(
    <vscale x 2 x double> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vluxei8.v v12, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8(
    <vscale x 4 x double> undef,
    ptr %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8(
    <vscale x 4 x double> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vluxei8.v v16, (a0), v8
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8(
    <vscale x 8 x double> undef,
    ptr %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8(
    <vscale x 8 x double> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}