; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+zvfh,+f -verify-machineinstrs \
; RUN:   < %s | FileCheck %s

; The intrinsics are not supported with RV32.
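
; vluxei64.v performs an unordered indexed ("gather") load: element i is
; loaded from address (base + index[i]), with the 64-bit indices treated as
; unsigned byte offsets. The unmasked intrinsic takes (passthru, base, index,
; vl); the masked form takes (passthru, base, index, mask, vl, policy).
; Because the result EEW differs from the 64-bit index EEW in most tests
; below, the destination register group may not overlap the index operand,
; which is why a vmv1r.v/vmv.v.v copy shows up in the expected output.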
declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i64>,
  i64)

define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vluxei64.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64(
    <vscale x 1 x i8> undef,
    ptr %0,
    <vscale x 1 x i64> %1,
    i64 %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64,
  i64)

define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64(
    <vscale x 1 x i8> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 1 x i8> %a
}

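; In the masked calls the trailing `i64 1` is the policy operand
; (bit 0 = tail agnostic, bit 1 = mask agnostic), so the expected vsetvli
; uses "ta, mu".
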
declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i64>,
  i64)

define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vluxei64.v v10, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64(
    <vscale x 2 x i8> undef,
    ptr %0,
    <vscale x 2 x i64> %1,
    i64 %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64,
  i64)

define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64(
    <vscale x 2 x i8> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i64>,
  i64)

define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vluxei64.v v12, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64(
    <vscale x 4 x i8> undef,
    ptr %0,
    <vscale x 4 x i64> %1,
    i64 %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64,
  i64)

define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64(
    <vscale x 4 x i8> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i64>,
  i64)

define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vluxei64.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64(
    <vscale x 8 x i8> undef,
    ptr %0,
    <vscale x 8 x i64> %1,
    i64 %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i64,
  i64)

define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64(
    <vscale x 8 x i8> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64(
  <vscale x 1 x i16>,
  ptr,
  <vscale x 1 x i64>,
  i64)

define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxei64.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64(
    <vscale x 1 x i16> undef,
    ptr %0,
    <vscale x 1 x i64> %1,
    i64 %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64(
  <vscale x 1 x i16>,
  ptr,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64,
  i64)

define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64(
    <vscale x 1 x i16> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i64>,
  i64)

define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vluxei64.v v10, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64(
    <vscale x 2 x i16> undef,
    ptr %0,
    <vscale x 2 x i64> %1,
    i64 %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64,
  i64)

define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64(
    <vscale x 2 x i16> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i64>,
  i64)

define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vluxei64.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64(
    <vscale x 4 x i16> undef,
    ptr %0,
    <vscale x 4 x i64> %1,
    i64 %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64,
  i64)

define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64(
    <vscale x 4 x i16> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i64>,
  i64)

define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vluxei64.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64(
    <vscale x 8 x i16> undef,
    ptr %0,
    <vscale x 8 x i64> %1,
    i64 %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i64,
  i64)

define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64(
    <vscale x 8 x i16> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i64>,
  i64)

define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vluxei64.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64(
    <vscale x 1 x i32> undef,
    ptr %0,
    <vscale x 1 x i64> %1,
    i64 %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64,
  i64)

define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64(
    <vscale x 1 x i32> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i64>,
  i64)

define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vluxei64.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(
    <vscale x 2 x i32> undef,
    ptr %0,
    <vscale x 2 x i64> %1,
    i64 %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64,
  i64)

define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64(
    <vscale x 2 x i32> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i64>,
  i64)

define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vluxei64.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64(
    <vscale x 4 x i32> undef,
    ptr %0,
    <vscale x 4 x i64> %1,
    i64 %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64,
  i64)

define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64(
    <vscale x 4 x i32> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i64>,
  i64)

define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vluxei64.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64(
    <vscale x 8 x i32> undef,
    ptr %0,
    <vscale x 8 x i64> %1,
    i64 %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i64,
  i64)

define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64(
    <vscale x 8 x i32> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 8 x i32> %a
}

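; For the i64 tests below, the result EEW matches the 64-bit index EEW, so
; the destination register group may fully overlap the index operand
; (vluxei64.v v8, (a0), v8) and no extra copy is emitted.
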
declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i64>,
  i64)

define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vluxei64.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64(
    <vscale x 1 x i64> undef,
    ptr %0,
    <vscale x 1 x i64> %1,
    i64 %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64,
  i64)

define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i64>,
  i64)

define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vluxei64.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64(
    <vscale x 2 x i64> undef,
    ptr %0,
    <vscale x 2 x i64> %1,
    i64 %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64,
  i64)

define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64(
    <vscale x 2 x i64> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i64>,
  i64)

define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vluxei64.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64(
    <vscale x 4 x i64> undef,
    ptr %0,
    <vscale x 4 x i64> %1,
    i64 %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64,
  i64)

define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i64>,
  i64)

define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vluxei64.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64(
    <vscale x 8 x i64> undef,
    ptr %0,
    <vscale x 8 x i64> %1,
    i64 %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i64,
  i64)

define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64(
    <vscale x 8 x i64> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 8 x i64> %a
}

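; The floating-point tests below select the same instructions as the integer
; tests with matching SEW/LMUL; vluxei64.v is agnostic to how the loaded
; bits are interpreted.
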
declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i64(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i64>,
  i64)

define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxei64.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i64(
    <vscale x 1 x half> undef,
    ptr %0,
    <vscale x 1 x i64> %1,
    i64 %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64,
  i64)

define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64(
    <vscale x 1 x half> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i64(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i64>,
  i64)

define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vluxei64.v v10, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i64(
    <vscale x 2 x half> undef,
    ptr %0,
    <vscale x 2 x i64> %1,
    i64 %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64,
  i64)

define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64(
    <vscale x 2 x half> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i64(
  <vscale x 4 x half>,
  ptr,
  <vscale x 4 x i64>,
  i64)

define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vluxei64.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i64(
    <vscale x 4 x half> undef,
    ptr %0,
    <vscale x 4 x i64> %1,
    i64 %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64(
  <vscale x 4 x half>,
  ptr,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64,
  i64)

define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64(
    <vscale x 4 x half> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i64(
  <vscale x 8 x half>,
  ptr,
  <vscale x 8 x i64>,
  i64)

define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vluxei64.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i64(
    <vscale x 8 x half> undef,
    ptr %0,
    <vscale x 8 x i64> %1,
    i64 %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64(
  <vscale x 8 x half>,
  ptr,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i64,
  i64)

define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64(
    <vscale x 8 x half> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i64>,
  i64)

define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vluxei64.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64(
    <vscale x 1 x float> undef,
    ptr %0,
    <vscale x 1 x i64> %1,
    i64 %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64,
  i64)

define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64(
    <vscale x 1 x float> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i64>,
  i64)

define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vluxei64.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64(
    <vscale x 2 x float> undef,
    ptr %0,
    <vscale x 2 x i64> %1,
    i64 %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64,
  i64)

define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64(
    <vscale x 2 x float> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i64>,
  i64)

define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vluxei64.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64(
    <vscale x 4 x float> undef,
    ptr %0,
    <vscale x 4 x i64> %1,
    i64 %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64,
  i64)

define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64(
    <vscale x 4 x float> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i64>,
  i64)

define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vluxei64.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64(
    <vscale x 8 x float> undef,
    ptr %0,
    <vscale x 8 x i64> %1,
    i64 %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i64,
  i64)

define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64(
    <vscale x 8 x float> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i64>,
  i64)

define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vluxei64.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64(
    <vscale x 1 x double> undef,
    ptr %0,
    <vscale x 1 x i64> %1,
    i64 %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64,
  i64)

define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64(
    <vscale x 1 x double> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i64>,
  i64)

define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vluxei64.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64(
    <vscale x 2 x double> undef,
    ptr %0,
    <vscale x 2 x i64> %1,
    i64 %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64,
  i64)

define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64(
    <vscale x 2 x double> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i64>,
  i64)

define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vluxei64.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64(
    <vscale x 4 x double> undef,
    ptr %0,
    <vscale x 4 x i64> %1,
    i64 %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64,
  i64)

define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64(
    <vscale x 4 x double> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i64>,
  i64)

define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vluxei64.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64(
    <vscale x 8 x double> undef,
    ptr %0,
    <vscale x 8 x i64> %1,
    i64 %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i64,
  i64)

define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64(
    <vscale x 8 x double> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 8 x double> %a
}