1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh,+experimental-zfbfmin,+experimental-zvfbfmin \
3 ; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
4 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh,+experimental-zfbfmin,+experimental-zvfbfmin \
5 ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
7 declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(
12 define <vscale x 1 x i64> @intrinsic_vle_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, iXLen %1) nounwind {
13 ; CHECK-LABEL: intrinsic_vle_v_nxv1i64_nxv1i64:
14 ; CHECK: # %bb.0: # %entry
15 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
16 ; CHECK-NEXT: vle64.v v8, (a0)
19 %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(
20 <vscale x 1 x i64> undef,
21 <vscale x 1 x i64>* %0,
24 ret <vscale x 1 x i64> %a
27 declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
34 define <vscale x 1 x i64> @intrinsic_vle_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
35 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i64_nxv1i64:
36 ; CHECK: # %bb.0: # %entry
37 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
38 ; CHECK-NEXT: vle64.v v8, (a0), v0.t
41 %a = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
42 <vscale x 1 x i64> %0,
43 <vscale x 1 x i64>* %1,
47 ret <vscale x 1 x i64> %a
50 declare <vscale x 2 x i64> @llvm.riscv.vle.nxv2i64(
55 define <vscale x 2 x i64> @intrinsic_vle_v_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, iXLen %1) nounwind {
56 ; CHECK-LABEL: intrinsic_vle_v_nxv2i64_nxv2i64:
57 ; CHECK: # %bb.0: # %entry
58 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
59 ; CHECK-NEXT: vle64.v v8, (a0)
62 %a = call <vscale x 2 x i64> @llvm.riscv.vle.nxv2i64(
63 <vscale x 2 x i64> undef,
64 <vscale x 2 x i64>* %0,
67 ret <vscale x 2 x i64> %a
70 declare <vscale x 2 x i64> @llvm.riscv.vle.mask.nxv2i64(
77 define <vscale x 2 x i64> @intrinsic_vle_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
78 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i64_nxv2i64:
79 ; CHECK: # %bb.0: # %entry
80 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
81 ; CHECK-NEXT: vle64.v v8, (a0), v0.t
84 %a = call <vscale x 2 x i64> @llvm.riscv.vle.mask.nxv2i64(
85 <vscale x 2 x i64> %0,
86 <vscale x 2 x i64>* %1,
90 ret <vscale x 2 x i64> %a
93 declare <vscale x 4 x i64> @llvm.riscv.vle.nxv4i64(
98 define <vscale x 4 x i64> @intrinsic_vle_v_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, iXLen %1) nounwind {
99 ; CHECK-LABEL: intrinsic_vle_v_nxv4i64_nxv4i64:
100 ; CHECK: # %bb.0: # %entry
101 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
102 ; CHECK-NEXT: vle64.v v8, (a0)
105 %a = call <vscale x 4 x i64> @llvm.riscv.vle.nxv4i64(
106 <vscale x 4 x i64> undef,
107 <vscale x 4 x i64>* %0,
110 ret <vscale x 4 x i64> %a
113 declare <vscale x 4 x i64> @llvm.riscv.vle.mask.nxv4i64(
120 define <vscale x 4 x i64> @intrinsic_vle_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
121 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i64_nxv4i64:
122 ; CHECK: # %bb.0: # %entry
123 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
124 ; CHECK-NEXT: vle64.v v8, (a0), v0.t
127 %a = call <vscale x 4 x i64> @llvm.riscv.vle.mask.nxv4i64(
128 <vscale x 4 x i64> %0,
129 <vscale x 4 x i64>* %1,
130 <vscale x 4 x i1> %2,
133 ret <vscale x 4 x i64> %a
136 declare <vscale x 8 x i64> @llvm.riscv.vle.nxv8i64(
141 define <vscale x 8 x i64> @intrinsic_vle_v_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, iXLen %1) nounwind {
142 ; CHECK-LABEL: intrinsic_vle_v_nxv8i64_nxv8i64:
143 ; CHECK: # %bb.0: # %entry
144 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
145 ; CHECK-NEXT: vle64.v v8, (a0)
148 %a = call <vscale x 8 x i64> @llvm.riscv.vle.nxv8i64(
149 <vscale x 8 x i64> undef,
150 <vscale x 8 x i64>* %0,
153 ret <vscale x 8 x i64> %a
156 declare <vscale x 8 x i64> @llvm.riscv.vle.mask.nxv8i64(
163 define <vscale x 8 x i64> @intrinsic_vle_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
164 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i64_nxv8i64:
165 ; CHECK: # %bb.0: # %entry
166 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
167 ; CHECK-NEXT: vle64.v v8, (a0), v0.t
170 %a = call <vscale x 8 x i64> @llvm.riscv.vle.mask.nxv8i64(
171 <vscale x 8 x i64> %0,
172 <vscale x 8 x i64>* %1,
173 <vscale x 8 x i1> %2,
176 ret <vscale x 8 x i64> %a
179 declare <vscale x 1 x double> @llvm.riscv.vle.nxv1f64(
180 <vscale x 1 x double>,
181 <vscale x 1 x double>*,
184 define <vscale x 1 x double> @intrinsic_vle_v_nxv1f64_nxv1f64(<vscale x 1 x double>* %0, iXLen %1) nounwind {
185 ; CHECK-LABEL: intrinsic_vle_v_nxv1f64_nxv1f64:
186 ; CHECK: # %bb.0: # %entry
187 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
188 ; CHECK-NEXT: vle64.v v8, (a0)
191 %a = call <vscale x 1 x double> @llvm.riscv.vle.nxv1f64(
192 <vscale x 1 x double> undef,
193 <vscale x 1 x double>* %0,
196 ret <vscale x 1 x double> %a
199 declare <vscale x 1 x double> @llvm.riscv.vle.mask.nxv1f64(
200 <vscale x 1 x double>,
201 <vscale x 1 x double>*,
206 define <vscale x 1 x double> @intrinsic_vle_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
207 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f64_nxv1f64:
208 ; CHECK: # %bb.0: # %entry
209 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
210 ; CHECK-NEXT: vle64.v v8, (a0), v0.t
213 %a = call <vscale x 1 x double> @llvm.riscv.vle.mask.nxv1f64(
214 <vscale x 1 x double> %0,
215 <vscale x 1 x double>* %1,
216 <vscale x 1 x i1> %2,
219 ret <vscale x 1 x double> %a
222 declare <vscale x 2 x double> @llvm.riscv.vle.nxv2f64(
223 <vscale x 2 x double>,
224 <vscale x 2 x double>*,
227 define <vscale x 2 x double> @intrinsic_vle_v_nxv2f64_nxv2f64(<vscale x 2 x double>* %0, iXLen %1) nounwind {
228 ; CHECK-LABEL: intrinsic_vle_v_nxv2f64_nxv2f64:
229 ; CHECK: # %bb.0: # %entry
230 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
231 ; CHECK-NEXT: vle64.v v8, (a0)
234 %a = call <vscale x 2 x double> @llvm.riscv.vle.nxv2f64(
235 <vscale x 2 x double> undef,
236 <vscale x 2 x double>* %0,
239 ret <vscale x 2 x double> %a
242 declare <vscale x 2 x double> @llvm.riscv.vle.mask.nxv2f64(
243 <vscale x 2 x double>,
244 <vscale x 2 x double>*,
249 define <vscale x 2 x double> @intrinsic_vle_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
250 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f64_nxv2f64:
251 ; CHECK: # %bb.0: # %entry
252 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
253 ; CHECK-NEXT: vle64.v v8, (a0), v0.t
256 %a = call <vscale x 2 x double> @llvm.riscv.vle.mask.nxv2f64(
257 <vscale x 2 x double> %0,
258 <vscale x 2 x double>* %1,
259 <vscale x 2 x i1> %2,
262 ret <vscale x 2 x double> %a
265 declare <vscale x 4 x double> @llvm.riscv.vle.nxv4f64(
266 <vscale x 4 x double>,
267 <vscale x 4 x double>*,
270 define <vscale x 4 x double> @intrinsic_vle_v_nxv4f64_nxv4f64(<vscale x 4 x double>* %0, iXLen %1) nounwind {
271 ; CHECK-LABEL: intrinsic_vle_v_nxv4f64_nxv4f64:
272 ; CHECK: # %bb.0: # %entry
273 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
274 ; CHECK-NEXT: vle64.v v8, (a0)
277 %a = call <vscale x 4 x double> @llvm.riscv.vle.nxv4f64(
278 <vscale x 4 x double> undef,
279 <vscale x 4 x double>* %0,
282 ret <vscale x 4 x double> %a
285 declare <vscale x 4 x double> @llvm.riscv.vle.mask.nxv4f64(
286 <vscale x 4 x double>,
287 <vscale x 4 x double>*,
292 define <vscale x 4 x double> @intrinsic_vle_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
293 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f64_nxv4f64:
294 ; CHECK: # %bb.0: # %entry
295 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
296 ; CHECK-NEXT: vle64.v v8, (a0), v0.t
299 %a = call <vscale x 4 x double> @llvm.riscv.vle.mask.nxv4f64(
300 <vscale x 4 x double> %0,
301 <vscale x 4 x double>* %1,
302 <vscale x 4 x i1> %2,
305 ret <vscale x 4 x double> %a
308 declare <vscale x 8 x double> @llvm.riscv.vle.nxv8f64(
309 <vscale x 8 x double>,
310 <vscale x 8 x double>*,
313 define <vscale x 8 x double> @intrinsic_vle_v_nxv8f64_nxv8f64(<vscale x 8 x double>* %0, iXLen %1) nounwind {
314 ; CHECK-LABEL: intrinsic_vle_v_nxv8f64_nxv8f64:
315 ; CHECK: # %bb.0: # %entry
316 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
317 ; CHECK-NEXT: vle64.v v8, (a0)
320 %a = call <vscale x 8 x double> @llvm.riscv.vle.nxv8f64(
321 <vscale x 8 x double> undef,
322 <vscale x 8 x double>* %0,
325 ret <vscale x 8 x double> %a
328 declare <vscale x 8 x double> @llvm.riscv.vle.mask.nxv8f64(
329 <vscale x 8 x double>,
330 <vscale x 8 x double>*,
335 define <vscale x 8 x double> @intrinsic_vle_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
336 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f64_nxv8f64:
337 ; CHECK: # %bb.0: # %entry
338 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
339 ; CHECK-NEXT: vle64.v v8, (a0), v0.t
342 %a = call <vscale x 8 x double> @llvm.riscv.vle.mask.nxv8f64(
343 <vscale x 8 x double> %0,
344 <vscale x 8 x double>* %1,
345 <vscale x 8 x i1> %2,
348 ret <vscale x 8 x double> %a
351 declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
356 define <vscale x 1 x i32> @intrinsic_vle_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, iXLen %1) nounwind {
357 ; CHECK-LABEL: intrinsic_vle_v_nxv1i32_nxv1i32:
358 ; CHECK: # %bb.0: # %entry
359 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
360 ; CHECK-NEXT: vle32.v v8, (a0)
363 %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
364 <vscale x 1 x i32> undef,
365 <vscale x 1 x i32>* %0,
368 ret <vscale x 1 x i32> %a
371 declare <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
378 define <vscale x 1 x i32> @intrinsic_vle_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
379 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i32_nxv1i32:
380 ; CHECK: # %bb.0: # %entry
381 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
382 ; CHECK-NEXT: vle32.v v8, (a0), v0.t
385 %a = call <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
386 <vscale x 1 x i32> %0,
387 <vscale x 1 x i32>* %1,
388 <vscale x 1 x i1> %2,
391 ret <vscale x 1 x i32> %a
394 declare <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(
399 define <vscale x 2 x i32> @intrinsic_vle_v_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, iXLen %1) nounwind {
400 ; CHECK-LABEL: intrinsic_vle_v_nxv2i32_nxv2i32:
401 ; CHECK: # %bb.0: # %entry
402 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
403 ; CHECK-NEXT: vle32.v v8, (a0)
406 %a = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(
407 <vscale x 2 x i32> undef,
408 <vscale x 2 x i32>* %0,
411 ret <vscale x 2 x i32> %a
414 declare <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32(
421 define <vscale x 2 x i32> @intrinsic_vle_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
422 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i32_nxv2i32:
423 ; CHECK: # %bb.0: # %entry
424 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
425 ; CHECK-NEXT: vle32.v v8, (a0), v0.t
428 %a = call <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32(
429 <vscale x 2 x i32> %0,
430 <vscale x 2 x i32>* %1,
431 <vscale x 2 x i1> %2,
434 ret <vscale x 2 x i32> %a
437 declare <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32(
442 define <vscale x 4 x i32> @intrinsic_vle_v_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, iXLen %1) nounwind {
443 ; CHECK-LABEL: intrinsic_vle_v_nxv4i32_nxv4i32:
444 ; CHECK: # %bb.0: # %entry
445 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
446 ; CHECK-NEXT: vle32.v v8, (a0)
449 %a = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32(
450 <vscale x 4 x i32> undef,
451 <vscale x 4 x i32>* %0,
454 ret <vscale x 4 x i32> %a
457 declare <vscale x 4 x i32> @llvm.riscv.vle.mask.nxv4i32(
464 define <vscale x 4 x i32> @intrinsic_vle_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
465 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i32_nxv4i32:
466 ; CHECK: # %bb.0: # %entry
467 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
468 ; CHECK-NEXT: vle32.v v8, (a0), v0.t
471 %a = call <vscale x 4 x i32> @llvm.riscv.vle.mask.nxv4i32(
472 <vscale x 4 x i32> %0,
473 <vscale x 4 x i32>* %1,
474 <vscale x 4 x i1> %2,
477 ret <vscale x 4 x i32> %a
480 declare <vscale x 8 x i32> @llvm.riscv.vle.nxv8i32(
485 define <vscale x 8 x i32> @intrinsic_vle_v_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, iXLen %1) nounwind {
486 ; CHECK-LABEL: intrinsic_vle_v_nxv8i32_nxv8i32:
487 ; CHECK: # %bb.0: # %entry
488 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
489 ; CHECK-NEXT: vle32.v v8, (a0)
492 %a = call <vscale x 8 x i32> @llvm.riscv.vle.nxv8i32(
493 <vscale x 8 x i32> undef,
494 <vscale x 8 x i32>* %0,
497 ret <vscale x 8 x i32> %a
500 declare <vscale x 8 x i32> @llvm.riscv.vle.mask.nxv8i32(
507 define <vscale x 8 x i32> @intrinsic_vle_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
508 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i32_nxv8i32:
509 ; CHECK: # %bb.0: # %entry
510 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
511 ; CHECK-NEXT: vle32.v v8, (a0), v0.t
514 %a = call <vscale x 8 x i32> @llvm.riscv.vle.mask.nxv8i32(
515 <vscale x 8 x i32> %0,
516 <vscale x 8 x i32>* %1,
517 <vscale x 8 x i1> %2,
520 ret <vscale x 8 x i32> %a
523 declare <vscale x 16 x i32> @llvm.riscv.vle.nxv16i32(
525 <vscale x 16 x i32>*,
528 define <vscale x 16 x i32> @intrinsic_vle_v_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, iXLen %1) nounwind {
529 ; CHECK-LABEL: intrinsic_vle_v_nxv16i32_nxv16i32:
530 ; CHECK: # %bb.0: # %entry
531 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
532 ; CHECK-NEXT: vle32.v v8, (a0)
535 %a = call <vscale x 16 x i32> @llvm.riscv.vle.nxv16i32(
536 <vscale x 16 x i32> undef,
537 <vscale x 16 x i32>* %0,
540 ret <vscale x 16 x i32> %a
543 declare <vscale x 16 x i32> @llvm.riscv.vle.mask.nxv16i32(
545 <vscale x 16 x i32>*,
550 define <vscale x 16 x i32> @intrinsic_vle_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
551 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i32_nxv16i32:
552 ; CHECK: # %bb.0: # %entry
553 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
554 ; CHECK-NEXT: vle32.v v8, (a0), v0.t
557 %a = call <vscale x 16 x i32> @llvm.riscv.vle.mask.nxv16i32(
558 <vscale x 16 x i32> %0,
559 <vscale x 16 x i32>* %1,
560 <vscale x 16 x i1> %2,
563 ret <vscale x 16 x i32> %a
566 declare <vscale x 1 x float> @llvm.riscv.vle.nxv1f32(
567 <vscale x 1 x float>,
568 <vscale x 1 x float>*,
571 define <vscale x 1 x float> @intrinsic_vle_v_nxv1f32_nxv1f32(<vscale x 1 x float>* %0, iXLen %1) nounwind {
572 ; CHECK-LABEL: intrinsic_vle_v_nxv1f32_nxv1f32:
573 ; CHECK: # %bb.0: # %entry
574 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
575 ; CHECK-NEXT: vle32.v v8, (a0)
578 %a = call <vscale x 1 x float> @llvm.riscv.vle.nxv1f32(
579 <vscale x 1 x float> undef,
580 <vscale x 1 x float>* %0,
583 ret <vscale x 1 x float> %a
586 declare <vscale x 1 x float> @llvm.riscv.vle.mask.nxv1f32(
587 <vscale x 1 x float>,
588 <vscale x 1 x float>*,
593 define <vscale x 1 x float> @intrinsic_vle_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
594 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f32_nxv1f32:
595 ; CHECK: # %bb.0: # %entry
596 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
597 ; CHECK-NEXT: vle32.v v8, (a0), v0.t
600 %a = call <vscale x 1 x float> @llvm.riscv.vle.mask.nxv1f32(
601 <vscale x 1 x float> %0,
602 <vscale x 1 x float>* %1,
603 <vscale x 1 x i1> %2,
606 ret <vscale x 1 x float> %a
609 declare <vscale x 2 x float> @llvm.riscv.vle.nxv2f32(
610 <vscale x 2 x float>,
611 <vscale x 2 x float>*,
614 define <vscale x 2 x float> @intrinsic_vle_v_nxv2f32_nxv2f32(<vscale x 2 x float>* %0, iXLen %1) nounwind {
615 ; CHECK-LABEL: intrinsic_vle_v_nxv2f32_nxv2f32:
616 ; CHECK: # %bb.0: # %entry
617 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
618 ; CHECK-NEXT: vle32.v v8, (a0)
621 %a = call <vscale x 2 x float> @llvm.riscv.vle.nxv2f32(
622 <vscale x 2 x float> undef,
623 <vscale x 2 x float>* %0,
626 ret <vscale x 2 x float> %a
629 declare <vscale x 2 x float> @llvm.riscv.vle.mask.nxv2f32(
630 <vscale x 2 x float>,
631 <vscale x 2 x float>*,
636 define <vscale x 2 x float> @intrinsic_vle_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
637 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f32_nxv2f32:
638 ; CHECK: # %bb.0: # %entry
639 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
640 ; CHECK-NEXT: vle32.v v8, (a0), v0.t
643 %a = call <vscale x 2 x float> @llvm.riscv.vle.mask.nxv2f32(
644 <vscale x 2 x float> %0,
645 <vscale x 2 x float>* %1,
646 <vscale x 2 x i1> %2,
649 ret <vscale x 2 x float> %a
652 declare <vscale x 4 x float> @llvm.riscv.vle.nxv4f32(
653 <vscale x 4 x float>,
654 <vscale x 4 x float>*,
657 define <vscale x 4 x float> @intrinsic_vle_v_nxv4f32_nxv4f32(<vscale x 4 x float>* %0, iXLen %1) nounwind {
658 ; CHECK-LABEL: intrinsic_vle_v_nxv4f32_nxv4f32:
659 ; CHECK: # %bb.0: # %entry
660 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
661 ; CHECK-NEXT: vle32.v v8, (a0)
664 %a = call <vscale x 4 x float> @llvm.riscv.vle.nxv4f32(
665 <vscale x 4 x float> undef,
666 <vscale x 4 x float>* %0,
669 ret <vscale x 4 x float> %a
672 declare <vscale x 4 x float> @llvm.riscv.vle.mask.nxv4f32(
673 <vscale x 4 x float>,
674 <vscale x 4 x float>*,
679 define <vscale x 4 x float> @intrinsic_vle_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
680 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f32_nxv4f32:
681 ; CHECK: # %bb.0: # %entry
682 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
683 ; CHECK-NEXT: vle32.v v8, (a0), v0.t
686 %a = call <vscale x 4 x float> @llvm.riscv.vle.mask.nxv4f32(
687 <vscale x 4 x float> %0,
688 <vscale x 4 x float>* %1,
689 <vscale x 4 x i1> %2,
692 ret <vscale x 4 x float> %a
695 declare <vscale x 8 x float> @llvm.riscv.vle.nxv8f32(
696 <vscale x 8 x float>,
697 <vscale x 8 x float>*,
700 define <vscale x 8 x float> @intrinsic_vle_v_nxv8f32_nxv8f32(<vscale x 8 x float>* %0, iXLen %1) nounwind {
701 ; CHECK-LABEL: intrinsic_vle_v_nxv8f32_nxv8f32:
702 ; CHECK: # %bb.0: # %entry
703 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
704 ; CHECK-NEXT: vle32.v v8, (a0)
707 %a = call <vscale x 8 x float> @llvm.riscv.vle.nxv8f32(
708 <vscale x 8 x float> undef,
709 <vscale x 8 x float>* %0,
712 ret <vscale x 8 x float> %a
715 declare <vscale x 8 x float> @llvm.riscv.vle.mask.nxv8f32(
716 <vscale x 8 x float>,
717 <vscale x 8 x float>*,
722 define <vscale x 8 x float> @intrinsic_vle_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
723 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f32_nxv8f32:
724 ; CHECK: # %bb.0: # %entry
725 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
726 ; CHECK-NEXT: vle32.v v8, (a0), v0.t
729 %a = call <vscale x 8 x float> @llvm.riscv.vle.mask.nxv8f32(
730 <vscale x 8 x float> %0,
731 <vscale x 8 x float>* %1,
732 <vscale x 8 x i1> %2,
735 ret <vscale x 8 x float> %a
738 declare <vscale x 16 x float> @llvm.riscv.vle.nxv16f32(
739 <vscale x 16 x float>,
740 <vscale x 16 x float>*,
743 define <vscale x 16 x float> @intrinsic_vle_v_nxv16f32_nxv16f32(<vscale x 16 x float>* %0, iXLen %1) nounwind {
744 ; CHECK-LABEL: intrinsic_vle_v_nxv16f32_nxv16f32:
745 ; CHECK: # %bb.0: # %entry
746 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
747 ; CHECK-NEXT: vle32.v v8, (a0)
750 %a = call <vscale x 16 x float> @llvm.riscv.vle.nxv16f32(
751 <vscale x 16 x float> undef,
752 <vscale x 16 x float>* %0,
755 ret <vscale x 16 x float> %a
758 declare <vscale x 16 x float> @llvm.riscv.vle.mask.nxv16f32(
759 <vscale x 16 x float>,
760 <vscale x 16 x float>*,
765 define <vscale x 16 x float> @intrinsic_vle_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
766 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f32_nxv16f32:
767 ; CHECK: # %bb.0: # %entry
768 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
769 ; CHECK-NEXT: vle32.v v8, (a0), v0.t
772 %a = call <vscale x 16 x float> @llvm.riscv.vle.mask.nxv16f32(
773 <vscale x 16 x float> %0,
774 <vscale x 16 x float>* %1,
775 <vscale x 16 x i1> %2,
778 ret <vscale x 16 x float> %a
781 declare <vscale x 1 x i16> @llvm.riscv.vle.nxv1i16(
786 define <vscale x 1 x i16> @intrinsic_vle_v_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, iXLen %1) nounwind {
787 ; CHECK-LABEL: intrinsic_vle_v_nxv1i16_nxv1i16:
788 ; CHECK: # %bb.0: # %entry
789 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
790 ; CHECK-NEXT: vle16.v v8, (a0)
793 %a = call <vscale x 1 x i16> @llvm.riscv.vle.nxv1i16(
794 <vscale x 1 x i16> undef,
795 <vscale x 1 x i16>* %0,
798 ret <vscale x 1 x i16> %a
801 declare <vscale x 1 x i16> @llvm.riscv.vle.mask.nxv1i16(
808 define <vscale x 1 x i16> @intrinsic_vle_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
809 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i16_nxv1i16:
810 ; CHECK: # %bb.0: # %entry
811 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
812 ; CHECK-NEXT: vle16.v v8, (a0), v0.t
815 %a = call <vscale x 1 x i16> @llvm.riscv.vle.mask.nxv1i16(
816 <vscale x 1 x i16> %0,
817 <vscale x 1 x i16>* %1,
818 <vscale x 1 x i1> %2,
821 ret <vscale x 1 x i16> %a
824 declare <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(
829 define <vscale x 2 x i16> @intrinsic_vle_v_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, iXLen %1) nounwind {
830 ; CHECK-LABEL: intrinsic_vle_v_nxv2i16_nxv2i16:
831 ; CHECK: # %bb.0: # %entry
832 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
833 ; CHECK-NEXT: vle16.v v8, (a0)
836 %a = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(
837 <vscale x 2 x i16> undef,
838 <vscale x 2 x i16>* %0,
841 ret <vscale x 2 x i16> %a
844 declare <vscale x 2 x i16> @llvm.riscv.vle.mask.nxv2i16(
851 define <vscale x 2 x i16> @intrinsic_vle_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
852 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i16_nxv2i16:
853 ; CHECK: # %bb.0: # %entry
854 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
855 ; CHECK-NEXT: vle16.v v8, (a0), v0.t
858 %a = call <vscale x 2 x i16> @llvm.riscv.vle.mask.nxv2i16(
859 <vscale x 2 x i16> %0,
860 <vscale x 2 x i16>* %1,
861 <vscale x 2 x i1> %2,
864 ret <vscale x 2 x i16> %a
867 declare <vscale x 4 x i16> @llvm.riscv.vle.nxv4i16(
872 define <vscale x 4 x i16> @intrinsic_vle_v_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, iXLen %1) nounwind {
873 ; CHECK-LABEL: intrinsic_vle_v_nxv4i16_nxv4i16:
874 ; CHECK: # %bb.0: # %entry
875 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
876 ; CHECK-NEXT: vle16.v v8, (a0)
879 %a = call <vscale x 4 x i16> @llvm.riscv.vle.nxv4i16(
880 <vscale x 4 x i16> undef,
881 <vscale x 4 x i16>* %0,
884 ret <vscale x 4 x i16> %a
887 declare <vscale x 4 x i16> @llvm.riscv.vle.mask.nxv4i16(
894 define <vscale x 4 x i16> @intrinsic_vle_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
895 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i16_nxv4i16:
896 ; CHECK: # %bb.0: # %entry
897 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
898 ; CHECK-NEXT: vle16.v v8, (a0), v0.t
901 %a = call <vscale x 4 x i16> @llvm.riscv.vle.mask.nxv4i16(
902 <vscale x 4 x i16> %0,
903 <vscale x 4 x i16>* %1,
904 <vscale x 4 x i1> %2,
907 ret <vscale x 4 x i16> %a
910 declare <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16(
915 define <vscale x 8 x i16> @intrinsic_vle_v_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, iXLen %1) nounwind {
916 ; CHECK-LABEL: intrinsic_vle_v_nxv8i16_nxv8i16:
917 ; CHECK: # %bb.0: # %entry
918 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
919 ; CHECK-NEXT: vle16.v v8, (a0)
922 %a = call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16(
923 <vscale x 8 x i16> undef,
924 <vscale x 8 x i16>* %0,
927 ret <vscale x 8 x i16> %a
930 declare <vscale x 8 x i16> @llvm.riscv.vle.mask.nxv8i16(
937 define <vscale x 8 x i16> @intrinsic_vle_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
938 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i16_nxv8i16:
939 ; CHECK: # %bb.0: # %entry
940 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
941 ; CHECK-NEXT: vle16.v v8, (a0), v0.t
944 %a = call <vscale x 8 x i16> @llvm.riscv.vle.mask.nxv8i16(
945 <vscale x 8 x i16> %0,
946 <vscale x 8 x i16>* %1,
947 <vscale x 8 x i1> %2,
950 ret <vscale x 8 x i16> %a
953 declare <vscale x 16 x i16> @llvm.riscv.vle.nxv16i16(
955 <vscale x 16 x i16>*,
958 define <vscale x 16 x i16> @intrinsic_vle_v_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, iXLen %1) nounwind {
959 ; CHECK-LABEL: intrinsic_vle_v_nxv16i16_nxv16i16:
960 ; CHECK: # %bb.0: # %entry
961 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
962 ; CHECK-NEXT: vle16.v v8, (a0)
965 %a = call <vscale x 16 x i16> @llvm.riscv.vle.nxv16i16(
966 <vscale x 16 x i16> undef,
967 <vscale x 16 x i16>* %0,
970 ret <vscale x 16 x i16> %a
973 declare <vscale x 16 x i16> @llvm.riscv.vle.mask.nxv16i16(
975 <vscale x 16 x i16>*,
980 define <vscale x 16 x i16> @intrinsic_vle_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
981 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i16_nxv16i16:
982 ; CHECK: # %bb.0: # %entry
983 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
984 ; CHECK-NEXT: vle16.v v8, (a0), v0.t
987 %a = call <vscale x 16 x i16> @llvm.riscv.vle.mask.nxv16i16(
988 <vscale x 16 x i16> %0,
989 <vscale x 16 x i16>* %1,
990 <vscale x 16 x i1> %2,
993 ret <vscale x 16 x i16> %a
996 declare <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16(
998 <vscale x 32 x i16>*,
1001 define <vscale x 32 x i16> @intrinsic_vle_v_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, iXLen %1) nounwind {
1002 ; CHECK-LABEL: intrinsic_vle_v_nxv32i16_nxv32i16:
1003 ; CHECK: # %bb.0: # %entry
1004 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
1005 ; CHECK-NEXT: vle16.v v8, (a0)
1008 %a = call <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16(
1009 <vscale x 32 x i16> undef,
1010 <vscale x 32 x i16>* %0,
1013 ret <vscale x 32 x i16> %a
1016 declare <vscale x 32 x i16> @llvm.riscv.vle.mask.nxv32i16(
1017 <vscale x 32 x i16>,
1018 <vscale x 32 x i16>*,
1023 define <vscale x 32 x i16> @intrinsic_vle_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
1024 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i16_nxv32i16:
1025 ; CHECK: # %bb.0: # %entry
1026 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
1027 ; CHECK-NEXT: vle16.v v8, (a0), v0.t
1030 %a = call <vscale x 32 x i16> @llvm.riscv.vle.mask.nxv32i16(
1031 <vscale x 32 x i16> %0,
1032 <vscale x 32 x i16>* %1,
1033 <vscale x 32 x i1> %2,
1036 ret <vscale x 32 x i16> %a
1039 declare <vscale x 1 x half> @llvm.riscv.vle.nxv1f16(
1040 <vscale x 1 x half>,
1041 <vscale x 1 x half>*,
1044 define <vscale x 1 x half> @intrinsic_vle_v_nxv1f16_nxv1f16(<vscale x 1 x half>* %0, iXLen %1) nounwind {
1045 ; CHECK-LABEL: intrinsic_vle_v_nxv1f16_nxv1f16:
1046 ; CHECK: # %bb.0: # %entry
1047 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
1048 ; CHECK-NEXT: vle16.v v8, (a0)
1051 %a = call <vscale x 1 x half> @llvm.riscv.vle.nxv1f16(
1052 <vscale x 1 x half> undef,
1053 <vscale x 1 x half>* %0,
1056 ret <vscale x 1 x half> %a
1059 declare <vscale x 1 x half> @llvm.riscv.vle.mask.nxv1f16(
1060 <vscale x 1 x half>,
1061 <vscale x 1 x half>*,
1066 define <vscale x 1 x half> @intrinsic_vle_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
1067 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f16_nxv1f16:
1068 ; CHECK: # %bb.0: # %entry
1069 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
1070 ; CHECK-NEXT: vle16.v v8, (a0), v0.t
1073 %a = call <vscale x 1 x half> @llvm.riscv.vle.mask.nxv1f16(
1074 <vscale x 1 x half> %0,
1075 <vscale x 1 x half>* %1,
1076 <vscale x 1 x i1> %2,
1079 ret <vscale x 1 x half> %a
1082 declare <vscale x 2 x half> @llvm.riscv.vle.nxv2f16(
1083 <vscale x 2 x half>,
1084 <vscale x 2 x half>*,
1087 define <vscale x 2 x half> @intrinsic_vle_v_nxv2f16_nxv2f16(<vscale x 2 x half>* %0, iXLen %1) nounwind {
1088 ; CHECK-LABEL: intrinsic_vle_v_nxv2f16_nxv2f16:
1089 ; CHECK: # %bb.0: # %entry
1090 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
1091 ; CHECK-NEXT: vle16.v v8, (a0)
1094 %a = call <vscale x 2 x half> @llvm.riscv.vle.nxv2f16(
1095 <vscale x 2 x half> undef,
1096 <vscale x 2 x half>* %0,
1099 ret <vscale x 2 x half> %a
1102 declare <vscale x 2 x half> @llvm.riscv.vle.mask.nxv2f16(
1103 <vscale x 2 x half>,
1104 <vscale x 2 x half>*,
1109 define <vscale x 2 x half> @intrinsic_vle_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
1110 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f16_nxv2f16:
1111 ; CHECK: # %bb.0: # %entry
1112 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
1113 ; CHECK-NEXT: vle16.v v8, (a0), v0.t
1116 %a = call <vscale x 2 x half> @llvm.riscv.vle.mask.nxv2f16(
1117 <vscale x 2 x half> %0,
1118 <vscale x 2 x half>* %1,
1119 <vscale x 2 x i1> %2,
1122 ret <vscale x 2 x half> %a
1125 declare <vscale x 4 x half> @llvm.riscv.vle.nxv4f16(
1126 <vscale x 4 x half>,
1127 <vscale x 4 x half>*,
1130 define <vscale x 4 x half> @intrinsic_vle_v_nxv4f16_nxv4f16(<vscale x 4 x half>* %0, iXLen %1) nounwind {
1131 ; CHECK-LABEL: intrinsic_vle_v_nxv4f16_nxv4f16:
1132 ; CHECK: # %bb.0: # %entry
1133 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1134 ; CHECK-NEXT: vle16.v v8, (a0)
1137 %a = call <vscale x 4 x half> @llvm.riscv.vle.nxv4f16(
1138 <vscale x 4 x half> undef,
1139 <vscale x 4 x half>* %0,
1142 ret <vscale x 4 x half> %a
1145 declare <vscale x 4 x half> @llvm.riscv.vle.mask.nxv4f16(
1146 <vscale x 4 x half>,
1147 <vscale x 4 x half>*,
1152 define <vscale x 4 x half> @intrinsic_vle_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
1153 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f16_nxv4f16:
1154 ; CHECK: # %bb.0: # %entry
1155 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
1156 ; CHECK-NEXT: vle16.v v8, (a0), v0.t
1159 %a = call <vscale x 4 x half> @llvm.riscv.vle.mask.nxv4f16(
1160 <vscale x 4 x half> %0,
1161 <vscale x 4 x half>* %1,
1162 <vscale x 4 x i1> %2,
1165 ret <vscale x 4 x half> %a
1168 declare <vscale x 8 x half> @llvm.riscv.vle.nxv8f16(
1169 <vscale x 8 x half>,
1170 <vscale x 8 x half>*,
1173 define <vscale x 8 x half> @intrinsic_vle_v_nxv8f16_nxv8f16(<vscale x 8 x half>* %0, iXLen %1) nounwind {
1174 ; CHECK-LABEL: intrinsic_vle_v_nxv8f16_nxv8f16:
1175 ; CHECK: # %bb.0: # %entry
1176 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
1177 ; CHECK-NEXT: vle16.v v8, (a0)
1180 %a = call <vscale x 8 x half> @llvm.riscv.vle.nxv8f16(
1181 <vscale x 8 x half> undef,
1182 <vscale x 8 x half>* %0,
1185 ret <vscale x 8 x half> %a
declare <vscale x 8 x half> @llvm.riscv.vle.mask.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>*,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x half> @intrinsic_vle_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vle.mask.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half>* %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)
  ret <vscale x 8 x half> %a
}
declare <vscale x 16 x half> @llvm.riscv.vle.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>*,
  iXLen);

define <vscale x 16 x half> @intrinsic_vle_v_nxv16f16_nxv16f16(<vscale x 16 x half>* %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vle.nxv16f16(
    <vscale x 16 x half> undef,
    <vscale x 16 x half>* %0,
    iXLen %1)
  ret <vscale x 16 x half> %a
}
declare <vscale x 16 x half> @llvm.riscv.vle.mask.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>*,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x half> @intrinsic_vle_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vle.mask.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half>* %1,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)
  ret <vscale x 16 x half> %a
}
declare <vscale x 32 x half> @llvm.riscv.vle.nxv32f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>*,
  iXLen);

define <vscale x 32 x half> @intrinsic_vle_v_nxv32f16_nxv32f16(<vscale x 32 x half>* %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vle.nxv32f16(
    <vscale x 32 x half> undef,
    <vscale x 32 x half>* %0,
    iXLen %1)
  ret <vscale x 32 x half> %a
}
declare <vscale x 32 x half> @llvm.riscv.vle.mask.nxv32f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>*,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x half> @intrinsic_vle_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vle.mask.nxv32f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half>* %1,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)
  ret <vscale x 32 x half> %a
}
declare <vscale x 1 x bfloat> @llvm.riscv.vle.nxv1bf16(
  <vscale x 1 x bfloat>,
  <vscale x 1 x bfloat>*,
  iXLen);

define <vscale x 1 x bfloat> @intrinsic_vle_v_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat>* %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv1bf16_nxv1bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x bfloat> @llvm.riscv.vle.nxv1bf16(
    <vscale x 1 x bfloat> undef,
    <vscale x 1 x bfloat>* %0,
    iXLen %1)
  ret <vscale x 1 x bfloat> %a
}
declare <vscale x 1 x bfloat> @llvm.riscv.vle.mask.nxv1bf16(
  <vscale x 1 x bfloat>,
  <vscale x 1 x bfloat>*,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x bfloat> @intrinsic_vle_mask_v_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv1bf16_nxv1bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x bfloat> @llvm.riscv.vle.mask.nxv1bf16(
    <vscale x 1 x bfloat> %0,
    <vscale x 1 x bfloat>* %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)
  ret <vscale x 1 x bfloat> %a
}
declare <vscale x 2 x bfloat> @llvm.riscv.vle.nxv2bf16(
  <vscale x 2 x bfloat>,
  <vscale x 2 x bfloat>*,
  iXLen);

define <vscale x 2 x bfloat> @intrinsic_vle_v_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat>* %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv2bf16_nxv2bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x bfloat> @llvm.riscv.vle.nxv2bf16(
    <vscale x 2 x bfloat> undef,
    <vscale x 2 x bfloat>* %0,
    iXLen %1)
  ret <vscale x 2 x bfloat> %a
}
declare <vscale x 2 x bfloat> @llvm.riscv.vle.mask.nxv2bf16(
  <vscale x 2 x bfloat>,
  <vscale x 2 x bfloat>*,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x bfloat> @intrinsic_vle_mask_v_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv2bf16_nxv2bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x bfloat> @llvm.riscv.vle.mask.nxv2bf16(
    <vscale x 2 x bfloat> %0,
    <vscale x 2 x bfloat>* %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)
  ret <vscale x 2 x bfloat> %a
}
declare <vscale x 4 x bfloat> @llvm.riscv.vle.nxv4bf16(
  <vscale x 4 x bfloat>,
  <vscale x 4 x bfloat>*,
  iXLen);

define <vscale x 4 x bfloat> @intrinsic_vle_v_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat>* %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv4bf16_nxv4bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x bfloat> @llvm.riscv.vle.nxv4bf16(
    <vscale x 4 x bfloat> undef,
    <vscale x 4 x bfloat>* %0,
    iXLen %1)
  ret <vscale x 4 x bfloat> %a
}
declare <vscale x 4 x bfloat> @llvm.riscv.vle.mask.nxv4bf16(
  <vscale x 4 x bfloat>,
  <vscale x 4 x bfloat>*,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x bfloat> @intrinsic_vle_mask_v_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv4bf16_nxv4bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x bfloat> @llvm.riscv.vle.mask.nxv4bf16(
    <vscale x 4 x bfloat> %0,
    <vscale x 4 x bfloat>* %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)
  ret <vscale x 4 x bfloat> %a
}
declare <vscale x 8 x bfloat> @llvm.riscv.vle.nxv8bf16(
  <vscale x 8 x bfloat>,
  <vscale x 8 x bfloat>*,
  iXLen);

define <vscale x 8 x bfloat> @intrinsic_vle_v_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat>* %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv8bf16_nxv8bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x bfloat> @llvm.riscv.vle.nxv8bf16(
    <vscale x 8 x bfloat> undef,
    <vscale x 8 x bfloat>* %0,
    iXLen %1)
  ret <vscale x 8 x bfloat> %a
}
declare <vscale x 8 x bfloat> @llvm.riscv.vle.mask.nxv8bf16(
  <vscale x 8 x bfloat>,
  <vscale x 8 x bfloat>*,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x bfloat> @intrinsic_vle_mask_v_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv8bf16_nxv8bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x bfloat> @llvm.riscv.vle.mask.nxv8bf16(
    <vscale x 8 x bfloat> %0,
    <vscale x 8 x bfloat>* %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)
  ret <vscale x 8 x bfloat> %a
}
declare <vscale x 16 x bfloat> @llvm.riscv.vle.nxv16bf16(
  <vscale x 16 x bfloat>,
  <vscale x 16 x bfloat>*,
  iXLen);

define <vscale x 16 x bfloat> @intrinsic_vle_v_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat>* %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv16bf16_nxv16bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x bfloat> @llvm.riscv.vle.nxv16bf16(
    <vscale x 16 x bfloat> undef,
    <vscale x 16 x bfloat>* %0,
    iXLen %1)
  ret <vscale x 16 x bfloat> %a
}
declare <vscale x 16 x bfloat> @llvm.riscv.vle.mask.nxv16bf16(
  <vscale x 16 x bfloat>,
  <vscale x 16 x bfloat>*,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x bfloat> @intrinsic_vle_mask_v_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv16bf16_nxv16bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x bfloat> @llvm.riscv.vle.mask.nxv16bf16(
    <vscale x 16 x bfloat> %0,
    <vscale x 16 x bfloat>* %1,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)
  ret <vscale x 16 x bfloat> %a
}
declare <vscale x 32 x bfloat> @llvm.riscv.vle.nxv32bf16(
  <vscale x 32 x bfloat>,
  <vscale x 32 x bfloat>*,
  iXLen);

define <vscale x 32 x bfloat> @intrinsic_vle_v_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat>* %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv32bf16_nxv32bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x bfloat> @llvm.riscv.vle.nxv32bf16(
    <vscale x 32 x bfloat> undef,
    <vscale x 32 x bfloat>* %0,
    iXLen %1)
  ret <vscale x 32 x bfloat> %a
}
declare <vscale x 32 x bfloat> @llvm.riscv.vle.mask.nxv32bf16(
  <vscale x 32 x bfloat>,
  <vscale x 32 x bfloat>*,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x bfloat> @intrinsic_vle_mask_v_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat>* %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv32bf16_nxv32bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x bfloat> @llvm.riscv.vle.mask.nxv32bf16(
    <vscale x 32 x bfloat> %0,
    <vscale x 32 x bfloat>* %1,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)
  ret <vscale x 32 x bfloat> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>*,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vle_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8>* %0,
    iXLen %1)
  ret <vscale x 1 x i8> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>*,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vle_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8>* %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)
  ret <vscale x 1 x i8> %a
}
declare <vscale x 2 x i8> @llvm.riscv.vle.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>*,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vle_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vle.nxv2i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8>* %0,
    iXLen %1)
  ret <vscale x 2 x i8> %a
}
declare <vscale x 2 x i8> @llvm.riscv.vle.mask.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>*,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vle_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vle.mask.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8>* %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)
  ret <vscale x 2 x i8> %a
}
declare <vscale x 4 x i8> @llvm.riscv.vle.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>*,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vle_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vle.nxv4i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8>* %0,
    iXLen %1)
  ret <vscale x 4 x i8> %a
}
declare <vscale x 4 x i8> @llvm.riscv.vle.mask.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>*,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vle_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vle.mask.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8>* %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)
  ret <vscale x 4 x i8> %a
}
declare <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>*,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vle_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8>* %0,
    iXLen %1)
  ret <vscale x 8 x i8> %a
}
declare <vscale x 8 x i8> @llvm.riscv.vle.mask.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>*,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vle_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vle.mask.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8>* %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)
  ret <vscale x 8 x i8> %a
}
declare <vscale x 16 x i8> @llvm.riscv.vle.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>*,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vle_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vle.nxv16i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8>* %0,
    iXLen %1)
  ret <vscale x 16 x i8> %a
}
declare <vscale x 16 x i8> @llvm.riscv.vle.mask.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>*,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vle_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vle.mask.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8>* %1,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)
  ret <vscale x 16 x i8> %a
}
declare <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>*,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vle_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8>* %0,
    iXLen %1)
  ret <vscale x 32 x i8> %a
}
declare <vscale x 32 x i8> @llvm.riscv.vle.mask.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>*,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vle_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vle.mask.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8>* %1,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)
  ret <vscale x 32 x i8> %a
}
declare <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>*,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vle_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8>* %0,
    iXLen %1)
  ret <vscale x 64 x i8> %a
}
declare <vscale x 64 x i8> @llvm.riscv.vle.mask.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>*,
  <vscale x 64 x i1>,
  iXLen,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vle_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vle.mask.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8>* %1,
    <vscale x 64 x i1> %2,
    iXLen %3, iXLen 1)
  ret <vscale x 64 x i8> %a
}