1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
3 ; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefixes=CHECK,RV32
4 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
5 ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64
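;
; These tests cover the RVV fault-only-first load intrinsics
; (@llvm.riscv.vleff and @llvm.riscv.vleff.mask) across element types and
; LMULs. Each case expects a vle<SEW>ff.v (with v0.t for the masked form)
; under the matching vsetvli, followed by a csrr of vl; that updated vl,
; returned as the second element of the intrinsic's result, is stored
; through the trailing pointer argument (sw on RV32, sd on RV64).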
7 declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.nxv1i64(
12 define <vscale x 1 x i64> @intrinsic_vleff_v_nxv1i64_nxv1i64(ptr %0, iXLen %1, iXLen* %2) nounwind {
13 ; RV32-LABEL: intrinsic_vleff_v_nxv1i64_nxv1i64:
14 ; RV32: # %bb.0: # %entry
15 ; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, ma
16 ; RV32-NEXT: vle64ff.v v8, (a0)
17 ; RV32-NEXT: csrr a0, vl
18 ; RV32-NEXT: sw a0, 0(a2)
21 ; RV64-LABEL: intrinsic_vleff_v_nxv1i64_nxv1i64:
22 ; RV64: # %bb.0: # %entry
23 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
24 ; RV64-NEXT: vle64ff.v v8, (a0)
25 ; RV64-NEXT: csrr a0, vl
26 ; RV64-NEXT: sd a0, 0(a2)
29 %a = call { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.nxv1i64(
30 <vscale x 1 x i64> undef,
33 %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
34 %c = extractvalue { <vscale x 1 x i64>, iXLen } %a, 1
35 store iXLen %c, iXLen* %2
36 ret <vscale x 1 x i64> %b
39 declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
46 define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
47 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
48 ; RV32: # %bb.0: # %entry
49 ; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, mu
50 ; RV32-NEXT: vle64ff.v v8, (a0), v0.t
51 ; RV32-NEXT: csrr a0, vl
52 ; RV32-NEXT: sw a0, 0(a2)
55 ; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
56 ; RV64: # %bb.0: # %entry
57 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
58 ; RV64-NEXT: vle64ff.v v8, (a0), v0.t
59 ; RV64-NEXT: csrr a0, vl
60 ; RV64-NEXT: sd a0, 0(a2)
63 %a = call { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
64 <vscale x 1 x i64> %0,
68 %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
69 %c = extractvalue { <vscale x 1 x i64>, iXLen } %a, 1
70 store iXLen %c, iXLen* %4
72 ret <vscale x 1 x i64> %b
75 declare { <vscale x 2 x i64>, iXLen } @llvm.riscv.vleff.nxv2i64(
80 define <vscale x 2 x i64> @intrinsic_vleff_v_nxv2i64_nxv2i64(ptr %0, iXLen %1, iXLen* %2) nounwind {
81 ; RV32-LABEL: intrinsic_vleff_v_nxv2i64_nxv2i64:
82 ; RV32: # %bb.0: # %entry
83 ; RV32-NEXT: vsetvli zero, a1, e64, m2, ta, ma
84 ; RV32-NEXT: vle64ff.v v8, (a0)
85 ; RV32-NEXT: csrr a0, vl
86 ; RV32-NEXT: sw a0, 0(a2)
89 ; RV64-LABEL: intrinsic_vleff_v_nxv2i64_nxv2i64:
90 ; RV64: # %bb.0: # %entry
91 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
92 ; RV64-NEXT: vle64ff.v v8, (a0)
93 ; RV64-NEXT: csrr a0, vl
94 ; RV64-NEXT: sd a0, 0(a2)
97 %a = call { <vscale x 2 x i64>, iXLen } @llvm.riscv.vleff.nxv2i64(
98 <vscale x 2 x i64> undef,
101 %b = extractvalue { <vscale x 2 x i64>, iXLen } %a, 0
102 %c = extractvalue { <vscale x 2 x i64>, iXLen } %a, 1
103 store iXLen %c, iXLen* %2
104 ret <vscale x 2 x i64> %b
107 declare { <vscale x 2 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv2i64(
114 define <vscale x 2 x i64> @intrinsic_vleff_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
115 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i64_nxv2i64:
116 ; RV32: # %bb.0: # %entry
117 ; RV32-NEXT: vsetvli zero, a1, e64, m2, ta, mu
118 ; RV32-NEXT: vle64ff.v v8, (a0), v0.t
119 ; RV32-NEXT: csrr a0, vl
120 ; RV32-NEXT: sw a0, 0(a2)
123 ; RV64-LABEL: intrinsic_vleff_mask_v_nxv2i64_nxv2i64:
124 ; RV64: # %bb.0: # %entry
125 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
126 ; RV64-NEXT: vle64ff.v v8, (a0), v0.t
127 ; RV64-NEXT: csrr a0, vl
128 ; RV64-NEXT: sd a0, 0(a2)
131 %a = call { <vscale x 2 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv2i64(
132 <vscale x 2 x i64> %0,
134 <vscale x 2 x i1> %2,
136 %b = extractvalue { <vscale x 2 x i64>, iXLen } %a, 0
137 %c = extractvalue { <vscale x 2 x i64>, iXLen } %a, 1
138 store iXLen %c, iXLen* %4
140 ret <vscale x 2 x i64> %b
143 declare { <vscale x 4 x i64>, iXLen } @llvm.riscv.vleff.nxv4i64(
148 define <vscale x 4 x i64> @intrinsic_vleff_v_nxv4i64_nxv4i64(ptr %0, iXLen %1, iXLen* %2) nounwind {
149 ; RV32-LABEL: intrinsic_vleff_v_nxv4i64_nxv4i64:
150 ; RV32: # %bb.0: # %entry
151 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
152 ; RV32-NEXT: vle64ff.v v8, (a0)
153 ; RV32-NEXT: csrr a0, vl
154 ; RV32-NEXT: sw a0, 0(a2)
157 ; RV64-LABEL: intrinsic_vleff_v_nxv4i64_nxv4i64:
158 ; RV64: # %bb.0: # %entry
159 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
160 ; RV64-NEXT: vle64ff.v v8, (a0)
161 ; RV64-NEXT: csrr a0, vl
162 ; RV64-NEXT: sd a0, 0(a2)
165 %a = call { <vscale x 4 x i64>, iXLen } @llvm.riscv.vleff.nxv4i64(
166 <vscale x 4 x i64> undef,
169 %b = extractvalue { <vscale x 4 x i64>, iXLen } %a, 0
170 %c = extractvalue { <vscale x 4 x i64>, iXLen } %a, 1
171 store iXLen %c, iXLen* %2
172 ret <vscale x 4 x i64> %b
175 declare { <vscale x 4 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv4i64(
182 define <vscale x 4 x i64> @intrinsic_vleff_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
183 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i64_nxv4i64:
184 ; RV32: # %bb.0: # %entry
185 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
186 ; RV32-NEXT: vle64ff.v v8, (a0), v0.t
187 ; RV32-NEXT: csrr a0, vl
188 ; RV32-NEXT: sw a0, 0(a2)
191 ; RV64-LABEL: intrinsic_vleff_mask_v_nxv4i64_nxv4i64:
192 ; RV64: # %bb.0: # %entry
193 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
194 ; RV64-NEXT: vle64ff.v v8, (a0), v0.t
195 ; RV64-NEXT: csrr a0, vl
196 ; RV64-NEXT: sd a0, 0(a2)
199 %a = call { <vscale x 4 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv4i64(
200 <vscale x 4 x i64> %0,
202 <vscale x 4 x i1> %2,
204 %b = extractvalue { <vscale x 4 x i64>, iXLen } %a, 0
205 %c = extractvalue { <vscale x 4 x i64>, iXLen } %a, 1
206 store iXLen %c, iXLen* %4
208 ret <vscale x 4 x i64> %b
211 declare { <vscale x 8 x i64>, iXLen } @llvm.riscv.vleff.nxv8i64(
216 define <vscale x 8 x i64> @intrinsic_vleff_v_nxv8i64_nxv8i64(ptr %0, iXLen %1, iXLen* %2) nounwind {
217 ; RV32-LABEL: intrinsic_vleff_v_nxv8i64_nxv8i64:
218 ; RV32: # %bb.0: # %entry
219 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
220 ; RV32-NEXT: vle64ff.v v8, (a0)
221 ; RV32-NEXT: csrr a0, vl
222 ; RV32-NEXT: sw a0, 0(a2)
225 ; RV64-LABEL: intrinsic_vleff_v_nxv8i64_nxv8i64:
226 ; RV64: # %bb.0: # %entry
227 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
228 ; RV64-NEXT: vle64ff.v v8, (a0)
229 ; RV64-NEXT: csrr a0, vl
230 ; RV64-NEXT: sd a0, 0(a2)
233 %a = call { <vscale x 8 x i64>, iXLen } @llvm.riscv.vleff.nxv8i64(
234 <vscale x 8 x i64> undef,
237 %b = extractvalue { <vscale x 8 x i64>, iXLen } %a, 0
238 %c = extractvalue { <vscale x 8 x i64>, iXLen } %a, 1
239 store iXLen %c, iXLen* %2
240 ret <vscale x 8 x i64> %b
243 declare { <vscale x 8 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv8i64(
250 define <vscale x 8 x i64> @intrinsic_vleff_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
251 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i64_nxv8i64:
252 ; RV32: # %bb.0: # %entry
253 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
254 ; RV32-NEXT: vle64ff.v v8, (a0), v0.t
255 ; RV32-NEXT: csrr a0, vl
256 ; RV32-NEXT: sw a0, 0(a2)
259 ; RV64-LABEL: intrinsic_vleff_mask_v_nxv8i64_nxv8i64:
260 ; RV64: # %bb.0: # %entry
261 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
262 ; RV64-NEXT: vle64ff.v v8, (a0), v0.t
263 ; RV64-NEXT: csrr a0, vl
264 ; RV64-NEXT: sd a0, 0(a2)
267 %a = call { <vscale x 8 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv8i64(
268 <vscale x 8 x i64> %0,
270 <vscale x 8 x i1> %2,
272 %b = extractvalue { <vscale x 8 x i64>, iXLen } %a, 0
273 %c = extractvalue { <vscale x 8 x i64>, iXLen } %a, 1
274 store iXLen %c, iXLen* %4
276 ret <vscale x 8 x i64> %b
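; The e64 integer cases above repeat below with double (f64) element vectors.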
279 declare { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.nxv1f64(
280 <vscale x 1 x double>,
284 define <vscale x 1 x double> @intrinsic_vleff_v_nxv1f64_nxv1f64(ptr %0, iXLen %1, iXLen* %2) nounwind {
285 ; RV32-LABEL: intrinsic_vleff_v_nxv1f64_nxv1f64:
286 ; RV32: # %bb.0: # %entry
287 ; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, ma
288 ; RV32-NEXT: vle64ff.v v8, (a0)
289 ; RV32-NEXT: csrr a0, vl
290 ; RV32-NEXT: sw a0, 0(a2)
293 ; RV64-LABEL: intrinsic_vleff_v_nxv1f64_nxv1f64:
294 ; RV64: # %bb.0: # %entry
295 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
296 ; RV64-NEXT: vle64ff.v v8, (a0)
297 ; RV64-NEXT: csrr a0, vl
298 ; RV64-NEXT: sd a0, 0(a2)
301 %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.nxv1f64(
302 <vscale x 1 x double> undef,
305 %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 0
306 %c = extractvalue { <vscale x 1 x double>, iXLen } %a, 1
307 store iXLen %c, iXLen* %2
308 ret <vscale x 1 x double> %b
311 declare { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(
312 <vscale x 1 x double>,
318 define <vscale x 1 x double> @intrinsic_vleff_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
319 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1f64_nxv1f64:
320 ; RV32: # %bb.0: # %entry
321 ; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, mu
322 ; RV32-NEXT: vle64ff.v v8, (a0), v0.t
323 ; RV32-NEXT: csrr a0, vl
324 ; RV32-NEXT: sw a0, 0(a2)
327 ; RV64-LABEL: intrinsic_vleff_mask_v_nxv1f64_nxv1f64:
328 ; RV64: # %bb.0: # %entry
329 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
330 ; RV64-NEXT: vle64ff.v v8, (a0), v0.t
331 ; RV64-NEXT: csrr a0, vl
332 ; RV64-NEXT: sd a0, 0(a2)
335 %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(
336 <vscale x 1 x double> %0,
338 <vscale x 1 x i1> %2,
340 %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 0
341 %c = extractvalue { <vscale x 1 x double>, iXLen } %a, 1
342 store iXLen %c, iXLen* %4
344 ret <vscale x 1 x double> %b
347 declare { <vscale x 2 x double>, iXLen } @llvm.riscv.vleff.nxv2f64(
348 <vscale x 2 x double>,
352 define <vscale x 2 x double> @intrinsic_vleff_v_nxv2f64_nxv2f64(ptr %0, iXLen %1, iXLen* %2) nounwind {
353 ; RV32-LABEL: intrinsic_vleff_v_nxv2f64_nxv2f64:
354 ; RV32: # %bb.0: # %entry
355 ; RV32-NEXT: vsetvli zero, a1, e64, m2, ta, ma
356 ; RV32-NEXT: vle64ff.v v8, (a0)
357 ; RV32-NEXT: csrr a0, vl
358 ; RV32-NEXT: sw a0, 0(a2)
361 ; RV64-LABEL: intrinsic_vleff_v_nxv2f64_nxv2f64:
362 ; RV64: # %bb.0: # %entry
363 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
364 ; RV64-NEXT: vle64ff.v v8, (a0)
365 ; RV64-NEXT: csrr a0, vl
366 ; RV64-NEXT: sd a0, 0(a2)
369 %a = call { <vscale x 2 x double>, iXLen } @llvm.riscv.vleff.nxv2f64(
370 <vscale x 2 x double> undef,
373 %b = extractvalue { <vscale x 2 x double>, iXLen } %a, 0
374 %c = extractvalue { <vscale x 2 x double>, iXLen } %a, 1
375 store iXLen %c, iXLen* %2
376 ret <vscale x 2 x double> %b
379 declare { <vscale x 2 x double>, iXLen } @llvm.riscv.vleff.mask.nxv2f64(
380 <vscale x 2 x double>,
386 define <vscale x 2 x double> @intrinsic_vleff_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
387 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2f64_nxv2f64:
388 ; RV32: # %bb.0: # %entry
389 ; RV32-NEXT: vsetvli zero, a1, e64, m2, ta, mu
390 ; RV32-NEXT: vle64ff.v v8, (a0), v0.t
391 ; RV32-NEXT: csrr a0, vl
392 ; RV32-NEXT: sw a0, 0(a2)
395 ; RV64-LABEL: intrinsic_vleff_mask_v_nxv2f64_nxv2f64:
396 ; RV64: # %bb.0: # %entry
397 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
398 ; RV64-NEXT: vle64ff.v v8, (a0), v0.t
399 ; RV64-NEXT: csrr a0, vl
400 ; RV64-NEXT: sd a0, 0(a2)
403 %a = call { <vscale x 2 x double>, iXLen } @llvm.riscv.vleff.mask.nxv2f64(
404 <vscale x 2 x double> %0,
406 <vscale x 2 x i1> %2,
408 %b = extractvalue { <vscale x 2 x double>, iXLen } %a, 0
409 %c = extractvalue { <vscale x 2 x double>, iXLen } %a, 1
410 store iXLen %c, iXLen* %4
412 ret <vscale x 2 x double> %b
415 declare { <vscale x 4 x double>, iXLen } @llvm.riscv.vleff.nxv4f64(
416 <vscale x 4 x double>,
420 define <vscale x 4 x double> @intrinsic_vleff_v_nxv4f64_nxv4f64(ptr %0, iXLen %1, iXLen* %2) nounwind {
421 ; RV32-LABEL: intrinsic_vleff_v_nxv4f64_nxv4f64:
422 ; RV32: # %bb.0: # %entry
423 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
424 ; RV32-NEXT: vle64ff.v v8, (a0)
425 ; RV32-NEXT: csrr a0, vl
426 ; RV32-NEXT: sw a0, 0(a2)
429 ; RV64-LABEL: intrinsic_vleff_v_nxv4f64_nxv4f64:
430 ; RV64: # %bb.0: # %entry
431 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
432 ; RV64-NEXT: vle64ff.v v8, (a0)
433 ; RV64-NEXT: csrr a0, vl
434 ; RV64-NEXT: sd a0, 0(a2)
437 %a = call { <vscale x 4 x double>, iXLen } @llvm.riscv.vleff.nxv4f64(
438 <vscale x 4 x double> undef,
441 %b = extractvalue { <vscale x 4 x double>, iXLen } %a, 0
442 %c = extractvalue { <vscale x 4 x double>, iXLen } %a, 1
443 store iXLen %c, iXLen* %2
444 ret <vscale x 4 x double> %b
447 declare { <vscale x 4 x double>, iXLen } @llvm.riscv.vleff.mask.nxv4f64(
448 <vscale x 4 x double>,
454 define <vscale x 4 x double> @intrinsic_vleff_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
455 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4f64_nxv4f64:
456 ; RV32: # %bb.0: # %entry
457 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
458 ; RV32-NEXT: vle64ff.v v8, (a0), v0.t
459 ; RV32-NEXT: csrr a0, vl
460 ; RV32-NEXT: sw a0, 0(a2)
463 ; RV64-LABEL: intrinsic_vleff_mask_v_nxv4f64_nxv4f64:
464 ; RV64: # %bb.0: # %entry
465 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
466 ; RV64-NEXT: vle64ff.v v8, (a0), v0.t
467 ; RV64-NEXT: csrr a0, vl
468 ; RV64-NEXT: sd a0, 0(a2)
471 %a = call { <vscale x 4 x double>, iXLen } @llvm.riscv.vleff.mask.nxv4f64(
472 <vscale x 4 x double> %0,
474 <vscale x 4 x i1> %2,
476 %b = extractvalue { <vscale x 4 x double>, iXLen } %a, 0
477 %c = extractvalue { <vscale x 4 x double>, iXLen } %a, 1
478 store iXLen %c, iXLen* %4
480 ret <vscale x 4 x double> %b
483 declare { <vscale x 8 x double>, iXLen } @llvm.riscv.vleff.nxv8f64(
484 <vscale x 8 x double>,
488 define <vscale x 8 x double> @intrinsic_vleff_v_nxv8f64_nxv8f64(ptr %0, iXLen %1, iXLen* %2) nounwind {
489 ; RV32-LABEL: intrinsic_vleff_v_nxv8f64_nxv8f64:
490 ; RV32: # %bb.0: # %entry
491 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
492 ; RV32-NEXT: vle64ff.v v8, (a0)
493 ; RV32-NEXT: csrr a0, vl
494 ; RV32-NEXT: sw a0, 0(a2)
497 ; RV64-LABEL: intrinsic_vleff_v_nxv8f64_nxv8f64:
498 ; RV64: # %bb.0: # %entry
499 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
500 ; RV64-NEXT: vle64ff.v v8, (a0)
501 ; RV64-NEXT: csrr a0, vl
502 ; RV64-NEXT: sd a0, 0(a2)
505 %a = call { <vscale x 8 x double>, iXLen } @llvm.riscv.vleff.nxv8f64(
506 <vscale x 8 x double> undef,
509 %b = extractvalue { <vscale x 8 x double>, iXLen } %a, 0
510 %c = extractvalue { <vscale x 8 x double>, iXLen } %a, 1
511 store iXLen %c, iXLen* %2
512 ret <vscale x 8 x double> %b
515 declare { <vscale x 8 x double>, iXLen } @llvm.riscv.vleff.mask.nxv8f64(
516 <vscale x 8 x double>,
522 define <vscale x 8 x double> @intrinsic_vleff_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
523 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8f64_nxv8f64:
524 ; RV32: # %bb.0: # %entry
525 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
526 ; RV32-NEXT: vle64ff.v v8, (a0), v0.t
527 ; RV32-NEXT: csrr a0, vl
528 ; RV32-NEXT: sw a0, 0(a2)
531 ; RV64-LABEL: intrinsic_vleff_mask_v_nxv8f64_nxv8f64:
532 ; RV64: # %bb.0: # %entry
533 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
534 ; RV64-NEXT: vle64ff.v v8, (a0), v0.t
535 ; RV64-NEXT: csrr a0, vl
536 ; RV64-NEXT: sd a0, 0(a2)
539 %a = call { <vscale x 8 x double>, iXLen } @llvm.riscv.vleff.mask.nxv8f64(
540 <vscale x 8 x double> %0,
542 <vscale x 8 x i1> %2,
544 %b = extractvalue { <vscale x 8 x double>, iXLen } %a, 0
545 %c = extractvalue { <vscale x 8 x double>, iXLen } %a, 1
546 store iXLen %c, iXLen* %4
548 ret <vscale x 8 x double> %b
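; e32 integer element vectors (vle32ff.v).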
551 declare { <vscale x 1 x i32>, iXLen } @llvm.riscv.vleff.nxv1i32(
556 define <vscale x 1 x i32> @intrinsic_vleff_v_nxv1i32_nxv1i32(ptr %0, iXLen %1, iXLen* %2) nounwind {
557 ; RV32-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32:
558 ; RV32: # %bb.0: # %entry
559 ; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
560 ; RV32-NEXT: vle32ff.v v8, (a0)
561 ; RV32-NEXT: csrr a0, vl
562 ; RV32-NEXT: sw a0, 0(a2)
565 ; RV64-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32:
566 ; RV64: # %bb.0: # %entry
567 ; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
568 ; RV64-NEXT: vle32ff.v v8, (a0)
569 ; RV64-NEXT: csrr a0, vl
570 ; RV64-NEXT: sd a0, 0(a2)
573 %a = call { <vscale x 1 x i32>, iXLen } @llvm.riscv.vleff.nxv1i32(
574 <vscale x 1 x i32> undef,
577 %b = extractvalue { <vscale x 1 x i32>, iXLen } %a, 0
578 %c = extractvalue { <vscale x 1 x i32>, iXLen } %a, 1
579 store iXLen %c, iXLen* %2
580 ret <vscale x 1 x i32> %b
583 declare { <vscale x 1 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv1i32(
590 define <vscale x 1 x i32> @intrinsic_vleff_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
591 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32:
592 ; RV32: # %bb.0: # %entry
593 ; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
594 ; RV32-NEXT: vle32ff.v v8, (a0), v0.t
595 ; RV32-NEXT: csrr a0, vl
596 ; RV32-NEXT: sw a0, 0(a2)
599 ; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32:
600 ; RV64: # %bb.0: # %entry
601 ; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
602 ; RV64-NEXT: vle32ff.v v8, (a0), v0.t
603 ; RV64-NEXT: csrr a0, vl
604 ; RV64-NEXT: sd a0, 0(a2)
607 %a = call { <vscale x 1 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv1i32(
608 <vscale x 1 x i32> %0,
610 <vscale x 1 x i1> %2,
612 %b = extractvalue { <vscale x 1 x i32>, iXLen } %a, 0
613 %c = extractvalue { <vscale x 1 x i32>, iXLen } %a, 1
614 store iXLen %c, iXLen* %4
616 ret <vscale x 1 x i32> %b
619 declare { <vscale x 2 x i32>, iXLen } @llvm.riscv.vleff.nxv2i32(
624 define <vscale x 2 x i32> @intrinsic_vleff_v_nxv2i32_nxv2i32(ptr %0, iXLen %1, iXLen* %2) nounwind {
625 ; RV32-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32:
626 ; RV32: # %bb.0: # %entry
627 ; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, ma
628 ; RV32-NEXT: vle32ff.v v8, (a0)
629 ; RV32-NEXT: csrr a0, vl
630 ; RV32-NEXT: sw a0, 0(a2)
633 ; RV64-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32:
634 ; RV64: # %bb.0: # %entry
635 ; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma
636 ; RV64-NEXT: vle32ff.v v8, (a0)
637 ; RV64-NEXT: csrr a0, vl
638 ; RV64-NEXT: sd a0, 0(a2)
641 %a = call { <vscale x 2 x i32>, iXLen } @llvm.riscv.vleff.nxv2i32(
642 <vscale x 2 x i32> undef,
645 %b = extractvalue { <vscale x 2 x i32>, iXLen } %a, 0
646 %c = extractvalue { <vscale x 2 x i32>, iXLen } %a, 1
647 store iXLen %c, iXLen* %2
648 ret <vscale x 2 x i32> %b
651 declare { <vscale x 2 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv2i32(
658 define <vscale x 2 x i32> @intrinsic_vleff_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
659 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32:
660 ; RV32: # %bb.0: # %entry
661 ; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, mu
662 ; RV32-NEXT: vle32ff.v v8, (a0), v0.t
663 ; RV32-NEXT: csrr a0, vl
664 ; RV32-NEXT: sw a0, 0(a2)
667 ; RV64-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32:
668 ; RV64: # %bb.0: # %entry
669 ; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, mu
670 ; RV64-NEXT: vle32ff.v v8, (a0), v0.t
671 ; RV64-NEXT: csrr a0, vl
672 ; RV64-NEXT: sd a0, 0(a2)
675 %a = call { <vscale x 2 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv2i32(
676 <vscale x 2 x i32> %0,
678 <vscale x 2 x i1> %2,
680 %b = extractvalue { <vscale x 2 x i32>, iXLen } %a, 0
681 %c = extractvalue { <vscale x 2 x i32>, iXLen } %a, 1
682 store iXLen %c, iXLen* %4
684 ret <vscale x 2 x i32> %b
687 declare { <vscale x 4 x i32>, iXLen } @llvm.riscv.vleff.nxv4i32(
692 define <vscale x 4 x i32> @intrinsic_vleff_v_nxv4i32_nxv4i32(ptr %0, iXLen %1, iXLen* %2) nounwind {
693 ; RV32-LABEL: intrinsic_vleff_v_nxv4i32_nxv4i32:
694 ; RV32: # %bb.0: # %entry
695 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
696 ; RV32-NEXT: vle32ff.v v8, (a0)
697 ; RV32-NEXT: csrr a0, vl
698 ; RV32-NEXT: sw a0, 0(a2)
701 ; RV64-LABEL: intrinsic_vleff_v_nxv4i32_nxv4i32:
702 ; RV64: # %bb.0: # %entry
703 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
704 ; RV64-NEXT: vle32ff.v v8, (a0)
705 ; RV64-NEXT: csrr a0, vl
706 ; RV64-NEXT: sd a0, 0(a2)
709 %a = call { <vscale x 4 x i32>, iXLen } @llvm.riscv.vleff.nxv4i32(
710 <vscale x 4 x i32> undef,
713 %b = extractvalue { <vscale x 4 x i32>, iXLen } %a, 0
714 %c = extractvalue { <vscale x 4 x i32>, iXLen } %a, 1
715 store iXLen %c, iXLen* %2
716 ret <vscale x 4 x i32> %b
719 declare { <vscale x 4 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv4i32(
726 define <vscale x 4 x i32> @intrinsic_vleff_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
727 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32:
728 ; RV32: # %bb.0: # %entry
729 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
730 ; RV32-NEXT: vle32ff.v v8, (a0), v0.t
731 ; RV32-NEXT: csrr a0, vl
732 ; RV32-NEXT: sw a0, 0(a2)
735 ; RV64-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32:
736 ; RV64: # %bb.0: # %entry
737 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
738 ; RV64-NEXT: vle32ff.v v8, (a0), v0.t
739 ; RV64-NEXT: csrr a0, vl
740 ; RV64-NEXT: sd a0, 0(a2)
743 %a = call { <vscale x 4 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv4i32(
744 <vscale x 4 x i32> %0,
746 <vscale x 4 x i1> %2,
748 %b = extractvalue { <vscale x 4 x i32>, iXLen } %a, 0
749 %c = extractvalue { <vscale x 4 x i32>, iXLen } %a, 1
750 store iXLen %c, iXLen* %4
752 ret <vscale x 4 x i32> %b
755 declare { <vscale x 8 x i32>, iXLen } @llvm.riscv.vleff.nxv8i32(
760 define <vscale x 8 x i32> @intrinsic_vleff_v_nxv8i32_nxv8i32(ptr %0, iXLen %1, iXLen* %2) nounwind {
761 ; RV32-LABEL: intrinsic_vleff_v_nxv8i32_nxv8i32:
762 ; RV32: # %bb.0: # %entry
763 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
764 ; RV32-NEXT: vle32ff.v v8, (a0)
765 ; RV32-NEXT: csrr a0, vl
766 ; RV32-NEXT: sw a0, 0(a2)
769 ; RV64-LABEL: intrinsic_vleff_v_nxv8i32_nxv8i32:
770 ; RV64: # %bb.0: # %entry
771 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
772 ; RV64-NEXT: vle32ff.v v8, (a0)
773 ; RV64-NEXT: csrr a0, vl
774 ; RV64-NEXT: sd a0, 0(a2)
777 %a = call { <vscale x 8 x i32>, iXLen } @llvm.riscv.vleff.nxv8i32(
778 <vscale x 8 x i32> undef,
781 %b = extractvalue { <vscale x 8 x i32>, iXLen } %a, 0
782 %c = extractvalue { <vscale x 8 x i32>, iXLen } %a, 1
783 store iXLen %c, iXLen* %2
784 ret <vscale x 8 x i32> %b
787 declare { <vscale x 8 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv8i32(
794 define <vscale x 8 x i32> @intrinsic_vleff_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
795 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32:
796 ; RV32: # %bb.0: # %entry
797 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
798 ; RV32-NEXT: vle32ff.v v8, (a0), v0.t
799 ; RV32-NEXT: csrr a0, vl
800 ; RV32-NEXT: sw a0, 0(a2)
803 ; RV64-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32:
804 ; RV64: # %bb.0: # %entry
805 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu
806 ; RV64-NEXT: vle32ff.v v8, (a0), v0.t
807 ; RV64-NEXT: csrr a0, vl
808 ; RV64-NEXT: sd a0, 0(a2)
811 %a = call { <vscale x 8 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv8i32(
812 <vscale x 8 x i32> %0,
814 <vscale x 8 x i1> %2,
816 %b = extractvalue { <vscale x 8 x i32>, iXLen } %a, 0
817 %c = extractvalue { <vscale x 8 x i32>, iXLen } %a, 1
818 store iXLen %c, iXLen* %4
820 ret <vscale x 8 x i32> %b
823 declare { <vscale x 16 x i32>, iXLen } @llvm.riscv.vleff.nxv16i32(
828 define <vscale x 16 x i32> @intrinsic_vleff_v_nxv16i32_nxv16i32(ptr %0, iXLen %1, iXLen* %2) nounwind {
829 ; RV32-LABEL: intrinsic_vleff_v_nxv16i32_nxv16i32:
830 ; RV32: # %bb.0: # %entry
831 ; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
832 ; RV32-NEXT: vle32ff.v v8, (a0)
833 ; RV32-NEXT: csrr a0, vl
834 ; RV32-NEXT: sw a0, 0(a2)
837 ; RV64-LABEL: intrinsic_vleff_v_nxv16i32_nxv16i32:
838 ; RV64: # %bb.0: # %entry
839 ; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
840 ; RV64-NEXT: vle32ff.v v8, (a0)
841 ; RV64-NEXT: csrr a0, vl
842 ; RV64-NEXT: sd a0, 0(a2)
845 %a = call { <vscale x 16 x i32>, iXLen } @llvm.riscv.vleff.nxv16i32(
846 <vscale x 16 x i32> undef,
849 %b = extractvalue { <vscale x 16 x i32>, iXLen } %a, 0
850 %c = extractvalue { <vscale x 16 x i32>, iXLen } %a, 1
851 store iXLen %c, iXLen* %2
852 ret <vscale x 16 x i32> %b
855 declare { <vscale x 16 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv16i32(
862 define <vscale x 16 x i32> @intrinsic_vleff_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
863 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32:
864 ; RV32: # %bb.0: # %entry
865 ; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu
866 ; RV32-NEXT: vle32ff.v v8, (a0), v0.t
867 ; RV32-NEXT: csrr a0, vl
868 ; RV32-NEXT: sw a0, 0(a2)
871 ; RV64-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32:
872 ; RV64: # %bb.0: # %entry
873 ; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, mu
874 ; RV64-NEXT: vle32ff.v v8, (a0), v0.t
875 ; RV64-NEXT: csrr a0, vl
876 ; RV64-NEXT: sd a0, 0(a2)
879 %a = call { <vscale x 16 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv16i32(
880 <vscale x 16 x i32> %0,
882 <vscale x 16 x i1> %2,
884 %b = extractvalue { <vscale x 16 x i32>, iXLen } %a, 0
885 %c = extractvalue { <vscale x 16 x i32>, iXLen } %a, 1
886 store iXLen %c, iXLen* %4
888 ret <vscale x 16 x i32> %b
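; e32 floating-point (float) element vectors.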
891 declare { <vscale x 1 x float>, iXLen } @llvm.riscv.vleff.nxv1f32(
892 <vscale x 1 x float>,
896 define <vscale x 1 x float> @intrinsic_vleff_v_nxv1f32_nxv1f32(ptr %0, iXLen %1, iXLen* %2) nounwind {
897 ; RV32-LABEL: intrinsic_vleff_v_nxv1f32_nxv1f32:
898 ; RV32: # %bb.0: # %entry
899 ; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
900 ; RV32-NEXT: vle32ff.v v8, (a0)
901 ; RV32-NEXT: csrr a0, vl
902 ; RV32-NEXT: sw a0, 0(a2)
905 ; RV64-LABEL: intrinsic_vleff_v_nxv1f32_nxv1f32:
906 ; RV64: # %bb.0: # %entry
907 ; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
908 ; RV64-NEXT: vle32ff.v v8, (a0)
909 ; RV64-NEXT: csrr a0, vl
910 ; RV64-NEXT: sd a0, 0(a2)
913 %a = call { <vscale x 1 x float>, iXLen } @llvm.riscv.vleff.nxv1f32(
914 <vscale x 1 x float> undef,
917 %b = extractvalue { <vscale x 1 x float>, iXLen } %a, 0
918 %c = extractvalue { <vscale x 1 x float>, iXLen } %a, 1
919 store iXLen %c, iXLen* %2
920 ret <vscale x 1 x float> %b
923 declare { <vscale x 1 x float>, iXLen } @llvm.riscv.vleff.mask.nxv1f32(
924 <vscale x 1 x float>,
930 define <vscale x 1 x float> @intrinsic_vleff_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
931 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32:
932 ; RV32: # %bb.0: # %entry
933 ; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
934 ; RV32-NEXT: vle32ff.v v8, (a0), v0.t
935 ; RV32-NEXT: csrr a0, vl
936 ; RV32-NEXT: sw a0, 0(a2)
939 ; RV64-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32:
940 ; RV64: # %bb.0: # %entry
941 ; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
942 ; RV64-NEXT: vle32ff.v v8, (a0), v0.t
943 ; RV64-NEXT: csrr a0, vl
944 ; RV64-NEXT: sd a0, 0(a2)
947 %a = call { <vscale x 1 x float>, iXLen } @llvm.riscv.vleff.mask.nxv1f32(
948 <vscale x 1 x float> %0,
950 <vscale x 1 x i1> %2,
952 %b = extractvalue { <vscale x 1 x float>, iXLen } %a, 0
953 %c = extractvalue { <vscale x 1 x float>, iXLen } %a, 1
954 store iXLen %c, iXLen* %4
956 ret <vscale x 1 x float> %b
959 declare { <vscale x 2 x float>, iXLen } @llvm.riscv.vleff.nxv2f32(
960 <vscale x 2 x float>,
964 define <vscale x 2 x float> @intrinsic_vleff_v_nxv2f32_nxv2f32(ptr %0, iXLen %1, iXLen* %2) nounwind {
965 ; RV32-LABEL: intrinsic_vleff_v_nxv2f32_nxv2f32:
966 ; RV32: # %bb.0: # %entry
967 ; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, ma
968 ; RV32-NEXT: vle32ff.v v8, (a0)
969 ; RV32-NEXT: csrr a0, vl
970 ; RV32-NEXT: sw a0, 0(a2)
973 ; RV64-LABEL: intrinsic_vleff_v_nxv2f32_nxv2f32:
974 ; RV64: # %bb.0: # %entry
975 ; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma
976 ; RV64-NEXT: vle32ff.v v8, (a0)
977 ; RV64-NEXT: csrr a0, vl
978 ; RV64-NEXT: sd a0, 0(a2)
981 %a = call { <vscale x 2 x float>, iXLen } @llvm.riscv.vleff.nxv2f32(
982 <vscale x 2 x float> undef,
985 %b = extractvalue { <vscale x 2 x float>, iXLen } %a, 0
986 %c = extractvalue { <vscale x 2 x float>, iXLen } %a, 1
987 store iXLen %c, iXLen* %2
988 ret <vscale x 2 x float> %b
991 declare { <vscale x 2 x float>, iXLen } @llvm.riscv.vleff.mask.nxv2f32(
992 <vscale x 2 x float>,
998 define <vscale x 2 x float> @intrinsic_vleff_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
999 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32:
1000 ; RV32: # %bb.0: # %entry
1001 ; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1002 ; RV32-NEXT: vle32ff.v v8, (a0), v0.t
1003 ; RV32-NEXT: csrr a0, vl
1004 ; RV32-NEXT: sw a0, 0(a2)
1007 ; RV64-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32:
1008 ; RV64: # %bb.0: # %entry
1009 ; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1010 ; RV64-NEXT: vle32ff.v v8, (a0), v0.t
1011 ; RV64-NEXT: csrr a0, vl
1012 ; RV64-NEXT: sd a0, 0(a2)
1015 %a = call { <vscale x 2 x float>, iXLen } @llvm.riscv.vleff.mask.nxv2f32(
1016 <vscale x 2 x float> %0,
1018 <vscale x 2 x i1> %2,
1020 %b = extractvalue { <vscale x 2 x float>, iXLen } %a, 0
1021 %c = extractvalue { <vscale x 2 x float>, iXLen } %a, 1
1022 store iXLen %c, iXLen* %4
1024 ret <vscale x 2 x float> %b
1027 declare { <vscale x 4 x float>, iXLen } @llvm.riscv.vleff.nxv4f32(
1028 <vscale x 4 x float>,
1032 define <vscale x 4 x float> @intrinsic_vleff_v_nxv4f32_nxv4f32(ptr %0, iXLen %1, iXLen* %2) nounwind {
1033 ; RV32-LABEL: intrinsic_vleff_v_nxv4f32_nxv4f32:
1034 ; RV32: # %bb.0: # %entry
1035 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
1036 ; RV32-NEXT: vle32ff.v v8, (a0)
1037 ; RV32-NEXT: csrr a0, vl
1038 ; RV32-NEXT: sw a0, 0(a2)
1041 ; RV64-LABEL: intrinsic_vleff_v_nxv4f32_nxv4f32:
1042 ; RV64: # %bb.0: # %entry
1043 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
1044 ; RV64-NEXT: vle32ff.v v8, (a0)
1045 ; RV64-NEXT: csrr a0, vl
1046 ; RV64-NEXT: sd a0, 0(a2)
1049 %a = call { <vscale x 4 x float>, iXLen } @llvm.riscv.vleff.nxv4f32(
1050 <vscale x 4 x float> undef,
1053 %b = extractvalue { <vscale x 4 x float>, iXLen } %a, 0
1054 %c = extractvalue { <vscale x 4 x float>, iXLen } %a, 1
1055 store iXLen %c, iXLen* %2
1056 ret <vscale x 4 x float> %b
1059 declare { <vscale x 4 x float>, iXLen } @llvm.riscv.vleff.mask.nxv4f32(
1060 <vscale x 4 x float>,
1066 define <vscale x 4 x float> @intrinsic_vleff_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
1067 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32:
1068 ; RV32: # %bb.0: # %entry
1069 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
1070 ; RV32-NEXT: vle32ff.v v8, (a0), v0.t
1071 ; RV32-NEXT: csrr a0, vl
1072 ; RV32-NEXT: sw a0, 0(a2)
1075 ; RV64-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32:
1076 ; RV64: # %bb.0: # %entry
1077 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
1078 ; RV64-NEXT: vle32ff.v v8, (a0), v0.t
1079 ; RV64-NEXT: csrr a0, vl
1080 ; RV64-NEXT: sd a0, 0(a2)
1083 %a = call { <vscale x 4 x float>, iXLen } @llvm.riscv.vleff.mask.nxv4f32(
1084 <vscale x 4 x float> %0,
1086 <vscale x 4 x i1> %2,
1088 %b = extractvalue { <vscale x 4 x float>, iXLen } %a, 0
1089 %c = extractvalue { <vscale x 4 x float>, iXLen } %a, 1
1090 store iXLen %c, iXLen* %4
1092 ret <vscale x 4 x float> %b
1095 declare { <vscale x 8 x float>, iXLen } @llvm.riscv.vleff.nxv8f32(
1096 <vscale x 8 x float>,
1100 define <vscale x 8 x float> @intrinsic_vleff_v_nxv8f32_nxv8f32(ptr %0, iXLen %1, iXLen* %2) nounwind {
1101 ; RV32-LABEL: intrinsic_vleff_v_nxv8f32_nxv8f32:
1102 ; RV32: # %bb.0: # %entry
1103 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
1104 ; RV32-NEXT: vle32ff.v v8, (a0)
1105 ; RV32-NEXT: csrr a0, vl
1106 ; RV32-NEXT: sw a0, 0(a2)
1109 ; RV64-LABEL: intrinsic_vleff_v_nxv8f32_nxv8f32:
1110 ; RV64: # %bb.0: # %entry
1111 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
1112 ; RV64-NEXT: vle32ff.v v8, (a0)
1113 ; RV64-NEXT: csrr a0, vl
1114 ; RV64-NEXT: sd a0, 0(a2)
1117 %a = call { <vscale x 8 x float>, iXLen } @llvm.riscv.vleff.nxv8f32(
1118 <vscale x 8 x float> undef,
1121 %b = extractvalue { <vscale x 8 x float>, iXLen } %a, 0
1122 %c = extractvalue { <vscale x 8 x float>, iXLen } %a, 1
1123 store iXLen %c, iXLen* %2
1124 ret <vscale x 8 x float> %b
1127 declare { <vscale x 8 x float>, iXLen } @llvm.riscv.vleff.mask.nxv8f32(
1128 <vscale x 8 x float>,
1134 define <vscale x 8 x float> @intrinsic_vleff_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
1135 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32:
1136 ; RV32: # %bb.0: # %entry
1137 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
1138 ; RV32-NEXT: vle32ff.v v8, (a0), v0.t
1139 ; RV32-NEXT: csrr a0, vl
1140 ; RV32-NEXT: sw a0, 0(a2)
1143 ; RV64-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32:
1144 ; RV64: # %bb.0: # %entry
1145 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu
1146 ; RV64-NEXT: vle32ff.v v8, (a0), v0.t
1147 ; RV64-NEXT: csrr a0, vl
1148 ; RV64-NEXT: sd a0, 0(a2)
1151 %a = call { <vscale x 8 x float>, iXLen } @llvm.riscv.vleff.mask.nxv8f32(
1152 <vscale x 8 x float> %0,
1154 <vscale x 8 x i1> %2,
1156 %b = extractvalue { <vscale x 8 x float>, iXLen } %a, 0
1157 %c = extractvalue { <vscale x 8 x float>, iXLen } %a, 1
1158 store iXLen %c, iXLen* %4
1160 ret <vscale x 8 x float> %b
1163 declare { <vscale x 16 x float>, iXLen } @llvm.riscv.vleff.nxv16f32(
1164 <vscale x 16 x float>,
1168 define <vscale x 16 x float> @intrinsic_vleff_v_nxv16f32_nxv16f32(ptr %0, iXLen %1, iXLen* %2) nounwind {
1169 ; RV32-LABEL: intrinsic_vleff_v_nxv16f32_nxv16f32:
1170 ; RV32: # %bb.0: # %entry
1171 ; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
1172 ; RV32-NEXT: vle32ff.v v8, (a0)
1173 ; RV32-NEXT: csrr a0, vl
1174 ; RV32-NEXT: sw a0, 0(a2)
1177 ; RV64-LABEL: intrinsic_vleff_v_nxv16f32_nxv16f32:
1178 ; RV64: # %bb.0: # %entry
1179 ; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
1180 ; RV64-NEXT: vle32ff.v v8, (a0)
1181 ; RV64-NEXT: csrr a0, vl
1182 ; RV64-NEXT: sd a0, 0(a2)
1185 %a = call { <vscale x 16 x float>, iXLen } @llvm.riscv.vleff.nxv16f32(
1186 <vscale x 16 x float> undef,
1189 %b = extractvalue { <vscale x 16 x float>, iXLen } %a, 0
1190 %c = extractvalue { <vscale x 16 x float>, iXLen } %a, 1
1191 store iXLen %c, iXLen* %2
1192 ret <vscale x 16 x float> %b
1195 declare { <vscale x 16 x float>, iXLen } @llvm.riscv.vleff.mask.nxv16f32(
1196 <vscale x 16 x float>,
1202 define <vscale x 16 x float> @intrinsic_vleff_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
1203 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32:
1204 ; RV32: # %bb.0: # %entry
1205 ; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu
1206 ; RV32-NEXT: vle32ff.v v8, (a0), v0.t
1207 ; RV32-NEXT: csrr a0, vl
1208 ; RV32-NEXT: sw a0, 0(a2)
1211 ; RV64-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32:
1212 ; RV64: # %bb.0: # %entry
1213 ; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, mu
1214 ; RV64-NEXT: vle32ff.v v8, (a0), v0.t
1215 ; RV64-NEXT: csrr a0, vl
1216 ; RV64-NEXT: sd a0, 0(a2)
1219 %a = call { <vscale x 16 x float>, iXLen } @llvm.riscv.vleff.mask.nxv16f32(
1220 <vscale x 16 x float> %0,
1222 <vscale x 16 x i1> %2,
1224 %b = extractvalue { <vscale x 16 x float>, iXLen } %a, 0
1225 %c = extractvalue { <vscale x 16 x float>, iXLen } %a, 1
1226 store iXLen %c, iXLen* %4
1228 ret <vscale x 16 x float> %b
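; e16 integer element vectors (vle16ff.v).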
1231 declare { <vscale x 1 x i16>, iXLen } @llvm.riscv.vleff.nxv1i16(
1236 define <vscale x 1 x i16> @intrinsic_vleff_v_nxv1i16_nxv1i16(ptr %0, iXLen %1, iXLen* %2) nounwind {
1237 ; RV32-LABEL: intrinsic_vleff_v_nxv1i16_nxv1i16:
1238 ; RV32: # %bb.0: # %entry
1239 ; RV32-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
1240 ; RV32-NEXT: vle16ff.v v8, (a0)
1241 ; RV32-NEXT: csrr a0, vl
1242 ; RV32-NEXT: sw a0, 0(a2)
1245 ; RV64-LABEL: intrinsic_vleff_v_nxv1i16_nxv1i16:
1246 ; RV64: # %bb.0: # %entry
1247 ; RV64-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
1248 ; RV64-NEXT: vle16ff.v v8, (a0)
1249 ; RV64-NEXT: csrr a0, vl
1250 ; RV64-NEXT: sd a0, 0(a2)
1253 %a = call { <vscale x 1 x i16>, iXLen } @llvm.riscv.vleff.nxv1i16(
1254 <vscale x 1 x i16> undef,
1257 %b = extractvalue { <vscale x 1 x i16>, iXLen } %a, 0
1258 %c = extractvalue { <vscale x 1 x i16>, iXLen } %a, 1
1259 store iXLen %c, iXLen* %2
1260 ret <vscale x 1 x i16> %b
1263 declare { <vscale x 1 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv1i16(
1270 define <vscale x 1 x i16> @intrinsic_vleff_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
1271 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16:
1272 ; RV32: # %bb.0: # %entry
1273 ; RV32-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
1274 ; RV32-NEXT: vle16ff.v v8, (a0), v0.t
1275 ; RV32-NEXT: csrr a0, vl
1276 ; RV32-NEXT: sw a0, 0(a2)
1279 ; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16:
1280 ; RV64: # %bb.0: # %entry
1281 ; RV64-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
1282 ; RV64-NEXT: vle16ff.v v8, (a0), v0.t
1283 ; RV64-NEXT: csrr a0, vl
1284 ; RV64-NEXT: sd a0, 0(a2)
1287 %a = call { <vscale x 1 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv1i16(
1288 <vscale x 1 x i16> %0,
1290 <vscale x 1 x i1> %2,
1292 %b = extractvalue { <vscale x 1 x i16>, iXLen } %a, 0
1293 %c = extractvalue { <vscale x 1 x i16>, iXLen } %a, 1
1294 store iXLen %c, iXLen* %4
1296 ret <vscale x 1 x i16> %b
1299 declare { <vscale x 2 x i16>, iXLen } @llvm.riscv.vleff.nxv2i16(
1304 define <vscale x 2 x i16> @intrinsic_vleff_v_nxv2i16_nxv2i16(ptr %0, iXLen %1, iXLen* %2) nounwind {
1305 ; RV32-LABEL: intrinsic_vleff_v_nxv2i16_nxv2i16:
1306 ; RV32: # %bb.0: # %entry
1307 ; RV32-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
1308 ; RV32-NEXT: vle16ff.v v8, (a0)
1309 ; RV32-NEXT: csrr a0, vl
1310 ; RV32-NEXT: sw a0, 0(a2)
1313 ; RV64-LABEL: intrinsic_vleff_v_nxv2i16_nxv2i16:
1314 ; RV64: # %bb.0: # %entry
1315 ; RV64-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
1316 ; RV64-NEXT: vle16ff.v v8, (a0)
1317 ; RV64-NEXT: csrr a0, vl
1318 ; RV64-NEXT: sd a0, 0(a2)
1321 %a = call { <vscale x 2 x i16>, iXLen } @llvm.riscv.vleff.nxv2i16(
1322 <vscale x 2 x i16> undef,
1325 %b = extractvalue { <vscale x 2 x i16>, iXLen } %a, 0
1326 %c = extractvalue { <vscale x 2 x i16>, iXLen } %a, 1
1327 store iXLen %c, iXLen* %2
1328 ret <vscale x 2 x i16> %b
1331 declare { <vscale x 2 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv2i16(
1338 define <vscale x 2 x i16> @intrinsic_vleff_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
1339 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16:
1340 ; RV32: # %bb.0: # %entry
1341 ; RV32-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
1342 ; RV32-NEXT: vle16ff.v v8, (a0), v0.t
1343 ; RV32-NEXT: csrr a0, vl
1344 ; RV32-NEXT: sw a0, 0(a2)
1347 ; RV64-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16:
1348 ; RV64: # %bb.0: # %entry
1349 ; RV64-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
1350 ; RV64-NEXT: vle16ff.v v8, (a0), v0.t
1351 ; RV64-NEXT: csrr a0, vl
1352 ; RV64-NEXT: sd a0, 0(a2)
1355 %a = call { <vscale x 2 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv2i16(
1356 <vscale x 2 x i16> %0,
1358 <vscale x 2 x i1> %2,
1360 %b = extractvalue { <vscale x 2 x i16>, iXLen } %a, 0
1361 %c = extractvalue { <vscale x 2 x i16>, iXLen } %a, 1
1362 store iXLen %c, iXLen* %4
1364 ret <vscale x 2 x i16> %b
1367 declare { <vscale x 4 x i16>, iXLen } @llvm.riscv.vleff.nxv4i16(
1372 define <vscale x 4 x i16> @intrinsic_vleff_v_nxv4i16_nxv4i16(ptr %0, iXLen %1, iXLen* %2) nounwind {
1373 ; RV32-LABEL: intrinsic_vleff_v_nxv4i16_nxv4i16:
1374 ; RV32: # %bb.0: # %entry
1375 ; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1376 ; RV32-NEXT: vle16ff.v v8, (a0)
1377 ; RV32-NEXT: csrr a0, vl
1378 ; RV32-NEXT: sw a0, 0(a2)
1381 ; RV64-LABEL: intrinsic_vleff_v_nxv4i16_nxv4i16:
1382 ; RV64: # %bb.0: # %entry
1383 ; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1384 ; RV64-NEXT: vle16ff.v v8, (a0)
1385 ; RV64-NEXT: csrr a0, vl
1386 ; RV64-NEXT: sd a0, 0(a2)
1389 %a = call { <vscale x 4 x i16>, iXLen } @llvm.riscv.vleff.nxv4i16(
1390 <vscale x 4 x i16> undef,
1393 %b = extractvalue { <vscale x 4 x i16>, iXLen } %a, 0
1394 %c = extractvalue { <vscale x 4 x i16>, iXLen } %a, 1
1395 store iXLen %c, iXLen* %2
1396 ret <vscale x 4 x i16> %b
1399 declare { <vscale x 4 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv4i16(
1406 define <vscale x 4 x i16> @intrinsic_vleff_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
1407 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16:
1408 ; RV32: # %bb.0: # %entry
1409 ; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu
1410 ; RV32-NEXT: vle16ff.v v8, (a0), v0.t
1411 ; RV32-NEXT: csrr a0, vl
1412 ; RV32-NEXT: sw a0, 0(a2)
1415 ; RV64-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16:
1416 ; RV64: # %bb.0: # %entry
1417 ; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu
1418 ; RV64-NEXT: vle16ff.v v8, (a0), v0.t
1419 ; RV64-NEXT: csrr a0, vl
1420 ; RV64-NEXT: sd a0, 0(a2)
1423 %a = call { <vscale x 4 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv4i16(
1424 <vscale x 4 x i16> %0,
1426 <vscale x 4 x i1> %2,
1428 %b = extractvalue { <vscale x 4 x i16>, iXLen } %a, 0
1429 %c = extractvalue { <vscale x 4 x i16>, iXLen } %a, 1
1430 store iXLen %c, iXLen* %4
1432 ret <vscale x 4 x i16> %b
1435 declare { <vscale x 8 x i16>, iXLen } @llvm.riscv.vleff.nxv8i16(
1440 define <vscale x 8 x i16> @intrinsic_vleff_v_nxv8i16_nxv8i16(ptr %0, iXLen %1, iXLen* %2) nounwind {
1441 ; RV32-LABEL: intrinsic_vleff_v_nxv8i16_nxv8i16:
1442 ; RV32: # %bb.0: # %entry
1443 ; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
1444 ; RV32-NEXT: vle16ff.v v8, (a0)
1445 ; RV32-NEXT: csrr a0, vl
1446 ; RV32-NEXT: sw a0, 0(a2)
1449 ; RV64-LABEL: intrinsic_vleff_v_nxv8i16_nxv8i16:
1450 ; RV64: # %bb.0: # %entry
1451 ; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
1452 ; RV64-NEXT: vle16ff.v v8, (a0)
1453 ; RV64-NEXT: csrr a0, vl
1454 ; RV64-NEXT: sd a0, 0(a2)
1457 %a = call { <vscale x 8 x i16>, iXLen } @llvm.riscv.vleff.nxv8i16(
1458 <vscale x 8 x i16> undef,
1461 %b = extractvalue { <vscale x 8 x i16>, iXLen } %a, 0
1462 %c = extractvalue { <vscale x 8 x i16>, iXLen } %a, 1
1463 store iXLen %c, iXLen* %2
1464 ret <vscale x 8 x i16> %b
1467 declare { <vscale x 8 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv8i16(
1474 define <vscale x 8 x i16> @intrinsic_vleff_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
1475 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16:
1476 ; RV32: # %bb.0: # %entry
1477 ; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu
1478 ; RV32-NEXT: vle16ff.v v8, (a0), v0.t
1479 ; RV32-NEXT: csrr a0, vl
1480 ; RV32-NEXT: sw a0, 0(a2)
1483 ; RV64-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16:
1484 ; RV64: # %bb.0: # %entry
1485 ; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu
1486 ; RV64-NEXT: vle16ff.v v8, (a0), v0.t
1487 ; RV64-NEXT: csrr a0, vl
1488 ; RV64-NEXT: sd a0, 0(a2)
1491 %a = call { <vscale x 8 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv8i16(
1492 <vscale x 8 x i16> %0,
1494 <vscale x 8 x i1> %2,
1496 %b = extractvalue { <vscale x 8 x i16>, iXLen } %a, 0
1497 %c = extractvalue { <vscale x 8 x i16>, iXLen } %a, 1
1498 store iXLen %c, iXLen* %4
1500 ret <vscale x 8 x i16> %b
1503 declare { <vscale x 16 x i16>, iXLen } @llvm.riscv.vleff.nxv16i16(
1504 <vscale x 16 x i16>,
1508 define <vscale x 16 x i16> @intrinsic_vleff_v_nxv16i16_nxv16i16(ptr %0, iXLen %1, iXLen* %2) nounwind {
1509 ; RV32-LABEL: intrinsic_vleff_v_nxv16i16_nxv16i16:
1510 ; RV32: # %bb.0: # %entry
1511 ; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, ma
1512 ; RV32-NEXT: vle16ff.v v8, (a0)
1513 ; RV32-NEXT: csrr a0, vl
1514 ; RV32-NEXT: sw a0, 0(a2)
1517 ; RV64-LABEL: intrinsic_vleff_v_nxv16i16_nxv16i16:
1518 ; RV64: # %bb.0: # %entry
1519 ; RV64-NEXT: vsetvli zero, a1, e16, m4, ta, ma
1520 ; RV64-NEXT: vle16ff.v v8, (a0)
1521 ; RV64-NEXT: csrr a0, vl
1522 ; RV64-NEXT: sd a0, 0(a2)
1525 %a = call { <vscale x 16 x i16>, iXLen } @llvm.riscv.vleff.nxv16i16(
1526 <vscale x 16 x i16> undef,
1529 %b = extractvalue { <vscale x 16 x i16>, iXLen } %a, 0
1530 %c = extractvalue { <vscale x 16 x i16>, iXLen } %a, 1
1531 store iXLen %c, iXLen* %2
1532 ret <vscale x 16 x i16> %b
1535 declare { <vscale x 16 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv16i16(
1536 <vscale x 16 x i16>,
1542 define <vscale x 16 x i16> @intrinsic_vleff_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
1543 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16:
1544 ; RV32: # %bb.0: # %entry
1545 ; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, mu
1546 ; RV32-NEXT: vle16ff.v v8, (a0), v0.t
1547 ; RV32-NEXT: csrr a0, vl
1548 ; RV32-NEXT: sw a0, 0(a2)
1551 ; RV64-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16:
1552 ; RV64: # %bb.0: # %entry
1553 ; RV64-NEXT: vsetvli zero, a1, e16, m4, ta, mu
1554 ; RV64-NEXT: vle16ff.v v8, (a0), v0.t
1555 ; RV64-NEXT: csrr a0, vl
1556 ; RV64-NEXT: sd a0, 0(a2)
1559 %a = call { <vscale x 16 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv16i16(
1560 <vscale x 16 x i16> %0,
1562 <vscale x 16 x i1> %2,
1564 %b = extractvalue { <vscale x 16 x i16>, iXLen } %a, 0
1565 %c = extractvalue { <vscale x 16 x i16>, iXLen } %a, 1
1566 store iXLen %c, iXLen* %4
1568 ret <vscale x 16 x i16> %b
1571 declare { <vscale x 32 x i16>, iXLen } @llvm.riscv.vleff.nxv32i16(
1572 <vscale x 32 x i16>,
1576 define <vscale x 32 x i16> @intrinsic_vleff_v_nxv32i16_nxv32i16(ptr %0, iXLen %1, iXLen* %2) nounwind {
1577 ; RV32-LABEL: intrinsic_vleff_v_nxv32i16_nxv32i16:
1578 ; RV32: # %bb.0: # %entry
1579 ; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, ma
1580 ; RV32-NEXT: vle16ff.v v8, (a0)
1581 ; RV32-NEXT: csrr a0, vl
1582 ; RV32-NEXT: sw a0, 0(a2)
1585 ; RV64-LABEL: intrinsic_vleff_v_nxv32i16_nxv32i16:
1586 ; RV64: # %bb.0: # %entry
1587 ; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, ma
1588 ; RV64-NEXT: vle16ff.v v8, (a0)
1589 ; RV64-NEXT: csrr a0, vl
1590 ; RV64-NEXT: sd a0, 0(a2)
1593 %a = call { <vscale x 32 x i16>, iXLen } @llvm.riscv.vleff.nxv32i16(
1594 <vscale x 32 x i16> undef,
1597 %b = extractvalue { <vscale x 32 x i16>, iXLen } %a, 0
1598 %c = extractvalue { <vscale x 32 x i16>, iXLen } %a, 1
1599 store iXLen %c, iXLen* %2
1600 ret <vscale x 32 x i16> %b
1603 declare { <vscale x 32 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv32i16(
1604 <vscale x 32 x i16>,
1610 define <vscale x 32 x i16> @intrinsic_vleff_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3, iXLen* %4) nounwind {
1611 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16:
1612 ; RV32: # %bb.0: # %entry
1613 ; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, mu
1614 ; RV32-NEXT: vle16ff.v v8, (a0), v0.t
1615 ; RV32-NEXT: csrr a0, vl
1616 ; RV32-NEXT: sw a0, 0(a2)
1619 ; RV64-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16:
1620 ; RV64: # %bb.0: # %entry
1621 ; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, mu
1622 ; RV64-NEXT: vle16ff.v v8, (a0), v0.t
1623 ; RV64-NEXT: csrr a0, vl
1624 ; RV64-NEXT: sd a0, 0(a2)
1627 %a = call { <vscale x 32 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv32i16(
1628 <vscale x 32 x i16> %0,
1630 <vscale x 32 x i1> %2,
1632 %b = extractvalue { <vscale x 32 x i16>, iXLen } %a, 0
1633 %c = extractvalue { <vscale x 32 x i16>, iXLen } %a, 1
1634 store iXLen %c, iXLen* %4
1636 ret <vscale x 32 x i16> %b
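; e16 half element vectors (enabled by +zvfh in the RUN lines).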
1639 declare { <vscale x 1 x half>, iXLen } @llvm.riscv.vleff.nxv1f16(
1640 <vscale x 1 x half>,
1644 define <vscale x 1 x half> @intrinsic_vleff_v_nxv1half_nxv1f16(ptr %0, iXLen %1, iXLen* %2) nounwind {
1645 ; RV32-LABEL: intrinsic_vleff_v_nxv1half_nxv1f16:
1646 ; RV32: # %bb.0: # %entry
1647 ; RV32-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
1648 ; RV32-NEXT: vle16ff.v v8, (a0)
1649 ; RV32-NEXT: csrr a0, vl
1650 ; RV32-NEXT: sw a0, 0(a2)
1653 ; RV64-LABEL: intrinsic_vleff_v_nxv1half_nxv1f16:
1654 ; RV64: # %bb.0: # %entry
1655 ; RV64-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
1656 ; RV64-NEXT: vle16ff.v v8, (a0)
1657 ; RV64-NEXT: csrr a0, vl
1658 ; RV64-NEXT: sd a0, 0(a2)
1661 %a = call { <vscale x 1 x half>, iXLen } @llvm.riscv.vleff.nxv1f16(
1662 <vscale x 1 x half> undef,
1665 %b = extractvalue { <vscale x 1 x half>, iXLen } %a, 0
1666 %c = extractvalue { <vscale x 1 x half>, iXLen } %a, 1
1667 store iXLen %c, iXLen* %2
1668 ret <vscale x 1 x half> %b
1671 declare { <vscale x 1 x half>, iXLen } @llvm.riscv.vleff.mask.nxv1f16(
1672 <vscale x 1 x half>,
1678 define <vscale x 1 x half> @intrinsic_vleff_mask_v_nxv1half_nxv1f16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
1679 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1half_nxv1f16:
1680 ; RV32: # %bb.0: # %entry
1681 ; RV32-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
1682 ; RV32-NEXT: vle16ff.v v8, (a0), v0.t
1683 ; RV32-NEXT: csrr a0, vl
1684 ; RV32-NEXT: sw a0, 0(a2)
1687 ; RV64-LABEL: intrinsic_vleff_mask_v_nxv1half_nxv1f16:
1688 ; RV64: # %bb.0: # %entry
1689 ; RV64-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
1690 ; RV64-NEXT: vle16ff.v v8, (a0), v0.t
1691 ; RV64-NEXT: csrr a0, vl
1692 ; RV64-NEXT: sd a0, 0(a2)
1695 %a = call { <vscale x 1 x half>, iXLen } @llvm.riscv.vleff.mask.nxv1f16(
1696 <vscale x 1 x half> %0,
1698 <vscale x 1 x i1> %2,
1700 %b = extractvalue { <vscale x 1 x half>, iXLen } %a, 0
1701 %c = extractvalue { <vscale x 1 x half>, iXLen } %a, 1
1702 store iXLen %c, iXLen* %4
1704 ret <vscale x 1 x half> %b
1707 declare { <vscale x 2 x half>, iXLen } @llvm.riscv.vleff.nxv2f16(
1708 <vscale x 2 x half>,
1712 define <vscale x 2 x half> @intrinsic_vleff_v_nxv2half_nxv2f16(ptr %0, iXLen %1, iXLen* %2) nounwind {
1713 ; RV32-LABEL: intrinsic_vleff_v_nxv2half_nxv2f16:
1714 ; RV32: # %bb.0: # %entry
1715 ; RV32-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
1716 ; RV32-NEXT: vle16ff.v v8, (a0)
1717 ; RV32-NEXT: csrr a0, vl
1718 ; RV32-NEXT: sw a0, 0(a2)
1721 ; RV64-LABEL: intrinsic_vleff_v_nxv2half_nxv2f16:
1722 ; RV64: # %bb.0: # %entry
1723 ; RV64-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
1724 ; RV64-NEXT: vle16ff.v v8, (a0)
1725 ; RV64-NEXT: csrr a0, vl
1726 ; RV64-NEXT: sd a0, 0(a2)
1729 %a = call { <vscale x 2 x half>, iXLen } @llvm.riscv.vleff.nxv2f16(
1730 <vscale x 2 x half> undef,
1733 %b = extractvalue { <vscale x 2 x half>, iXLen } %a, 0
1734 %c = extractvalue { <vscale x 2 x half>, iXLen } %a, 1
1735 store iXLen %c, iXLen* %2
1736 ret <vscale x 2 x half> %b
1739 declare { <vscale x 2 x half>, iXLen } @llvm.riscv.vleff.mask.nxv2f16(
1740 <vscale x 2 x half>,
1746 define <vscale x 2 x half> @intrinsic_vleff_mask_v_nxv2half_nxv2f16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
1747 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2half_nxv2f16:
1748 ; RV32: # %bb.0: # %entry
1749 ; RV32-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
1750 ; RV32-NEXT: vle16ff.v v8, (a0), v0.t
1751 ; RV32-NEXT: csrr a0, vl
1752 ; RV32-NEXT: sw a0, 0(a2)
1755 ; RV64-LABEL: intrinsic_vleff_mask_v_nxv2half_nxv2f16:
1756 ; RV64: # %bb.0: # %entry
1757 ; RV64-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
1758 ; RV64-NEXT: vle16ff.v v8, (a0), v0.t
1759 ; RV64-NEXT: csrr a0, vl
1760 ; RV64-NEXT: sd a0, 0(a2)
1763 %a = call { <vscale x 2 x half>, iXLen } @llvm.riscv.vleff.mask.nxv2f16(
1764 <vscale x 2 x half> %0,
1766 <vscale x 2 x i1> %2,
1768 %b = extractvalue { <vscale x 2 x half>, iXLen } %a, 0
1769 %c = extractvalue { <vscale x 2 x half>, iXLen } %a, 1
1770 store iXLen %c, iXLen* %4
1772 ret <vscale x 2 x half> %b
1775 declare { <vscale x 4 x half>, iXLen } @llvm.riscv.vleff.nxv4f16(
1776 <vscale x 4 x half>,
1780 define <vscale x 4 x half> @intrinsic_vleff_v_nxv4half_nxv4f16(ptr %0, iXLen %1, iXLen* %2) nounwind {
1781 ; RV32-LABEL: intrinsic_vleff_v_nxv4half_nxv4f16:
1782 ; RV32: # %bb.0: # %entry
1783 ; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1784 ; RV32-NEXT: vle16ff.v v8, (a0)
1785 ; RV32-NEXT: csrr a0, vl
1786 ; RV32-NEXT: sw a0, 0(a2)
1789 ; RV64-LABEL: intrinsic_vleff_v_nxv4half_nxv4f16:
1790 ; RV64: # %bb.0: # %entry
1791 ; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1792 ; RV64-NEXT: vle16ff.v v8, (a0)
1793 ; RV64-NEXT: csrr a0, vl
1794 ; RV64-NEXT: sd a0, 0(a2)
1797 %a = call { <vscale x 4 x half>, iXLen } @llvm.riscv.vleff.nxv4f16(
1798 <vscale x 4 x half> undef,
1801 %b = extractvalue { <vscale x 4 x half>, iXLen } %a, 0
1802 %c = extractvalue { <vscale x 4 x half>, iXLen } %a, 1
1803 store iXLen %c, iXLen* %2
1804 ret <vscale x 4 x half> %b
1807 declare { <vscale x 4 x half>, iXLen } @llvm.riscv.vleff.mask.nxv4f16(
1808 <vscale x 4 x half>,
1814 define <vscale x 4 x half> @intrinsic_vleff_mask_v_nxv4half_nxv4f16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
1815 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4half_nxv4f16:
1816 ; RV32: # %bb.0: # %entry
1817 ; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu
1818 ; RV32-NEXT: vle16ff.v v8, (a0), v0.t
1819 ; RV32-NEXT: csrr a0, vl
1820 ; RV32-NEXT: sw a0, 0(a2)
1823 ; RV64-LABEL: intrinsic_vleff_mask_v_nxv4half_nxv4f16:
1824 ; RV64: # %bb.0: # %entry
1825 ; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu
1826 ; RV64-NEXT: vle16ff.v v8, (a0), v0.t
1827 ; RV64-NEXT: csrr a0, vl
1828 ; RV64-NEXT: sd a0, 0(a2)
1831 %a = call { <vscale x 4 x half>, iXLen } @llvm.riscv.vleff.mask.nxv4f16(
1832 <vscale x 4 x half> %0,
1834 <vscale x 4 x i1> %2,
1836 %b = extractvalue { <vscale x 4 x half>, iXLen } %a, 0
1837 %c = extractvalue { <vscale x 4 x half>, iXLen } %a, 1
1838 store iXLen %c, iXLen* %4
1840 ret <vscale x 4 x half> %b
1843 declare { <vscale x 8 x half>, iXLen } @llvm.riscv.vleff.nxv8f16(
1844 <vscale x 8 x half>,
1848 define <vscale x 8 x half> @intrinsic_vleff_v_nxv8half_nxv8f16(ptr %0, iXLen %1, iXLen* %2) nounwind {
1849 ; RV32-LABEL: intrinsic_vleff_v_nxv8half_nxv8f16:
1850 ; RV32: # %bb.0: # %entry
1851 ; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
1852 ; RV32-NEXT: vle16ff.v v8, (a0)
1853 ; RV32-NEXT: csrr a0, vl
1854 ; RV32-NEXT: sw a0, 0(a2)
1857 ; RV64-LABEL: intrinsic_vleff_v_nxv8half_nxv8f16:
1858 ; RV64: # %bb.0: # %entry
1859 ; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
1860 ; RV64-NEXT: vle16ff.v v8, (a0)
1861 ; RV64-NEXT: csrr a0, vl
1862 ; RV64-NEXT: sd a0, 0(a2)
1865 %a = call { <vscale x 8 x half>, iXLen } @llvm.riscv.vleff.nxv8f16(
1866 <vscale x 8 x half> undef,
1869 %b = extractvalue { <vscale x 8 x half>, iXLen } %a, 0
1870 %c = extractvalue { <vscale x 8 x half>, iXLen } %a, 1
1871 store iXLen %c, iXLen* %2
1872 ret <vscale x 8 x half> %b
1875 declare { <vscale x 8 x half>, iXLen } @llvm.riscv.vleff.mask.nxv8f16(
1876 <vscale x 8 x half>,
1882 define <vscale x 8 x half> @intrinsic_vleff_mask_v_nxv8half_nxv8f16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
1883 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8half_nxv8f16:
1884 ; RV32: # %bb.0: # %entry
1885 ; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu
1886 ; RV32-NEXT: vle16ff.v v8, (a0), v0.t
1887 ; RV32-NEXT: csrr a0, vl
1888 ; RV32-NEXT: sw a0, 0(a2)
1891 ; RV64-LABEL: intrinsic_vleff_mask_v_nxv8half_nxv8f16:
1892 ; RV64: # %bb.0: # %entry
1893 ; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu
1894 ; RV64-NEXT: vle16ff.v v8, (a0), v0.t
1895 ; RV64-NEXT: csrr a0, vl
1896 ; RV64-NEXT: sd a0, 0(a2)
1899 %a = call { <vscale x 8 x half>, iXLen } @llvm.riscv.vleff.mask.nxv8f16(
1900 <vscale x 8 x half> %0,
1902 <vscale x 8 x i1> %2,
1904 %b = extractvalue { <vscale x 8 x half>, iXLen } %a, 0
1905 %c = extractvalue { <vscale x 8 x half>, iXLen } %a, 1
1906 store iXLen %c, iXLen* %4
1908 ret <vscale x 8 x half> %b
declare { <vscale x 16 x half>, iXLen } @llvm.riscv.vleff.nxv16f16(
  <vscale x 16 x half>,
  ptr,
  iXLen);

define <vscale x 16 x half> @intrinsic_vleff_v_nxv16half_nxv16f16(ptr %0, iXLen %1, iXLen* %2) nounwind {
; RV32-LABEL: intrinsic_vleff_v_nxv16half_nxv16f16:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; RV32-NEXT:    vle16ff.v v8, (a0)
; RV32-NEXT:    csrr a0, vl
; RV32-NEXT:    sw a0, 0(a2)
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vleff_v_nxv16half_nxv16f16:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; RV64-NEXT:    vle16ff.v v8, (a0)
; RV64-NEXT:    csrr a0, vl
; RV64-NEXT:    sd a0, 0(a2)
; RV64-NEXT:    ret
entry:
  %a = call { <vscale x 16 x half>, iXLen } @llvm.riscv.vleff.nxv16f16(
    <vscale x 16 x half> undef,
    ptr %0,
    iXLen %1)
  %b = extractvalue { <vscale x 16 x half>, iXLen } %a, 0
  %c = extractvalue { <vscale x 16 x half>, iXLen } %a, 1
  store iXLen %c, iXLen* %2
  ret <vscale x 16 x half> %b
}

declare { <vscale x 16 x half>, iXLen } @llvm.riscv.vleff.mask.nxv16f16(
  <vscale x 16 x half>,
  ptr,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x half> @intrinsic_vleff_mask_v_nxv16half_nxv16f16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
; RV32-LABEL: intrinsic_vleff_mask_v_nxv16half_nxv16f16:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
; RV32-NEXT:    csrr a0, vl
; RV32-NEXT:    sw a0, 0(a2)
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vleff_mask_v_nxv16half_nxv16f16:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
; RV64-NEXT:    csrr a0, vl
; RV64-NEXT:    sd a0, 0(a2)
; RV64-NEXT:    ret
entry:
  %a = call { <vscale x 16 x half>, iXLen } @llvm.riscv.vleff.mask.nxv16f16(
    <vscale x 16 x half> %0,
    ptr %1,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)
  %b = extractvalue { <vscale x 16 x half>, iXLen } %a, 0
  %c = extractvalue { <vscale x 16 x half>, iXLen } %a, 1
  store iXLen %c, iXLen* %4

  ret <vscale x 16 x half> %b
}

declare { <vscale x 32 x half>, iXLen } @llvm.riscv.vleff.nxv32f16(
  <vscale x 32 x half>,
  ptr,
  iXLen);

define <vscale x 32 x half> @intrinsic_vleff_v_nxv32half_nxv32f16(ptr %0, iXLen %1, iXLen* %2) nounwind {
; RV32-LABEL: intrinsic_vleff_v_nxv32half_nxv32f16:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; RV32-NEXT:    vle16ff.v v8, (a0)
; RV32-NEXT:    csrr a0, vl
; RV32-NEXT:    sw a0, 0(a2)
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vleff_v_nxv32half_nxv32f16:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; RV64-NEXT:    vle16ff.v v8, (a0)
; RV64-NEXT:    csrr a0, vl
; RV64-NEXT:    sd a0, 0(a2)
; RV64-NEXT:    ret
entry:
  %a = call { <vscale x 32 x half>, iXLen } @llvm.riscv.vleff.nxv32f16(
    <vscale x 32 x half> undef,
    ptr %0,
    iXLen %1)
  %b = extractvalue { <vscale x 32 x half>, iXLen } %a, 0
  %c = extractvalue { <vscale x 32 x half>, iXLen } %a, 1
  store iXLen %c, iXLen* %2
  ret <vscale x 32 x half> %b
}

declare { <vscale x 32 x half>, iXLen } @llvm.riscv.vleff.mask.nxv32f16(
  <vscale x 32 x half>,
  ptr,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x half> @intrinsic_vleff_mask_v_nxv32half_nxv32f16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3, iXLen* %4) nounwind {
; RV32-LABEL: intrinsic_vleff_mask_v_nxv32half_nxv32f16:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
; RV32-NEXT:    csrr a0, vl
; RV32-NEXT:    sw a0, 0(a2)
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vleff_mask_v_nxv32half_nxv32f16:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
; RV64-NEXT:    csrr a0, vl
; RV64-NEXT:    sd a0, 0(a2)
; RV64-NEXT:    ret
entry:
  %a = call { <vscale x 32 x half>, iXLen } @llvm.riscv.vleff.mask.nxv32f16(
    <vscale x 32 x half> %0,
    ptr %1,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)
  %b = extractvalue { <vscale x 32 x half>, iXLen } %a, 0
  %c = extractvalue { <vscale x 32 x half>, iXLen } %a, 1
  store iXLen %c, iXLen* %4

  ret <vscale x 32 x half> %b
}

declare { <vscale x 1 x i8>, iXLen } @llvm.riscv.vleff.nxv1i8(
  <vscale x 1 x i8>,
  ptr,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vleff_v_nxv1i8_nxv1i8(ptr %0, iXLen %1, iXLen* %2) nounwind {
; RV32-LABEL: intrinsic_vleff_v_nxv1i8_nxv1i8:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; RV32-NEXT:    vle8ff.v v8, (a0)
; RV32-NEXT:    csrr a0, vl
; RV32-NEXT:    sw a0, 0(a2)
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vleff_v_nxv1i8_nxv1i8:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; RV64-NEXT:    vle8ff.v v8, (a0)
; RV64-NEXT:    csrr a0, vl
; RV64-NEXT:    sd a0, 0(a2)
; RV64-NEXT:    ret
entry:
  %a = call { <vscale x 1 x i8>, iXLen } @llvm.riscv.vleff.nxv1i8(
    <vscale x 1 x i8> undef,
    ptr %0,
    iXLen %1)
  %b = extractvalue { <vscale x 1 x i8>, iXLen } %a, 0
  %c = extractvalue { <vscale x 1 x i8>, iXLen } %a, 1
  store iXLen %c, iXLen* %2
  ret <vscale x 1 x i8> %b
}

declare { <vscale x 1 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv1i8(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vleff_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; RV32-NEXT:    vle8ff.v v8, (a0), v0.t
; RV32-NEXT:    csrr a0, vl
; RV32-NEXT:    sw a0, 0(a2)
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; RV64-NEXT:    vle8ff.v v8, (a0), v0.t
; RV64-NEXT:    csrr a0, vl
; RV64-NEXT:    sd a0, 0(a2)
; RV64-NEXT:    ret
entry:
  %a = call { <vscale x 1 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    ptr %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)
  %b = extractvalue { <vscale x 1 x i8>, iXLen } %a, 0
  %c = extractvalue { <vscale x 1 x i8>, iXLen } %a, 1
  store iXLen %c, iXLen* %4

  ret <vscale x 1 x i8> %b
}

declare { <vscale x 2 x i8>, iXLen } @llvm.riscv.vleff.nxv2i8(
  <vscale x 2 x i8>,
  ptr,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vleff_v_nxv2i8_nxv2i8(ptr %0, iXLen %1, iXLen* %2) nounwind {
; RV32-LABEL: intrinsic_vleff_v_nxv2i8_nxv2i8:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; RV32-NEXT:    vle8ff.v v8, (a0)
; RV32-NEXT:    csrr a0, vl
; RV32-NEXT:    sw a0, 0(a2)
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vleff_v_nxv2i8_nxv2i8:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; RV64-NEXT:    vle8ff.v v8, (a0)
; RV64-NEXT:    csrr a0, vl
; RV64-NEXT:    sd a0, 0(a2)
; RV64-NEXT:    ret
entry:
  %a = call { <vscale x 2 x i8>, iXLen } @llvm.riscv.vleff.nxv2i8(
    <vscale x 2 x i8> undef,
    ptr %0,
    iXLen %1)
  %b = extractvalue { <vscale x 2 x i8>, iXLen } %a, 0
  %c = extractvalue { <vscale x 2 x i8>, iXLen } %a, 1
  store iXLen %c, iXLen* %2
  ret <vscale x 2 x i8> %b
}

declare { <vscale x 2 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv2i8(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vleff_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; RV32-NEXT:    vle8ff.v v8, (a0), v0.t
; RV32-NEXT:    csrr a0, vl
; RV32-NEXT:    sw a0, 0(a2)
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; RV64-NEXT:    vle8ff.v v8, (a0), v0.t
; RV64-NEXT:    csrr a0, vl
; RV64-NEXT:    sd a0, 0(a2)
; RV64-NEXT:    ret
entry:
  %a = call { <vscale x 2 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv2i8(
    <vscale x 2 x i8> %0,
    ptr %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)
  %b = extractvalue { <vscale x 2 x i8>, iXLen } %a, 0
  %c = extractvalue { <vscale x 2 x i8>, iXLen } %a, 1
  store iXLen %c, iXLen* %4

  ret <vscale x 2 x i8> %b
}

declare { <vscale x 4 x i8>, iXLen } @llvm.riscv.vleff.nxv4i8(
  <vscale x 4 x i8>,
  ptr,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vleff_v_nxv4i8_nxv4i8(ptr %0, iXLen %1, iXLen* %2) nounwind {
; RV32-LABEL: intrinsic_vleff_v_nxv4i8_nxv4i8:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; RV32-NEXT:    vle8ff.v v8, (a0)
; RV32-NEXT:    csrr a0, vl
; RV32-NEXT:    sw a0, 0(a2)
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vleff_v_nxv4i8_nxv4i8:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; RV64-NEXT:    vle8ff.v v8, (a0)
; RV64-NEXT:    csrr a0, vl
; RV64-NEXT:    sd a0, 0(a2)
; RV64-NEXT:    ret
entry:
  %a = call { <vscale x 4 x i8>, iXLen } @llvm.riscv.vleff.nxv4i8(
    <vscale x 4 x i8> undef,
    ptr %0,
    iXLen %1)
  %b = extractvalue { <vscale x 4 x i8>, iXLen } %a, 0
  %c = extractvalue { <vscale x 4 x i8>, iXLen } %a, 1
  store iXLen %c, iXLen* %2
  ret <vscale x 4 x i8> %b
}

declare { <vscale x 4 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv4i8(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vleff_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; RV32-NEXT:    vle8ff.v v8, (a0), v0.t
; RV32-NEXT:    csrr a0, vl
; RV32-NEXT:    sw a0, 0(a2)
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; RV64-NEXT:    vle8ff.v v8, (a0), v0.t
; RV64-NEXT:    csrr a0, vl
; RV64-NEXT:    sd a0, 0(a2)
; RV64-NEXT:    ret
entry:
  %a = call { <vscale x 4 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv4i8(
    <vscale x 4 x i8> %0,
    ptr %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)
  %b = extractvalue { <vscale x 4 x i8>, iXLen } %a, 0
  %c = extractvalue { <vscale x 4 x i8>, iXLen } %a, 1
  store iXLen %c, iXLen* %4

  ret <vscale x 4 x i8> %b
}

declare { <vscale x 8 x i8>, iXLen } @llvm.riscv.vleff.nxv8i8(
  <vscale x 8 x i8>,
  ptr,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vleff_v_nxv8i8_nxv8i8(ptr %0, iXLen %1, iXLen* %2) nounwind {
; RV32-LABEL: intrinsic_vleff_v_nxv8i8_nxv8i8:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; RV32-NEXT:    vle8ff.v v8, (a0)
; RV32-NEXT:    csrr a0, vl
; RV32-NEXT:    sw a0, 0(a2)
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vleff_v_nxv8i8_nxv8i8:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; RV64-NEXT:    vle8ff.v v8, (a0)
; RV64-NEXT:    csrr a0, vl
; RV64-NEXT:    sd a0, 0(a2)
; RV64-NEXT:    ret
entry:
  %a = call { <vscale x 8 x i8>, iXLen } @llvm.riscv.vleff.nxv8i8(
    <vscale x 8 x i8> undef,
    ptr %0,
    iXLen %1)
  %b = extractvalue { <vscale x 8 x i8>, iXLen } %a, 0
  %c = extractvalue { <vscale x 8 x i8>, iXLen } %a, 1
  store iXLen %c, iXLen* %2
  ret <vscale x 8 x i8> %b
}

declare { <vscale x 8 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv8i8(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vleff_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; RV32-NEXT:    vle8ff.v v8, (a0), v0.t
; RV32-NEXT:    csrr a0, vl
; RV32-NEXT:    sw a0, 0(a2)
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; RV64-NEXT:    vle8ff.v v8, (a0), v0.t
; RV64-NEXT:    csrr a0, vl
; RV64-NEXT:    sd a0, 0(a2)
; RV64-NEXT:    ret
entry:
  %a = call { <vscale x 8 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv8i8(
    <vscale x 8 x i8> %0,
    ptr %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)
  %b = extractvalue { <vscale x 8 x i8>, iXLen } %a, 0
  %c = extractvalue { <vscale x 8 x i8>, iXLen } %a, 1
  store iXLen %c, iXLen* %4

  ret <vscale x 8 x i8> %b
}

declare { <vscale x 16 x i8>, iXLen } @llvm.riscv.vleff.nxv16i8(
  <vscale x 16 x i8>,
  ptr,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vleff_v_nxv16i8_nxv16i8(ptr %0, iXLen %1, iXLen* %2) nounwind {
; RV32-LABEL: intrinsic_vleff_v_nxv16i8_nxv16i8:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; RV32-NEXT:    vle8ff.v v8, (a0)
; RV32-NEXT:    csrr a0, vl
; RV32-NEXT:    sw a0, 0(a2)
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vleff_v_nxv16i8_nxv16i8:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; RV64-NEXT:    vle8ff.v v8, (a0)
; RV64-NEXT:    csrr a0, vl
; RV64-NEXT:    sd a0, 0(a2)
; RV64-NEXT:    ret
entry:
  %a = call { <vscale x 16 x i8>, iXLen } @llvm.riscv.vleff.nxv16i8(
    <vscale x 16 x i8> undef,
    ptr %0,
    iXLen %1)
  %b = extractvalue { <vscale x 16 x i8>, iXLen } %a, 0
  %c = extractvalue { <vscale x 16 x i8>, iXLen } %a, 1
  store iXLen %c, iXLen* %2
  ret <vscale x 16 x i8> %b
}

declare { <vscale x 16 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv16i8(
  <vscale x 16 x i8>,
  ptr,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vleff_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
; RV32-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; RV32-NEXT:    vle8ff.v v8, (a0), v0.t
; RV32-NEXT:    csrr a0, vl
; RV32-NEXT:    sw a0, 0(a2)
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; RV64-NEXT:    vle8ff.v v8, (a0), v0.t
; RV64-NEXT:    csrr a0, vl
; RV64-NEXT:    sd a0, 0(a2)
; RV64-NEXT:    ret
entry:
  %a = call { <vscale x 16 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv16i8(
    <vscale x 16 x i8> %0,
    ptr %1,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)
  %b = extractvalue { <vscale x 16 x i8>, iXLen } %a, 0
  %c = extractvalue { <vscale x 16 x i8>, iXLen } %a, 1
  store iXLen %c, iXLen* %4

  ret <vscale x 16 x i8> %b
}

declare { <vscale x 32 x i8>, iXLen } @llvm.riscv.vleff.nxv32i8(
  <vscale x 32 x i8>,
  ptr,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vleff_v_nxv32i8_nxv32i8(ptr %0, iXLen %1, iXLen* %2) nounwind {
; RV32-LABEL: intrinsic_vleff_v_nxv32i8_nxv32i8:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; RV32-NEXT:    vle8ff.v v8, (a0)
; RV32-NEXT:    csrr a0, vl
; RV32-NEXT:    sw a0, 0(a2)
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vleff_v_nxv32i8_nxv32i8:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; RV64-NEXT:    vle8ff.v v8, (a0)
; RV64-NEXT:    csrr a0, vl
; RV64-NEXT:    sd a0, 0(a2)
; RV64-NEXT:    ret
entry:
  %a = call { <vscale x 32 x i8>, iXLen } @llvm.riscv.vleff.nxv32i8(
    <vscale x 32 x i8> undef,
    ptr %0,
    iXLen %1)
  %b = extractvalue { <vscale x 32 x i8>, iXLen } %a, 0
  %c = extractvalue { <vscale x 32 x i8>, iXLen } %a, 1
  store iXLen %c, iXLen* %2
  ret <vscale x 32 x i8> %b
}

declare { <vscale x 32 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv32i8(
  <vscale x 32 x i8>,
  ptr,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vleff_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3, iXLen* %4) nounwind {
; RV32-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; RV32-NEXT:    vle8ff.v v8, (a0), v0.t
; RV32-NEXT:    csrr a0, vl
; RV32-NEXT:    sw a0, 0(a2)
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; RV64-NEXT:    vle8ff.v v8, (a0), v0.t
; RV64-NEXT:    csrr a0, vl
; RV64-NEXT:    sd a0, 0(a2)
; RV64-NEXT:    ret
entry:
  %a = call { <vscale x 32 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv32i8(
    <vscale x 32 x i8> %0,
    ptr %1,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)
  %b = extractvalue { <vscale x 32 x i8>, iXLen } %a, 0
  %c = extractvalue { <vscale x 32 x i8>, iXLen } %a, 1
  store iXLen %c, iXLen* %4

  ret <vscale x 32 x i8> %b
}

declare { <vscale x 64 x i8>, iXLen } @llvm.riscv.vleff.nxv64i8(
  <vscale x 64 x i8>,
  ptr,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vleff_v_nxv64i8_nxv64i8(ptr %0, iXLen %1, iXLen* %2) nounwind {
; RV32-LABEL: intrinsic_vleff_v_nxv64i8_nxv64i8:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; RV32-NEXT:    vle8ff.v v8, (a0)
; RV32-NEXT:    csrr a0, vl
; RV32-NEXT:    sw a0, 0(a2)
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vleff_v_nxv64i8_nxv64i8:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; RV64-NEXT:    vle8ff.v v8, (a0)
; RV64-NEXT:    csrr a0, vl
; RV64-NEXT:    sd a0, 0(a2)
; RV64-NEXT:    ret
entry:
  %a = call { <vscale x 64 x i8>, iXLen } @llvm.riscv.vleff.nxv64i8(
    <vscale x 64 x i8> undef,
    ptr %0,
    iXLen %1)
  %b = extractvalue { <vscale x 64 x i8>, iXLen } %a, 0
  %c = extractvalue { <vscale x 64 x i8>, iXLen } %a, 1
  store iXLen %c, iXLen* %2
  ret <vscale x 64 x i8> %b
}

declare { <vscale x 64 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv64i8(
  <vscale x 64 x i8>,
  ptr,
  <vscale x 64 x i1>,
  iXLen,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vleff_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i1> %2, iXLen %3, iXLen* %4) nounwind {
; RV32-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; RV32-NEXT:    vle8ff.v v8, (a0), v0.t
; RV32-NEXT:    csrr a0, vl
; RV32-NEXT:    sw a0, 0(a2)
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; RV64-NEXT:    vle8ff.v v8, (a0), v0.t
; RV64-NEXT:    csrr a0, vl
; RV64-NEXT:    sd a0, 0(a2)
; RV64-NEXT:    ret
entry:
  %a = call { <vscale x 64 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv64i8(
    <vscale x 64 x i8> %0,
    ptr %1,
    <vscale x 64 x i1> %2,
    iXLen %3, iXLen 1)
  %b = extractvalue { <vscale x 64 x i8>, iXLen } %a, 0
  %c = extractvalue { <vscale x 64 x i8>, iXLen } %a, 1
  store iXLen %c, iXLen* %4

  ret <vscale x 64 x i8> %b
}

; Test with the VL output unused
define <vscale x 1 x double> @intrinsic_vleff_dead_vl(ptr %0, iXLen %1, iXLen* %2) nounwind {
; CHECK-LABEL: intrinsic_vleff_dead_vl:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vle64ff.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.nxv1f64(
    <vscale x 1 x double> undef,
    ptr %0,
    iXLen %1)
  %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 0
  ret <vscale x 1 x double> %b
}

define <vscale x 1 x double> @intrinsic_vleff_mask_dead_vl(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vleff_mask_dead_vl:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(
    <vscale x 1 x double> %0,
    ptr %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)
  %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 0

  ret <vscale x 1 x double> %b
}

; Test with the loaded value unused
define void @intrinsic_vleff_dead_value(ptr %0, iXLen %1, iXLen* %2) nounwind {
; RV32-LABEL: intrinsic_vleff_dead_value:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV32-NEXT:    vle64ff.v v8, (a0)
; RV32-NEXT:    csrr a0, vl
; RV32-NEXT:    sw a0, 0(a2)
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vleff_dead_value:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vle64ff.v v8, (a0)
; RV64-NEXT:    csrr a0, vl
; RV64-NEXT:    sd a0, 0(a2)
; RV64-NEXT:    ret
entry:
  %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.nxv1f64(
    <vscale x 1 x double> undef,
    ptr %0,
    iXLen %1)
  %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 1
  store iXLen %b, iXLen* %2
  ret void
}

define void @intrinsic_vleff_mask_dead_value(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
; RV32-LABEL: intrinsic_vleff_mask_dead_value:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; RV32-NEXT:    vle64ff.v v8, (a0), v0.t
; RV32-NEXT:    csrr a0, vl
; RV32-NEXT:    sw a0, 0(a2)
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vleff_mask_dead_value:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT:    vle64ff.v v8, (a0), v0.t
; RV64-NEXT:    csrr a0, vl
; RV64-NEXT:    sd a0, 0(a2)
; RV64-NEXT:    ret
entry:
  %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(
    <vscale x 1 x double> %0,
    ptr %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)
  %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 1
  store iXLen %b, iXLen* %4

  ret void
}

; Test with both outputs dead. Make sure the vleff isn't deleted.
define void @intrinsic_vleff_dead_all(ptr %0, iXLen %1, iXLen* %2) nounwind {
; CHECK-LABEL: intrinsic_vleff_dead_all:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vle64ff.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.nxv1f64(
    <vscale x 1 x double> undef,
    ptr %0,
    iXLen %1)
  ret void
}

define void @intrinsic_vleff_mask_dead_all(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vleff_mask_dead_all:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(
    <vscale x 1 x double> %0,
    ptr %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)
  ret void
}