; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfle_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfle_vv_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmfle.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16(
  <vscale x 1 x i1>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vmfle.vv v0, v8, v9
; CHECK-NEXT:    vmfle.vv v11, v9, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16(
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16(
    <vscale x 1 x i1> %0,
    <vscale x 1 x half> %2,
    <vscale x 1 x half> %3,
    <vscale x 1 x i1> %mask,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfle_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfle_vv_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmfle.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16(
  <vscale x 2 x i1>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vmfle.vv v0, v8, v9
; CHECK-NEXT:    vmfle.vv v11, v9, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16(
    <vscale x 2 x half> %1,
    <vscale x 2 x half> %2,
    iXLen %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16(
    <vscale x 2 x i1> %0,
    <vscale x 2 x half> %2,
    <vscale x 2 x half> %3,
    <vscale x 2 x i1> %mask,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfle_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfle_vv_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmfle.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16(
  <vscale x 4 x i1>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vmfle.vv v0, v8, v9
; CHECK-NEXT:    vmfle.vv v11, v9, v10, v0.t
; CHECK-NEXT:    vmv.v.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16(
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    iXLen %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16(
    <vscale x 4 x i1> %0,
    <vscale x 4 x half> %2,
    <vscale x 4 x half> %3,
    <vscale x 4 x i1> %mask,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmfle_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfle_vv_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmfle.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16(
  <vscale x 8 x i1>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v14, v0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vmfle.vv v0, v8, v10
; CHECK-NEXT:    vmfle.vv v14, v10, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v14
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16(
    <vscale x 8 x half> %1,
    <vscale x 8 x half> %2,
    iXLen %4)
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16(
    <vscale x 8 x i1> %0,
    <vscale x 8 x half> %2,
    <vscale x 8 x half> %3,
    <vscale x 8 x i1> %mask,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmfle_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfle_vv_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmfle.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    iXLen %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16(
  <vscale x 16 x i1>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v20, v0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vmfle.vv v0, v8, v12
; CHECK-NEXT:    vmfle.vv v20, v12, v16, v0.t
; CHECK-NEXT:    vmv1r.v v0, v20
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16(
    <vscale x 16 x half> %1,
    <vscale x 16 x half> %2,
    iXLen %4)
  %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16(
    <vscale x 16 x i1> %0,
    <vscale x 16 x half> %2,
    <vscale x 16 x half> %3,
    <vscale x 16 x i1> %mask,
    iXLen %4)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfle_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfle_vv_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmfle.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32(
  <vscale x 1 x i1>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vmfle.vv v0, v8, v9
; CHECK-NEXT:    vmfle.vv v11, v9, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32(
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    iXLen %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32(
    <vscale x 1 x i1> %0,
    <vscale x 1 x float> %2,
    <vscale x 1 x float> %3,
    <vscale x 1 x i1> %mask,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfle_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfle_vv_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmfle.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32(
  <vscale x 2 x i1>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vmfle.vv v0, v8, v9
; CHECK-NEXT:    vmfle.vv v11, v9, v10, v0.t
; CHECK-NEXT:    vmv.v.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32(
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    iXLen %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32(
    <vscale x 2 x i1> %0,
    <vscale x 2 x float> %2,
    <vscale x 2 x float> %3,
    <vscale x 2 x i1> %mask,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfle_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfle_vv_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmfle.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32(
  <vscale x 4 x i1>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v14, v0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vmfle.vv v0, v8, v10
; CHECK-NEXT:    vmfle.vv v14, v10, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v14
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32(
    <vscale x 4 x float> %1,
    <vscale x 4 x float> %2,
    iXLen %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32(
    <vscale x 4 x i1> %0,
    <vscale x 4 x float> %2,
    <vscale x 4 x float> %3,
    <vscale x 4 x i1> %mask,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmfle_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfle_vv_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmfle.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32(
  <vscale x 8 x i1>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v20, v0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vmfle.vv v0, v8, v12
; CHECK-NEXT:    vmfle.vv v20, v12, v16, v0.t
; CHECK-NEXT:    vmv1r.v v0, v20
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32(
    <vscale x 8 x float> %1,
    <vscale x 8 x float> %2,
    iXLen %4)
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32(
    <vscale x 8 x i1> %0,
    <vscale x 8 x float> %2,
    <vscale x 8 x float> %3,
    <vscale x 8 x i1> %mask,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfle_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfle_vv_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmfle.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64(
  <vscale x 1 x i1>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vmfle.vv v0, v8, v9
; CHECK-NEXT:    vmfle.vv v11, v9, v10, v0.t
; CHECK-NEXT:    vmv.v.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64(
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %2,
    iXLen %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64(
    <vscale x 1 x i1> %0,
    <vscale x 1 x double> %2,
    <vscale x 1 x double> %3,
    <vscale x 1 x i1> %mask,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfle_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfle_vv_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmfle.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64(
  <vscale x 2 x i1>,
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v14, v0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vmfle.vv v0, v8, v10
; CHECK-NEXT:    vmfle.vv v14, v10, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v14
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64(
    <vscale x 2 x double> %1,
    <vscale x 2 x double> %2,
    iXLen %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64(
    <vscale x 2 x i1> %0,
    <vscale x 2 x double> %2,
    <vscale x 2 x double> %3,
    <vscale x 2 x i1> %mask,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfle_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfle_vv_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmfle.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64(
  <vscale x 4 x i1>,
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v20, v0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vmfle.vv v0, v8, v12
; CHECK-NEXT:    vmfle.vv v20, v12, v16, v0.t
; CHECK-NEXT:    vmv1r.v v0, v20
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64(
    <vscale x 4 x double> %1,
    <vscale x 4 x double> %2,
    iXLen %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64(
    <vscale x 4 x i1> %0,
    <vscale x 4 x double> %2,
    <vscale x 4 x double> %3,
    <vscale x 4 x i1> %mask,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16.f16(
  <vscale x 1 x half>,
  half,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfle_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfle_vf_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmfle.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16.f16(
    <vscale x 1 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16.f16(
  <vscale x 1 x i1>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vmfle.vf v10, v8, fa0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16.f16(
    <vscale x 1 x i1> %0,
    <vscale x 1 x half> %1,
    half %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16.f16(
  <vscale x 2 x half>,
  half,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfle_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfle_vf_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmfle.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16.f16(
    <vscale x 2 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16.f16(
  <vscale x 2 x i1>,
  <vscale x 2 x half>,
  half,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vmfle.vf v10, v8, fa0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16.f16(
    <vscale x 2 x i1> %0,
    <vscale x 2 x half> %1,
    half %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16.f16(
  <vscale x 4 x half>,
  half,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfle_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfle_vf_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmfle.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16.f16(
    <vscale x 4 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16.f16(
  <vscale x 4 x i1>,
  <vscale x 4 x half>,
  half,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vmfle.vf v10, v8, fa0, v0.t
; CHECK-NEXT:    vmv.v.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16.f16(
    <vscale x 4 x i1> %0,
    <vscale x 4 x half> %1,
    half %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16.f16(
  <vscale x 8 x half>,
  half,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmfle_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfle_vf_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmfle.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16.f16(
    <vscale x 8 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16.f16(
  <vscale x 8 x i1>,
  <vscale x 8 x half>,
  half,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vmfle.vf v11, v8, fa0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16.f16(
    <vscale x 8 x i1> %0,
    <vscale x 8 x half> %1,
    half %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16.f16(
  <vscale x 16 x half>,
  half,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmfle_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfle_vf_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmfle.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16.f16(
    <vscale x 16 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16.f16(
  <vscale x 16 x i1>,
  <vscale x 16 x half>,
  half,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmfle_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v13, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vmfle.vf v13, v8, fa0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v13
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16.f16(
    <vscale x 16 x i1> %0,
    <vscale x 16 x half> %1,
    half %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32.f32(
  <vscale x 1 x float>,
  float,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfle_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfle_vf_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmfle.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32.f32(
    <vscale x 1 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32(
  <vscale x 1 x i1>,
  <vscale x 1 x float>,
  float,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vmfle.vf v10, v8, fa0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32(
    <vscale x 1 x i1> %0,
    <vscale x 1 x float> %1,
    float %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32.f32(
  <vscale x 2 x float>,
  float,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfle_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfle_vf_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmfle.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32.f32(
    <vscale x 2 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.f32(
  <vscale x 2 x i1>,
  <vscale x 2 x float>,
  float,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vmfle.vf v10, v8, fa0, v0.t
; CHECK-NEXT:    vmv.v.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.f32(
    <vscale x 2 x i1> %0,
    <vscale x 2 x float> %1,
    float %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32.f32(
  <vscale x 4 x float>,
  float,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfle_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfle_vf_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmfle.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32.f32(
    <vscale x 4 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.f32(
  <vscale x 4 x i1>,
  <vscale x 4 x float>,
  float,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vmfle.vf v11, v8, fa0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.f32(
    <vscale x 4 x i1> %0,
    <vscale x 4 x float> %1,
    float %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32.f32(
  <vscale x 8 x float>,
  float,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmfle_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfle_vf_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmfle.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32.f32(
    <vscale x 8 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.f32(
  <vscale x 8 x i1>,
  <vscale x 8 x float>,
  float,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v13, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vmfle.vf v13, v8, fa0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v13
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.f32(
    <vscale x 8 x i1> %0,
    <vscale x 8 x float> %1,
    float %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64.f64(
  <vscale x 1 x double>,
  double,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfle_vf_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfle_vf_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmfle.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64.f64(
    <vscale x 1 x double> %0,
    double %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.f64(
  <vscale x 1 x i1>,
  <vscale x 1 x double>,
  double,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vmfle.vf v10, v8, fa0, v0.t
; CHECK-NEXT:    vmv.v.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.f64(
    <vscale x 1 x i1> %0,
    <vscale x 1 x double> %1,
    double %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64.f64(
  <vscale x 2 x double>,
  double,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfle_vf_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfle_vf_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmfle.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64.f64(
    <vscale x 2 x double> %0,
    double %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.f64(
  <vscale x 2 x i1>,
  <vscale x 2 x double>,
  double,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vmfle.vf v11, v8, fa0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.f64(
    <vscale x 2 x i1> %0,
    <vscale x 2 x double> %1,
    double %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64.f64(
  <vscale x 4 x double>,
  double,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfle_vf_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfle_vf_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmfle.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64.f64(
    <vscale x 4 x double> %0,
    double %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.f64(
  <vscale x 4 x i1>,
  <vscale x 4 x double>,
  double,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v13, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vmfle.vf v13, v8, fa0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v13
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.f64(
    <vscale x 4 x i1> %0,
    <vscale x 4 x double> %1,
    double %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}