; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
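
; There is no vmfge.vv encoding in the vector extension, so the vector-vector
; tests below expect selection to vmfle.vv with the operands swapped
; (a >= b is checked as b <= a); the vector-scalar tests map directly onto
; the vmfge.vf instruction.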
declare <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfge_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfge_vv_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmfle.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f16(
  <vscale x 1 x i1>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfge_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vmfle.vv v8, v9, v8
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmfle.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16(
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f16(
    <vscale x 1 x i1> %0,
    <vscale x 1 x half> %2,
    <vscale x 1 x half> %3,
    <vscale x 1 x i1> %mask,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

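; The masked variants first materialize %mask with an unmasked vmfge, then
; feed it to a masked vmfge. The vmv1r.v/vmv.v.v copies in the checks shuttle
; mask values through v0, the only register a masked (.t) operation can read
; its mask from.
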
declare <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfge_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfge_vv_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmfle.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f16(
  <vscale x 2 x i1>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfge_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vmfle.vv v8, v9, v8
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmfle.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f16(
    <vscale x 2 x half> %1,
    <vscale x 2 x half> %2,
    iXLen %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f16(
    <vscale x 2 x i1> %0,
    <vscale x 2 x half> %2,
    <vscale x 2 x half> %3,
    <vscale x 2 x i1> %mask,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfge_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfge_vv_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmfle.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f16(
  <vscale x 4 x i1>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfge_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vmfle.vv v8, v9, v8
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv.v.v v0, v8
; CHECK-NEXT:    vmfle.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv.v.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f16(
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    iXLen %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f16(
    <vscale x 4 x i1> %0,
    <vscale x 4 x half> %2,
    <vscale x 4 x half> %3,
    <vscale x 4 x i1> %mask,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmfge_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfge_vv_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmfle.vv v0, v10, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f16(
  <vscale x 8 x i1>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmfge_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vmfle.vv v14, v10, v8
; CHECK-NEXT:    vmv1r.v v8, v0
; CHECK-NEXT:    vmv1r.v v0, v14
; CHECK-NEXT:    vmfle.vv v8, v12, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f16(
    <vscale x 8 x half> %1,
    <vscale x 8 x half> %2,
    iXLen %4)
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f16(
    <vscale x 8 x i1> %0,
    <vscale x 8 x half> %2,
    <vscale x 8 x half> %3,
    <vscale x 8 x i1> %mask,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmfge_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfge_vv_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmfle.vv v0, v12, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    iXLen %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f16(
  <vscale x 16 x i1>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmfge_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vmfle.vv v20, v12, v8
; CHECK-NEXT:    vmv1r.v v8, v0
; CHECK-NEXT:    vmv1r.v v0, v20
; CHECK-NEXT:    vmfle.vv v8, v16, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f16(
    <vscale x 16 x half> %1,
    <vscale x 16 x half> %2,
    iXLen %4)
  %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f16(
    <vscale x 16 x i1> %0,
    <vscale x 16 x half> %2,
    <vscale x 16 x half> %3,
    <vscale x 16 x i1> %mask,
    iXLen %4)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfge_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfge_vv_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmfle.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32(
  <vscale x 1 x i1>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfge_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vmfle.vv v8, v9, v8
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmfle.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32(
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    iXLen %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32(
    <vscale x 1 x i1> %0,
    <vscale x 1 x float> %2,
    <vscale x 1 x float> %3,
    <vscale x 1 x i1> %mask,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfge_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfge_vv_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmfle.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32(
  <vscale x 2 x i1>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfge_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vmfle.vv v8, v9, v8
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv.v.v v0, v8
; CHECK-NEXT:    vmfle.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv.v.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32(
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    iXLen %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32(
    <vscale x 2 x i1> %0,
    <vscale x 2 x float> %2,
    <vscale x 2 x float> %3,
    <vscale x 2 x i1> %mask,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfge_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfge_vv_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmfle.vv v0, v10, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32(
  <vscale x 4 x i1>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfge_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vmfle.vv v14, v10, v8
; CHECK-NEXT:    vmv1r.v v8, v0
; CHECK-NEXT:    vmv1r.v v0, v14
; CHECK-NEXT:    vmfle.vv v8, v12, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32(
    <vscale x 4 x float> %1,
    <vscale x 4 x float> %2,
    iXLen %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32(
    <vscale x 4 x i1> %0,
    <vscale x 4 x float> %2,
    <vscale x 4 x float> %3,
    <vscale x 4 x i1> %mask,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmfge_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfge_vv_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmfle.vv v0, v12, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32(
  <vscale x 8 x i1>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmfge_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vmfle.vv v20, v12, v8
; CHECK-NEXT:    vmv1r.v v8, v0
; CHECK-NEXT:    vmv1r.v v0, v20
; CHECK-NEXT:    vmfle.vv v8, v16, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32(
    <vscale x 8 x float> %1,
    <vscale x 8 x float> %2,
    iXLen %4)
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32(
    <vscale x 8 x i1> %0,
    <vscale x 8 x float> %2,
    <vscale x 8 x float> %3,
    <vscale x 8 x i1> %mask,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfge_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfge_vv_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmfle.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64(
  <vscale x 1 x i1>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfge_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vmfle.vv v8, v9, v8
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv.v.v v0, v8
; CHECK-NEXT:    vmfle.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv.v.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64(
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %2,
    iXLen %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64(
    <vscale x 1 x i1> %0,
    <vscale x 1 x double> %2,
    <vscale x 1 x double> %3,
    <vscale x 1 x i1> %mask,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfge_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfge_vv_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmfle.vv v0, v10, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64(
  <vscale x 2 x i1>,
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfge_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vmfle.vv v14, v10, v8
; CHECK-NEXT:    vmv1r.v v8, v0
; CHECK-NEXT:    vmv1r.v v0, v14
; CHECK-NEXT:    vmfle.vv v8, v12, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64(
    <vscale x 2 x double> %1,
    <vscale x 2 x double> %2,
    iXLen %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64(
    <vscale x 2 x i1> %0,
    <vscale x 2 x double> %2,
    <vscale x 2 x double> %3,
    <vscale x 2 x i1> %mask,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfge_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfge_vv_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmfle.vv v0, v12, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64(
  <vscale x 4 x i1>,
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfge_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vmfle.vv v20, v12, v8
; CHECK-NEXT:    vmv1r.v v8, v0
; CHECK-NEXT:    vmv1r.v v0, v20
; CHECK-NEXT:    vmfle.vv v8, v16, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64(
    <vscale x 4 x double> %1,
    <vscale x 4 x double> %2,
    iXLen %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64(
    <vscale x 4 x i1> %0,
    <vscale x 4 x double> %2,
    <vscale x 4 x double> %3,
    <vscale x 4 x i1> %mask,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

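; Vector-scalar (vf) forms follow. Unlike the vv case, vmfge.vf exists as a
; machine instruction, so no operand swap is needed and the comparison is
; selected directly.
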
declare <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16.f16(
  <vscale x 1 x half>,
  half,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfge_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfge_vf_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmfge.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16.f16(
    <vscale x 1 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f16.f16(
  <vscale x 1 x i1>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vmfge.vf v10, v8, fa0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f16.f16(
    <vscale x 1 x i1> %0,
    <vscale x 1 x half> %1,
    half %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f16.f16(
  <vscale x 2 x half>,
  half,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfge_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfge_vf_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmfge.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f16.f16(
    <vscale x 2 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f16.f16(
  <vscale x 2 x i1>,
  <vscale x 2 x half>,
  half,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vmfge.vf v10, v8, fa0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f16.f16(
    <vscale x 2 x i1> %0,
    <vscale x 2 x half> %1,
    half %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f16.f16(
  <vscale x 4 x half>,
  half,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfge_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfge_vf_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmfge.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f16.f16(
    <vscale x 4 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f16.f16(
  <vscale x 4 x i1>,
  <vscale x 4 x half>,
  half,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vmfge.vf v10, v8, fa0, v0.t
; CHECK-NEXT:    vmv.v.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f16.f16(
    <vscale x 4 x i1> %0,
    <vscale x 4 x half> %1,
    half %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f16.f16(
  <vscale x 8 x half>,
  half,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmfge_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfge_vf_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmfge.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f16.f16(
    <vscale x 8 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f16.f16(
  <vscale x 8 x i1>,
  <vscale x 8 x half>,
  half,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vmfge.vf v11, v8, fa0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f16.f16(
    <vscale x 8 x i1> %0,
    <vscale x 8 x half> %1,
    half %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f16.f16(
  <vscale x 16 x half>,
  half,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmfge_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfge_vf_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmfge.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f16.f16(
    <vscale x 16 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f16.f16(
  <vscale x 16 x i1>,
  <vscale x 16 x half>,
  half,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmfge_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v13, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vmfge.vf v13, v8, fa0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v13
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f16.f16(
    <vscale x 16 x i1> %0,
    <vscale x 16 x half> %1,
    half %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32.f32(
  <vscale x 1 x float>,
  float,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfge_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfge_vf_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmfge.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32.f32(
    <vscale x 1 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32(
  <vscale x 1 x i1>,
  <vscale x 1 x float>,
  float,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vmfge.vf v10, v8, fa0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32(
    <vscale x 1 x i1> %0,
    <vscale x 1 x float> %1,
    float %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32.f32(
  <vscale x 2 x float>,
  float,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfge_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfge_vf_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmfge.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32.f32(
    <vscale x 2 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32.f32(
  <vscale x 2 x i1>,
  <vscale x 2 x float>,
  float,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vmfge.vf v10, v8, fa0, v0.t
; CHECK-NEXT:    vmv.v.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32.f32(
    <vscale x 2 x i1> %0,
    <vscale x 2 x float> %1,
    float %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32.f32(
  <vscale x 4 x float>,
  float,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfge_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfge_vf_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmfge.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32.f32(
    <vscale x 4 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32.f32(
  <vscale x 4 x i1>,
  <vscale x 4 x float>,
  float,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vmfge.vf v11, v8, fa0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32.f32(
    <vscale x 4 x i1> %0,
    <vscale x 4 x float> %1,
    float %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32.f32(
  <vscale x 8 x float>,
  float,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmfge_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfge_vf_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmfge.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32.f32(
    <vscale x 8 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32.f32(
  <vscale x 8 x i1>,
  <vscale x 8 x float>,
  float,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v13, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vmfge.vf v13, v8, fa0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v13
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32.f32(
    <vscale x 8 x i1> %0,
    <vscale x 8 x float> %1,
    float %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64.f64(
  <vscale x 1 x double>,
  double,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfge_vf_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfge_vf_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmfge.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64.f64(
    <vscale x 1 x double> %0,
    double %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64.f64(
  <vscale x 1 x i1>,
  <vscale x 1 x double>,
  double,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vmfge.vf v10, v8, fa0, v0.t
; CHECK-NEXT:    vmv.v.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64.f64(
    <vscale x 1 x i1> %0,
    <vscale x 1 x double> %1,
    double %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64.f64(
  <vscale x 2 x double>,
  double,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfge_vf_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfge_vf_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmfge.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64.f64(
    <vscale x 2 x double> %0,
    double %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64.f64(
  <vscale x 2 x i1>,
  <vscale x 2 x double>,
  double,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vmfge.vf v11, v8, fa0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64.f64(
    <vscale x 2 x i1> %0,
    <vscale x 2 x double> %1,
    double %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64.f64(
  <vscale x 4 x double>,
  double,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfge_vf_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfge_vf_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmfge.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64.f64(
    <vscale x 4 x double> %0,
    double %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64.f64(
  <vscale x 4 x i1>,
  <vscale x 4 x double>,
  double,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v13, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vmfge.vf v13, v8, fa0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v13
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64.f64(
    <vscale x 4 x i1> %0,
    <vscale x 4 x double> %1,
    double %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}