; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
declare <2 x half> @llvm.vp.minnum.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32)

define <2 x half> @vfmin_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v2f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT:    vfmin.vv v8, v8, v9, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfmin_vv_v2f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; ZVFHMIN-NEXT:    vfmin.vv v9, v9, v10, v0.t
; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT:    ret
  %v = call <2 x half> @llvm.vp.minnum.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl)
  ret <2 x half> %v
}
define <2 x half> @vfmin_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %vb, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v2f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT:    vfmin.vv v8, v8, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfmin_vv_v2f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; ZVFHMIN-NEXT:    vfmin.vv v9, v9, v10
; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT:    ret
  %head = insertelement <2 x i1> poison, i1 true, i32 0
  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
  %v = call <2 x half> @llvm.vp.minnum.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl)
  ret <2 x half> %v
}
declare <4 x half> @llvm.vp.minnum.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32)

define <4 x half> @vfmin_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v4f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfmin.vv v8, v8, v9, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfmin_vv_v4f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; ZVFHMIN-NEXT:    vfmin.vv v9, v9, v10, v0.t
; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT:    ret
  %v = call <4 x half> @llvm.vp.minnum.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl)
  ret <4 x half> %v
}
define <4 x half> @vfmin_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %vb, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v4f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfmin.vv v8, v8, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfmin_vv_v4f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; ZVFHMIN-NEXT:    vfmin.vv v9, v9, v10
; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT:    ret
  %head = insertelement <4 x i1> poison, i1 true, i32 0
  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
  %v = call <4 x half> @llvm.vp.minnum.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl)
  ret <4 x half> %v
}
declare <8 x half> @llvm.vp.minnum.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32)

define <8 x half> @vfmin_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v8f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT:    vfmin.vv v8, v8, v9, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfmin_vv_v8f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; ZVFHMIN-NEXT:    vfmin.vv v10, v12, v10, v0.t
; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT:    ret
  %v = call <8 x half> @llvm.vp.minnum.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl)
  ret <8 x half> %v
}
define <8 x half> @vfmin_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %vb, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v8f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT:    vfmin.vv v8, v8, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfmin_vv_v8f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; ZVFHMIN-NEXT:    vfmin.vv v10, v12, v10
; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT:    ret
  %head = insertelement <8 x i1> poison, i1 true, i32 0
  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
  %v = call <8 x half> @llvm.vp.minnum.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl)
  ret <8 x half> %v
}
declare <16 x half> @llvm.vp.minnum.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32)

define <16 x half> @vfmin_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v16f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT:    vfmin.vv v8, v8, v10, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfmin_vv_v16f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; ZVFHMIN-NEXT:    vfmin.vv v12, v16, v12, v0.t
; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT:    ret
  %v = call <16 x half> @llvm.vp.minnum.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl)
  ret <16 x half> %v
}
define <16 x half> @vfmin_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %vb, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v16f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT:    vfmin.vv v8, v8, v10
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfmin_vv_v16f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; ZVFHMIN-NEXT:    vfmin.vv v12, v16, v12
; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT:    ret
  %head = insertelement <16 x i1> poison, i1 true, i32 0
  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
  %v = call <16 x half> @llvm.vp.minnum.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl)
  ret <16 x half> %v
}
declare <2 x float> @llvm.vp.minnum.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32)

define <2 x float> @vfmin_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x float> @llvm.vp.minnum.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
  ret <2 x float> %v
}
define <2 x float> @vfmin_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v2f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v9
; CHECK-NEXT:    ret
  %head = insertelement <2 x i1> poison, i1 true, i32 0
  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
  %v = call <2 x float> @llvm.vp.minnum.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
  ret <2 x float> %v
}
declare <4 x float> @llvm.vp.minnum.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)

define <4 x float> @vfmin_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x float> @llvm.vp.minnum.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
  ret <4 x float> %v
}
define <4 x float> @vfmin_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v4f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v9
; CHECK-NEXT:    ret
  %head = insertelement <4 x i1> poison, i1 true, i32 0
  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
  %v = call <4 x float> @llvm.vp.minnum.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
  ret <4 x float> %v
}
declare <8 x float> @llvm.vp.minnum.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32)

define <8 x float> @vfmin_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x float> @llvm.vp.minnum.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
  ret <8 x float> %v
}
define <8 x float> @vfmin_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v8f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v10
; CHECK-NEXT:    ret
  %head = insertelement <8 x i1> poison, i1 true, i32 0
  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
  %v = call <8 x float> @llvm.vp.minnum.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
  ret <8 x float> %v
}
declare <16 x float> @llvm.vp.minnum.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32)

define <16 x float> @vfmin_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x float> @llvm.vp.minnum.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
  ret <16 x float> %v
}
define <16 x float> @vfmin_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v16f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v12
; CHECK-NEXT:    ret
  %head = insertelement <16 x i1> poison, i1 true, i32 0
  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
  %v = call <16 x float> @llvm.vp.minnum.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
  ret <16 x float> %v
}
declare <2 x double> @llvm.vp.minnum.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32)

define <2 x double> @vfmin_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x double> @llvm.vp.minnum.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
  ret <2 x double> %v
}
define <2 x double> @vfmin_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v2f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v9
; CHECK-NEXT:    ret
  %head = insertelement <2 x i1> poison, i1 true, i32 0
  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
  %v = call <2 x double> @llvm.vp.minnum.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
  ret <2 x double> %v
}
declare <4 x double> @llvm.vp.minnum.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32)

define <4 x double> @vfmin_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x double> @llvm.vp.minnum.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
  ret <4 x double> %v
}
define <4 x double> @vfmin_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v4f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v10
; CHECK-NEXT:    ret
  %head = insertelement <4 x i1> poison, i1 true, i32 0
  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
  %v = call <4 x double> @llvm.vp.minnum.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
  ret <4 x double> %v
}
declare <8 x double> @llvm.vp.minnum.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32)

define <8 x double> @vfmin_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x double> @llvm.vp.minnum.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
  ret <8 x double> %v
}
define <8 x double> @vfmin_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v8f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v12
; CHECK-NEXT:    ret
  %head = insertelement <8 x i1> poison, i1 true, i32 0
  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
  %v = call <8 x double> @llvm.vp.minnum.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
  ret <8 x double> %v
}
declare <15 x double> @llvm.vp.minnum.v15f64(<15 x double>, <15 x double>, <15 x i1>, i32)

define <15 x double> @vfmin_vv_v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v15f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <15 x double> @llvm.vp.minnum.v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 %evl)
  ret <15 x double> %v
}
define <15 x double> @vfmin_vv_v15f64_unmasked(<15 x double> %va, <15 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v15f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v16
; CHECK-NEXT:    ret
  %head = insertelement <15 x i1> poison, i1 true, i32 0
  %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
  %v = call <15 x double> @llvm.vp.minnum.v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 %evl)
  ret <15 x double> %v
}
declare <16 x double> @llvm.vp.minnum.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32)

define <16 x double> @vfmin_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v16f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x double> @llvm.vp.minnum.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
  ret <16 x double> %v
}
define <16 x double> @vfmin_vv_v16f64_unmasked(<16 x double> %va, <16 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v16f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v16
; CHECK-NEXT:    ret
  %head = insertelement <16 x i1> poison, i1 true, i32 0
  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
  %v = call <16 x double> @llvm.vp.minnum.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
  ret <16 x double> %v
}
declare <32 x double> @llvm.vp.minnum.v32f64(<32 x double>, <32 x double>, <32 x i1>, i32)

define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v32f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    sub sp, sp, a1
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    addi a1, a0, 128
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v24, (a1)
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vi v1, v0, 2
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v24, (a0)
; CHECK-NEXT:    li a1, 16
; CHECK-NEXT:    mv a0, a2
; CHECK-NEXT:    bltu a2, a1, .LBB26_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a0, 16
; CHECK-NEXT:  .LBB26_2:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v24, v0.t
; CHECK-NEXT:    addi a0, a2, -16
; CHECK-NEXT:    sltu a1, a2, a0
; CHECK-NEXT:    addi a1, a1, -1
; CHECK-NEXT:    and a0, a1, a0
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmv1r.v v0, v1
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vfmin.vv v16, v16, v24, v0.t
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %v = call <32 x double> @llvm.vp.minnum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl)
  ret <32 x double> %v
}
define <32 x double> @vfmin_vv_v32f64_unmasked(<32 x double> %va, <32 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v32f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, 128
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v24, (a1)
; CHECK-NEXT:    vle64.v v0, (a0)
; CHECK-NEXT:    li a1, 16
; CHECK-NEXT:    mv a0, a2
; CHECK-NEXT:    bltu a2, a1, .LBB27_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a0, 16
; CHECK-NEXT:  .LBB27_2:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v0
; CHECK-NEXT:    addi a0, a2, -16
; CHECK-NEXT:    sltu a1, a2, a0
; CHECK-NEXT:    addi a1, a1, -1
; CHECK-NEXT:    and a0, a1, a0
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfmin.vv v16, v16, v24
; CHECK-NEXT:    ret
  %head = insertelement <32 x i1> poison, i1 true, i32 0
  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
  %v = call <32 x double> @llvm.vp.minnum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl)
  ret <32 x double> %v
}